repo (string, lengths 1-99) | file (string, lengths 13-215) | code (string, lengths 12-59.2M) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (1 string class) |
---|---|---|---|---|---|---|
models | models-master/official/nlp/modeling/layers/reuse_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
import collections
import math
import string
import numpy as np
import tensorflow as tf
from official.modeling import tf_utils
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
  Query, key, value inputs after projection are expected to have the shape
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = "%s,%s->%s" % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = "%s,%s->%s" % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
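

# Illustrative worked example (not part of the upstream file): for rank-4
# inputs shaped `(batch, seq, num_heads, channels)` with `attn_axes=(1,)`,
# `_build_attention_equation(4, (1,))` produces
#   dot_product_equation == "aecd,abcd->acbe"   # (key, query) -> scores
#   combine_equation     == "acbe,aecd->abcd"   # (scores, value) -> output
# i.e. the familiar `BSNH,BTNH->BNTS` and `BNTS,BSNH->BTNH` einsum pair used
# by multi-head dot-product attention.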
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
return equation, bias_axes, len(output_str)
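

# Illustrative worked example (not part of the upstream file): a standard
# query/key/value projection uses `_build_proj_equation(2, bound_dims=1,
# output_dims=2)`, which yields the equation "abc,cde->abde" with bias_axes
# "de", i.e. an EinsumDense mapping
# `(batch, seq, dim) x (dim, num_heads, head_dim) -> (batch, seq, num_heads,
# head_dim)`.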
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
class ReuseMultiHeadAttention(tf.keras.layers.Layer):
"""MultiHeadAttention layer.
This is an implementation of multi-headed attention as described in the paper
"Attention is all you Need" (Vaswani et al., 2017).
  If `query`, `key`, `value` are the same, then
this is self-attention. Each timestep in `query` attends to the
corresponding sequence in `key`, and returns a fixed-width vector.
This layer first projects `query`, `key` and `value`. These are
(effectively) a list of tensors of length `num_attention_heads`, where the
corresponding shapes are `(batch_size, <query dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, value_dim)`.
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor.
  Finally, the result tensor with the last dimension as `value_dim` can take
  a linear projection and be returned.
Examples:
Performs 1D cross-attention over two sequence inputs with an attention mask.
Returns the additional attention weights over heads.
  >>> layer = ReuseMultiHeadAttention(num_heads=2, key_dim=2)
>>> target = tf.keras.Input(shape=[8, 16])
>>> source = tf.keras.Input(shape=[4, 16])
>>> output_tensor, weights = layer(target, source,
... return_attention_scores=True)
>>> print(output_tensor.shape)
(None, 8, 16)
>>> print(weights.shape)
(None, 2, 8, 4)
Performs 2D self-attention over a 5D input tensor on axes 2 and 3.
  >>> layer = ReuseMultiHeadAttention(num_heads=2, key_dim=2,
  ...                                 attention_axes=(2, 3))
>>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])
>>> output_tensor = layer(input_tensor, input_tensor)
>>> print(output_tensor.shape)
(None, 5, 3, 4, 16)
Args:
num_heads: Number of attention heads.
key_dim: Size of each attention head for query and key.
value_dim: Size of each attention head for value.
dropout: Dropout probability.
reuse_attention: An integer specifying number of heads to reuse.
-1 for all heads.
use_relative_pe: Whether to use relative position bias.
    pe_max_seq_length: Used to set the size of the relative position encodings.
use_bias: Boolean, whether the dense layers use bias vectors/matrices.
output_shape: The expected shape of an output tensor, besides the batch and
sequence dims. If not specified, projects back to the key feature dim.
    attention_axes: Axes over which the attention is applied. `None` means
      attention over all axes except batch, heads, and features.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
Call arguments:
query: Query `Tensor` of shape `(B, T, dim)`.
value: Value `Tensor` of shape `(B, S, dim)`.
key: Optional key `Tensor` of shape `(B, S, dim)`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions. The boolean mask specifies which query
elements can attend to which key elements, 1 indicates attention and 0
indicates no attention. Broadcasting can happen for the missing batch
dimensions and the head dimension.
    return_attention_scores: A boolean to indicate whether the output should
      be `(attention_output, attention_scores)` if True, or just
      `attention_output` if False. Defaults to False.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Defaults to either using the training mode of the parent layer/model,
or False (inference) if there is no parent layer.
Returns:
attention_output: The result of the computation, of shape `(B, T, E)`,
where `T` is for target sequence shapes and `E` is the query input last
dimension if `output_shape` is `None`. Otherwise, the multi-head outputs
      are projected to the shape specified by `output_shape`.
    attention_scores: [Optional] multi-head attention coefficients over
attention axes.
"""
def __init__(self,
num_heads,
key_dim,
value_dim=None,
dropout=0.0,
reuse_attention=0,
use_relative_pe=False,
pe_max_seq_length=512,
use_bias=True,
output_shape=None,
attention_axes=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(**kwargs)
self._num_heads = num_heads
self._key_dim = key_dim
self._value_dim = value_dim if value_dim else key_dim
self._dropout = dropout
if reuse_attention > self._num_heads or reuse_attention < -1:
raise ValueError("reuse_attention should be between -1 "
"and %d in call to %s." % (self.__class__,
self._num_heads))
if reuse_attention == -1:
reuse_attention = self._num_heads
self._reuse_heads = reuse_attention
self._use_relative_pe = use_relative_pe
self._pe_max_seq_length = pe_max_seq_length
self._use_bias = use_bias
self._output_shape = output_shape
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
if attention_axes is not None and not isinstance(attention_axes,
collections.abc.Sized):
self._attention_axes = (attention_axes,)
else:
self._attention_axes = attention_axes
self._built_from_signature = False
self._query_shape, self._key_shape, self._value_shape = None, None, None
# Use relative PE only if reuse_heads < num_heads.
if self._use_relative_pe and self._reuse_heads < self._num_heads:
# Determine the dtype from global policy.
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
policy = tf.bfloat16
elif policy.name == "mixed_float16":
policy = tf.float16
else:
policy = tf.float32
self._position_embeddings = tf.Variable(
name="relative_position_embeddings",
initial_value=lambda: tf.random.truncated_normal( # pylint: disable=g-long-lambda
[
1, self._num_heads - self._reuse_heads, 2 * self.
_pe_max_seq_length - 1
], mean=0.0, stddev=0.2, dtype=policy),
trainable=True, dtype=policy)
def get_config(self):
config = {
"num_heads": self._num_heads,
"key_dim": self._key_dim,
"value_dim": self._value_dim,
"dropout": self._dropout,
"use_bias": self._use_bias,
"output_shape": self._output_shape,
"attention_axes": self._attention_axes,
"reuse_attention": self._reuse_heads,
"use_relative_pe": self._use_relative_pe,
"pe_max_seq_length": self._pe_max_seq_length,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"query_shape": self._query_shape,
"key_shape": self._key_shape,
"value_shape": self._value_shape,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
# If the layer has a different build() function from the Keras default,
# we need to trigger the customized build to create weights.
query_shape = config.pop("query_shape")
key_shape = config.pop("key_shape")
value_shape = config.pop("value_shape")
layer = cls(**config)
if None in [query_shape, key_shape, value_shape]:
tf.get_logger().warning(
          "One of the dimensions of the input shape is missing. It should have been"
" memorized when the layer was serialized. "
"%s is created without weights.",
str(cls))
else:
layer._build_from_signature(query_shape, value_shape, key_shape) # pylint: disable=protected-access
return layer
def _build_from_signature(self, query, value, key=None):
"""Builds layers and variables.
Once the method is called, self._built_from_signature will be set to True.
Args:
query: Query tensor or TensorShape.
value: Value tensor or TensorShape.
key: Key tensor or TensorShape.
"""
self._built_from_signature = True
if hasattr(query, "shape"):
self._query_shape = tf.TensorShape(query.shape)
else:
self._query_shape = tf.TensorShape(query)
if hasattr(value, "shape"):
self._value_shape = tf.TensorShape(value.shape)
else:
self._value_shape = tf.TensorShape(value)
if key is None:
self._key_shape = self._value_shape
elif hasattr(key, "shape"):
self._key_shape = tf.TensorShape(key.shape)
else:
self._key_shape = tf.TensorShape(key)
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf.init_scope():
free_dims = self._query_shape.rank - 1
if self._reuse_heads < self._num_heads:
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self._query_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1,
[self._num_heads - self._reuse_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="query",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._key_shape.rank - 1, bound_dims=1, output_dims=2)
self._key_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1,
[self._num_heads - self._reuse_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="key",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._value_shape.rank - 1, bound_dims=1, output_dims=2)
self._value_dense = []
if self._reuse_heads > 0:
self._value_dense.append(
tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._reuse_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value_reuse",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(
self._bias_initializer),
**common_kwargs))
if self._reuse_heads < self._num_heads:
self._value_dense.append(
tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1,
[self._num_heads - self._reuse_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value_new",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(
self._bias_initializer),
**common_kwargs))
# Builds the attention computations for multi-head dot product attention.
# These computations could be wrapped into the keras attention layer once
      # it supports multi-head einsum computations.
self._build_attention(output_rank)
self._output_dense = []
if self._reuse_heads > 0:
self._output_dense.append(self._make_output_dense(
free_dims, common_kwargs, "attention_output_reuse"))
if self._reuse_heads < self._num_heads:
self._output_dense.append(self._make_output_dense(
free_dims, common_kwargs, "attention_output_new",
self._reuse_heads == 0))
def _make_output_dense(self, free_dims, common_kwargs, name=None,
use_bias=True):
"""Builds the output projection matrix.
Args:
free_dims: Number of free dimensions for einsum equation building.
common_kwargs: Common keyword arguments for einsum layer.
name: Name for the projection layer.
      use_bias: Whether to add a bias term; only applied if `self._use_bias`
        is also true.
Returns:
Projection layer.
"""
if self._output_shape:
if not isinstance(self._output_shape, collections.abc.Sized):
output_shape = [self._output_shape]
else:
output_shape = self._output_shape
else:
output_shape = [self._query_shape[-1]]
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=2, output_dims=len(output_shape))
return tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1, output_shape),
bias_axes=bias_axes if (use_bias and self._use_bias) else None,
name=name,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = tf.keras.layers.Softmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
def _masked_softmax(self, attention_scores, attention_mask=None):
# Normalize the attention scores to probabilities.
# `attention_scores` = [B, N, T, S]
if attention_mask is not None:
# The expand dim happens starting from the `num_heads` dimension,
# (<batch_dims>, num_heads, <query_attention_dims, key_attention_dims>)
mask_expansion_axes = [-len(self._attention_axes) * 2 - 1]
for _ in range(len(attention_scores.shape) - len(attention_mask.shape)):
attention_mask = tf.expand_dims(
attention_mask, axis=mask_expansion_axes)
return self._softmax(attention_scores, attention_mask)
def _compute_relative_position(self, query_seq_length, key_seq_length):
position_zero = self._pe_max_seq_length - 1
# We take the vector position variable and concatenate to form a matrix of
    # relative position encodings. i=0 indicates relative position is 0.
indices = tf.expand_dims(tf.range(0, -query_seq_length, -1),
-1) + tf.range(key_seq_length) + position_zero
indices = tf.maximum(indices, 0)
indices = tf.minimum(indices, 2*self._pe_max_seq_length-2)
attention_biases = tf.gather(self._position_embeddings, indices, axis=2)
return attention_biases
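
  # Illustrative note (not part of the upstream file): with
  # `pe_max_seq_length=4`, `position_zero` is 3, so query position i and key
  # position j look up index `(j - i) + 3` in the (2 * 4 - 1)-entry embedding
  # table, clipped to [0, 6]. For query_seq_length = key_seq_length = 3 the
  # index matrix is [[3, 4, 5], [2, 3, 4], [1, 2, 3]].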
def _compute_attention(self,
query,
key,
value,
reuse_scores=None,
attention_mask=None,
training=None):
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
query: Projected query `Tensor` of shape `(B, T, N, key_dim)`.
      key: Projected key `Tensor` of shape `(B, S, N, key_dim)`.
      value: List of projected value `Tensor`s of shape `(B, S, N, value_dim)`,
        holding the reused-head and/or newly computed head projections.
reuse_scores: Attention scores from a previous layer if needed.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
# Partial or no reuse
if self._reuse_heads < self._num_heads:
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
new_scores = tf.einsum(self._dot_product_equation, key, query)
# Add relative position embeddings if required.
if self._use_relative_pe:
new_scores = new_scores + self._compute_relative_position(
tf.shape(query)[1], tf.shape(key)[1])
new_scores = self._masked_softmax(new_scores, attention_mask)
if self._reuse_heads > 0: # Partial reuse
reuse_scores = reuse_scores[:, :self._reuse_heads, :, :]
attention_scores = tf.concat([new_scores, reuse_scores], 1)
else: # No reuse
attention_scores = new_scores
else: # Full reuse
attention_scores = reuse_scores
new_scores = None
# `context_layer` = [B, T, N, H]
attention_output = []
# Partial or full reuse
if self._reuse_heads > 0:
attention_output.append(
tf.einsum(self._combine_equation, self._dropout_layer(
reuse_scores, training=training), value[0]))
# Partial or no reuse
if self._reuse_heads < self._num_heads:
attention_output.append(
tf.einsum(self._combine_equation, self._dropout_layer(
new_scores, training=training), value[-1]))
return attention_output, attention_scores
def call(self,
query,
value,
key=None,
attention_mask=None,
return_attention_scores=False,
training=None,
reuse_attention_scores=None):
if self._reuse_heads > 0 and reuse_attention_scores is None:
raise ValueError("reuse_attention_scores cannot be None when "
"reuse_attention is True or > 0.")
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# N = `num_attention_heads`
# H = `size_per_head`
# `value` = [B, S, N, H]
value = [vd(value) for vd in self._value_dense]
if self._reuse_heads < self._num_heads:
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S, N, H]
key = self._key_dense(key)
else:
query, key = None, None
attention_output, attention_scores = self._compute_attention(
query, key, value, reuse_attention_scores, attention_mask, training)
attention_output = [od(attention_output[i]) for i, od in enumerate(
self._output_dense)]
if len(attention_output) == 1:
attention_output = attention_output[0]
else:
attention_output = attention_output[0] + attention_output[1]
if return_attention_scores:
return attention_output, attention_scores
return attention_output
| 25,658 | 41.133005 | 106 | py |
models | models-master/official/nlp/modeling/layers/pack_optimization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pack sequence optimization on accelerators."""
from typing import Dict
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import rezero_transformer
from official.nlp.modeling.layers import self_attention_mask
from official.nlp.modeling.layers import transformer_encoder_block
from official.nlp.modeling.layers import transformer_scaffold
@tf.keras.utils.register_keras_serializable(package='Text')
class PackBertEmbeddings(tf.keras.layers.Layer):
"""Performs packing tricks for BERT inputs to improve TPU utilization."""
def __init__(self, pack_sequences: int, **kwargs):
super().__init__(**kwargs)
self.pack_sequences = pack_sequences
def call(self, input_embeddings: tf.Tensor,
input_mask: tf.Tensor) -> Dict[str, tf.Tensor]:
batch_size, seq_len, embedding_dim = tf_utils.get_shape_list(
input_embeddings, expected_rank=3)
reduced_batch_size = batch_size // self.pack_sequences
packed_seq_len = self.pack_sequences * seq_len
packed_embeddings = tf.reshape(
input_embeddings, [reduced_batch_size, packed_seq_len, embedding_dim])
input_mask = tf.reshape(input_mask, [reduced_batch_size, packed_seq_len])
example_ids = 1 + tf.range(self.pack_sequences)
# Shape: [batch_size, seq_len, pack_sequences].
example_ids = tf.tile(example_ids[None, :, None],
[reduced_batch_size, 1, seq_len])
example_ids = tf.reshape(example_ids, [reduced_batch_size, packed_seq_len])
example_ids = tf.where(
tf.math.equal(input_mask, 0), tf.zeros_like(example_ids), example_ids)
packing_mask = tf.cast(
tf.equal(
tf.expand_dims(example_ids, 2), tf.expand_dims(example_ids, 1)),
dtype=tf.bool)
attention_mask = self_attention_mask.get_mask(
packed_embeddings, input_mask, dtype=tf.bool)
combined_attention_mask = tf.cast(
tf.math.logical_and(attention_mask, packing_mask), tf.float32)
return dict(
packed_embeddings=packed_embeddings,
combined_attention_mask=combined_attention_mask)
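

# Illustrative note (not part of the upstream file): with `pack_sequences=2`
# and per-example seq_len=4, a batch of 4 embeddings of shape [4, 4, D] is
# reshaped to [2, 8, D]. Each packed row gets example_ids like
# [1, 1, 1, 1, 2, 2, 2, 2] (0 wherever the input mask is 0), and
# `combined_attention_mask` only lets positions with the same example_id
# attend to each other, so the two packed sequences cannot mix.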
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedTransformerEncoderBlock(
transformer_encoder_block.TransformerEncoderBlock):
"""Transformer layer for packing optimization to stride over inputs."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._output_range is not None:
raise ValueError('StridedTransformerEncoderBlock does not '
'support `output_range` argument.')
def call(self, inputs, stride: tf.Tensor):
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError('Unexpected inputs to %s with length at %d' %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if self._norm_first:
source_tensor = input_tensor[:, ::stride, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor[:, ::stride, :]
if attention_mask is not None:
attention_mask = attention_mask[:, ::stride, :]
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
# Important to not combine `self._norm_first` and
# `self._use_query_residual` into one if clause because else is only for
# `_norm_first == False`.
if self._use_query_residual:
attention_output = source_tensor + attention_output
else:
if self._use_query_residual:
attention_output = target_tensor + attention_output
attention_output = self._attention_layer_norm(attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
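

# Illustrative note (not part of the upstream file): with `stride=2` and an
# input of shape [B, S, D], only positions 0, 2, 4, ... are used as queries
# (the attention mask is strided on its query dimension to match), while
# keys/values still cover the full packed sequence, so the block returns a
# tensor of shape [B, ceil(S / 2), D].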
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedReZeroTransformer(rezero_transformer.ReZeroTransformer):
"""ReZeroTransformer for packing optimization to stride over inputs."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._output_range is not None:
raise ValueError(f'{self.__class__} does not '
'support `output_range` argument.')
def call(self, inputs, stride: tf.Tensor):
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError(f'Unexpected inputs to {self.__class__} with '
f'length at {len(inputs)}.')
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
target_tensor = input_tensor[:, ::stride, :]
if attention_mask is not None:
attention_mask = attention_mask[:, ::stride, :]
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
attention_output = target_tensor + self._rezero_a * attention_output
if self._use_layer_norm:
attention_output = self._attention_layer_norm(attention_output)
else:
attention_output = tf.cast(attention_output, tf.float32)
intermediate_output = self._intermediate_dense(attention_output)
intermediate_output = self._inner_activation_layer(intermediate_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
layer_output = attention_output + tf.cast(self._rezero_a_ffn * layer_output,
tf.float32)
if self._use_layer_norm:
layer_output = self._output_layer_norm(layer_output)
return layer_output
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedTransformerScaffold(transformer_scaffold.TransformerScaffold):
"""TransformerScaffold for packing optimization to stride over inputs."""
def call(self, inputs, stride: tf.Tensor, training=None):
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError('Unexpected inputs to %s with length at %d' %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if key_value is None:
key_value = input_tensor
if self._norm_first:
source_tensor = input_tensor[:, ::stride, :]
input_tensor = self._attention_layer_norm(input_tensor, training=training)
if attention_mask is not None:
attention_mask = attention_mask[:, ::stride, :]
target_tensor = input_tensor[:, ::stride, :]
attention_output = self._attention_layer(
query=target_tensor,
value=key_value,
attention_mask=attention_mask,
training=training)
attention_output = self._attention_dropout(
attention_output, training=training)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(
target_tensor + attention_output, training=training)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(
attention_output, training=training)
if self._feedforward_block is None:
intermediate_output = self._intermediate_dense(attention_output)
intermediate_output = self._intermediate_activation_layer(
intermediate_output)
layer_output = self._output_dense(intermediate_output, training=training)
layer_output = self._output_dropout(layer_output, training=training)
layer_output = tf.cast(layer_output, tf.float32)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self._output_layer_norm(
layer_output + attention_output, training=training)
else:
if self._norm_first:
# if norm_first, assume the feedforward block will not apply layer norm
layer_output = self._feedforward_block(
attention_output, training=training)
layer_output += source_attention_output
else:
        # if not norm_first, assume that the feedforward does apply layer norm
layer_output = self._feedforward_block(
attention_output, training=training)
return layer_output
| 10,279 | 39.956175 | 80 | py |
models | models-master/official/nlp/modeling/layers/multi_channel_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-channel Attention."""
# pylint: disable=g-classes-have-attributes
import math
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import masked_softmax
class VotingAttention(tf.keras.layers.Layer):
"""Voting Attention layer.
Args:
num_heads: The number of attention heads.
head_size: Per-head hidden size.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
"""
def __init__(self,
num_heads,
head_size,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(**kwargs)
self._num_heads = num_heads
self._head_size = head_size
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
def build(self, unused_input_shapes):
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._query_dense = tf.keras.layers.EinsumDense(
"BAE,ENH->BANH",
output_shape=(None, self._num_heads, self._head_size),
bias_axes="NH",
name="query",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._key_dense = tf.keras.layers.EinsumDense(
"BAE,ENH->BANH",
output_shape=(None, self._num_heads, self._head_size),
bias_axes="NH",
name="key",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
super().build(unused_input_shapes)
def call(self, encoder_outputs, doc_attention_mask):
num_docs = tf_utils.get_shape_list(encoder_outputs, expected_rank=[4])[1]
cls_embeddings = encoder_outputs[:, :, 0, :]
key = self._key_dense(cls_embeddings)
query = self._query_dense(cls_embeddings)
doc_attention_mask = tf.cast(doc_attention_mask, tf.float32)
key = tf.einsum("BANH,BA->BANH", key, doc_attention_mask)
query = tf.einsum("BANH,BA->BANH", query, doc_attention_mask)
attention_matrix = tf.einsum("BXNH,BYNH->BNXY", query, key)
mask = tf.ones([num_docs, num_docs])
mask = tf.linalg.set_diag(mask, tf.zeros(num_docs))
attention_matrix = tf.einsum("BNXY,XY->BNXY", attention_matrix, mask)
doc_attention_probs = tf.einsum("BNAY->BNA", attention_matrix)
doc_attention_probs = tf.einsum("BNA->BA", doc_attention_probs)
infadder = (1.0 - doc_attention_mask) * -100000.0
return tf.nn.softmax(doc_attention_probs + infadder)
class MultiChannelAttention(tf.keras.layers.MultiHeadAttention):
"""Multi-channel Attention layer.
  Introduced in [Generating Representative Headlines for News Stories
](https://arxiv.org/abs/2001.09386). Expects multiple cross-attention
target sequences.
Call args:
query: Query `Tensor` of shape `[B, T, dim]`.
    value: Value `Tensor` of shape `[B, A, S, dim]`, where A denotes the
      number of documents (channels).
context_attention_weights: Context weights of shape `[B, N, T, A]`, where N
      is the number of attention heads. Combines the multi-channel source
      context tensors according to the distribution among channels.
key: Optional key `Tensor` of shape `[B, A, S, dim]`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
attention_mask: A boolean mask of shape `[B, T, S]`, that prevents attention
to certain positions.
"""
def _build_attention(self, rank):
super()._build_attention(rank) # pytype: disable=attribute-error # typed-keras
self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[2])
def call(self,
query,
value,
key=None,
context_attention_weights=None,
attention_mask=None):
if not self._built_from_signature:
self._build_from_signature(query, value, key=key)
if key is None:
key = value
# Scalar dimensions referenced here:
# B = batch size (number of stories)
# A = num_docs (number of docs)
# F = target sequence length
# T = source sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_tensor` = [B, F, N ,H]
query_tensor = self._query_dense(query)
# `key_tensor` = [B, A, T, N, H]
key_tensor = self._key_dense(key)
# `value_tensor` = [B, A, T, N, H]
value_tensor = self._value_dense(value)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum("BATNH,BFNH->BANFT", key_tensor, query_tensor)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(self._key_dim)))
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, A, N, F, T]
attention_probs = self._masked_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self._dropout_layer(attention_probs)
# `context_layer` = [B, F, N, H]
context_layer = tf.einsum("BANFT,BATNH->BAFNH", attention_probs,
value_tensor)
attention_output = tf.einsum("BNFA,BAFNH->BFNH", context_attention_weights,
context_layer)
attention_output = self._output_dense(attention_output)
return attention_output
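

# Usage sketch (illustrative, not part of the upstream file); all shapes are
# example values. `doc_weights` would typically be derived from
# `VotingAttention` output, broadcast/tiled to `[B, N, T, A]`.
#
#   layer = MultiChannelAttention(num_heads=2, key_dim=64)
#   query = tf.keras.Input(shape=[10, 256])          # [B, T, dim]
#   value = tf.keras.Input(shape=[5, 20, 256])       # [B, A=5 docs, S, dim]
#   doc_weights = tf.keras.Input(shape=[2, 10, 5])   # [B, N, T, A]
#   output = layer(query, value,
#                  context_attention_weights=doc_weights)  # [B, T, dim]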
| 7,312 | 40.316384 | 84 | py |
models | models-master/official/nlp/modeling/layers/text_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras Layers for BERT-specific preprocessing."""
# pylint: disable=g-import-not-at-top
from typing import Any, Dict, List, Mapping, Optional, Text, Union
from absl import logging
import tensorflow as tf
try:
# pytype: disable=import-error
import tensorflow_text as text
from tensorflow_text.python.ops import bert_tokenizer
# pytype: enable=import-error
except ImportError:
text = None
bert_tokenizer = None
except tf.errors.NotFoundError as e:
logging.warn("Encountered error when importing tensorflow_text: %s", e)
text = None
bert_tokenizer = None
def _check_if_tf_text_installed():
if text is None:
raise ImportError("import tensorflow_text failed, please install "
"'tensorflow-text-nightly'.")
def _truncate_row_lengths(ragged_tensor: tf.RaggedTensor,
new_lengths: tf.Tensor) -> tf.RaggedTensor:
"""Truncates the rows of `ragged_tensor` to the given row lengths."""
new_lengths = tf.broadcast_to(new_lengths,
ragged_tensor.bounding_shape()[0:1])
def fn(x):
row, new_length = x
return row[0:new_length]
fn_dtype = tf.RaggedTensorSpec(dtype=ragged_tensor.dtype,
ragged_rank=ragged_tensor.ragged_rank - 1)
result = tf.map_fn(fn, (ragged_tensor, new_lengths), dtype=fn_dtype)
# Work around broken shape propagation: without this, result has unknown rank.
flat_values_shape = [None] * ragged_tensor.flat_values.shape.rank
result = result.with_flat_values(
tf.ensure_shape(result.flat_values, flat_values_shape))
return result
class BertTokenizer(tf.keras.layers.Layer):
"""Wraps TF.Text's BertTokenizer with pre-defined vocab as a Keras Layer.
Attributes:
tokenize_with_offsets: If true, calls
`text.BertTokenizer.tokenize_with_offsets()` instead of plain
`text.BertTokenizer.tokenize()` and outputs a triple of
`(tokens, start_offsets, limit_offsets)`.
    raw_table_access: An object with methods `.lookup(keys)` and `.size()`
      that operate on the raw lookup table of tokens. It can be used to
      look up special token symbols like `[MASK]`.
"""
def __init__(self, *,
vocab_file: str,
lower_case: Optional[bool] = None,
tokenize_with_offsets: bool = False,
tokenizer_kwargs: Optional[Mapping[Text, Any]] = None,
**kwargs):
"""Initialize a `BertTokenizer` layer.
Args:
vocab_file: A Python string with the path of the vocabulary file.
This is a text file with newline-separated wordpiece tokens.
This layer initializes a lookup table from it that gets used with
`text.BertTokenizer`.
lower_case: Optional boolean forwarded to `text.BertTokenizer`.
If true, input text is converted to lower case (where applicable)
before tokenization. This must be set to match the way in which
the `vocab_file` was created. If passed, this overrides whatever value
may have been passed in `tokenizer_kwargs`.
tokenize_with_offsets: A Python boolean. If true, this layer calls
`text.BertTokenizer.tokenize_with_offsets()` instead of plain
`text.BertTokenizer.tokenize()` and outputs a triple of
`(tokens, start_offsets, limit_offsets)`
        instead of just tokens.
tokenizer_kwargs: Optional mapping with keyword arguments to forward to
`text.BertTokenizer`'s constructor.
**kwargs: Standard arguments to `Layer()`.
Raises:
ImportError: If importing `tensorflow_text` failed.
"""
_check_if_tf_text_installed()
self.tokenize_with_offsets = tokenize_with_offsets
# TODO(b/177326279): Stop storing the vocab table initializer as an
# attribute when https://github.com/tensorflow/tensorflow/issues/46456
# has been fixed in the TensorFlow versions of the TF Hub users that load
# a SavedModel created from this layer. Due to that issue, loading such a
# SavedModel forgets to add .vocab_table._initializer as a trackable
# dependency of .vocab_table, so that saving it again to a second SavedModel
# (e.g., the final model built using TF Hub) does not properly track
# the ._vocab_table._initializer._filename as an Asset.
self._vocab_table, self._vocab_initializer_donotuse = (
self._create_vocab_table_and_initializer(vocab_file))
self._special_tokens_dict = self._create_special_tokens_dict(
self._vocab_table, vocab_file)
super().__init__(**kwargs)
tokenizer_kwargs = dict(tokenizer_kwargs or {})
if lower_case is not None:
tokenizer_kwargs["lower_case"] = lower_case
self._bert_tokenizer = text.BertTokenizer(self._vocab_table,
**tokenizer_kwargs)
@property
def vocab_size(self):
return self._vocab_table.size()
def _create_vocab_table_and_initializer(self, vocab_file):
vocab_initializer = tf.lookup.TextFileInitializer(
vocab_file,
key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
vocab_table = tf.lookup.StaticHashTable(vocab_initializer, default_value=-1)
return vocab_table, vocab_initializer
def call(self, inputs: tf.Tensor):
"""Calls `text.BertTokenizer` on inputs.
Args:
inputs: A string Tensor of shape `(batch_size,)`.
Returns:
One or three of `RaggedTensors` if `tokenize_with_offsets` is False or
True, respectively. These are
tokens: A `RaggedTensor` of shape
`[batch_size, (words), (pieces_per_word)]`
and type int32. `tokens[i,j,k]` contains the k-th wordpiece of the
j-th word in the i-th input.
start_offsets, limit_offsets: If `tokenize_with_offsets` is True,
RaggedTensors of type int64 with the same indices as tokens.
Element `[i,j,k]` contains the byte offset at the start, or past the
end, resp., for the k-th wordpiece of the j-th word in the i-th input.
"""
# Prepare to reshape the result to work around broken shape inference.
batch_size = tf.shape(inputs)[0]
def _reshape(rt):
values = rt.values
row_splits = rt.row_splits
row_splits = tf.reshape(row_splits, [batch_size + 1])
return tf.RaggedTensor.from_row_splits(values, row_splits)
# Call the tokenizer.
if self.tokenize_with_offsets:
tokens, start_offsets, limit_offsets = (
self._bert_tokenizer.tokenize_with_offsets(inputs))
tokens = tf.cast(tokens, dtype=tf.int32)
return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)
else:
tokens = self._bert_tokenizer.tokenize(inputs)
tokens = tf.cast(tokens, dtype=tf.int32)
return _reshape(tokens)
def get_config(self):
    # Skip in tf.saved_model.save(); fail if called directly.
raise NotImplementedError("TODO(b/170480226): implement")
def get_special_tokens_dict(self):
"""Returns dict of token ids, keyed by standard names for their purpose.
Returns:
A dict from Python strings to Python integers. Each key is a standard
name for a special token describing its use. (For example, "padding_id"
is what BERT traditionally calls "[PAD]" but others may call "<pad>".)
The corresponding value is the integer token id. If a special token
is not found, its entry is omitted from the dict.
The supported keys and tokens are:
* start_of_sequence_id: looked up from "[CLS]"
* end_of_segment_id: looked up from "[SEP]"
      * padding_id: looked up from "[PAD]"
* mask_id: looked up from "[MASK]"
* vocab_size: one past the largest token id used
"""
return self._special_tokens_dict
def _create_special_tokens_dict(self, vocab_table, vocab_file):
special_tokens = dict(start_of_sequence_id="[CLS]",
end_of_segment_id="[SEP]",
padding_id="[PAD]",
mask_id="[MASK]")
with tf.init_scope():
if tf.executing_eagerly():
special_token_ids = vocab_table.lookup(
tf.constant(list(special_tokens.values()), tf.string))
vocab_size = vocab_table.size()
else:
# A blast from the past: non-eager init context while building Model.
# This can happen with Estimator or tf.compat.v1.disable_v2_behavior().
logging.warning(
"Non-eager init context; computing "
"BertTokenizer's special_tokens_dict in tf.compat.v1.Session")
with tf.Graph().as_default():
local_vocab_table, _ = self._create_vocab_table_and_initializer(
vocab_file)
special_token_ids_tensor = local_vocab_table.lookup(
tf.constant(list(special_tokens.values()), tf.string))
vocab_size_tensor = local_vocab_table.size()
init_ops = [tf.compat.v1.initialize_all_tables()]
with tf.compat.v1.Session() as sess:
sess.run(init_ops)
special_token_ids, vocab_size = sess.run(
[special_token_ids_tensor, vocab_size_tensor])
result = dict(
vocab_size=int(vocab_size) # Numpy to Python.
)
for k, v in zip(special_tokens, special_token_ids):
v = int(v)
if v >= 0:
result[k] = v
else:
logging.warning("Could not find %s as token \"%s\" in vocab file %s",
k, special_tokens[k], vocab_file)
return result
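

# Usage sketch (illustrative, not part of the upstream file); "vocab.txt" is a
# placeholder path to a newline-separated wordpiece vocabulary.
#
#   tokenizer = BertTokenizer(vocab_file="vocab.txt", lower_case=True)
#   tokens = tokenizer(tf.constant(["hello world!"]))
#   # -> RaggedTensor of shape [1, (words), (wordpieces)], dtype int32.
#   special = tokenizer.get_special_tokens_dict()
#   # -> e.g. {"vocab_size": ..., "padding_id": ..., "start_of_sequence_id": ...}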
class SentencepieceTokenizer(tf.keras.layers.Layer):
"""Wraps `tf_text.SentencepieceTokenizer` as a Keras Layer.
Attributes:
tokenize_with_offsets: If true, calls
`SentencepieceTokenizer.tokenize_with_offsets()`
instead of plain `.tokenize()` and outputs a triple of
`(tokens, start_offsets, limit_offsets)`.
"""
def __init__(self,
*,
lower_case: bool,
model_file_path: Optional[str] = None,
model_serialized_proto: Optional[str] = None,
tokenize_with_offsets: bool = False,
nbest_size: int = 0,
alpha: float = 1.0,
strip_diacritics: bool = False,
**kwargs):
"""Initializes a SentencepieceTokenizer layer.
Args:
lower_case: A Python boolean indicating whether to lowercase the string
before tokenization. NOTE: New models are encouraged to build `*_cf`
(case folding) normalization into the Sentencepiece model itself and
avoid this extra step.
model_file_path: A Python string with the path of the sentencepiece model.
Exactly one of `model_file_path` and `model_serialized_proto` can be
specified. In either case, the Keras model config for this layer will
store the actual proto (not a filename passed here).
model_serialized_proto: The sentencepiece model serialized proto string.
tokenize_with_offsets: A Python boolean. If true, this layer calls
`SentencepieceTokenizer.tokenize_with_offsets()` instead of
plain `.tokenize()` and outputs a triple of
        `(tokens, start_offsets, limit_offsets)` instead of just tokens.
        Note that when the `strip_diacritics` option below is set to True,
        returning offsets is not supported yet.
nbest_size: A scalar for sampling:
nbest_size = {0,1}: No sampling is performed. (default)
nbest_size > 1: samples from the nbest_size results.
nbest_size < 0: assuming that nbest_size is infinite and samples
from the all hypothesis (lattice) using
forward-filtering-and-backward-sampling algorithm.
alpha: A scalar for a smoothing parameter. Inverse temperature for
probability rescaling.
strip_diacritics: Whether to strip diacritics or not. Note that stripping
diacritics requires additional text normalization and dropping bytes,
which makes it impossible to keep track of the offsets now. Hence
when `strip_diacritics` is set to True, we don't yet support
`tokenize_with_offsets`. NOTE: New models are encouraged to put this
into custom normalization rules for the Sentencepiece model itself to
avoid this extra step and the limitation regarding offsets.
**kwargs: standard arguments to `Layer()`.
Raises:
ImportError: if importing tensorflow_text failed.
"""
_check_if_tf_text_installed()
super().__init__(**kwargs)
if bool(model_file_path) == bool(model_serialized_proto):
raise ValueError("Exact one of `model_file_path` and "
"`model_serialized_proto` can be specified.")
# TODO(b/181866850): Support tokenize_with_offsets for strip_diacritics=True
if tokenize_with_offsets and strip_diacritics:
raise ValueError("`tokenize_with_offsets` is not supported when "
"`strip_diacritics` is set to True.")
if model_file_path:
self._model_serialized_proto = tf.io.gfile.GFile(model_file_path,
"rb").read()
else:
self._model_serialized_proto = model_serialized_proto
self._lower_case = lower_case
self.tokenize_with_offsets = tokenize_with_offsets
self._nbest_size = nbest_size
self._alpha = alpha
self._strip_diacritics = strip_diacritics
self._tokenizer = self._create_tokenizer()
self._special_tokens_dict = self._create_special_tokens_dict()
def _create_tokenizer(self):
return text.SentencepieceTokenizer(
model=self._model_serialized_proto,
out_type=tf.int32,
nbest_size=self._nbest_size,
alpha=self._alpha)
@property
def vocab_size(self):
return self._tokenizer.vocab_size()
def call(self, inputs: tf.Tensor):
"""Calls `text.SentencepieceTokenizer` on inputs.
Args:
inputs: A string Tensor of shape `(batch_size,)`.
Returns:
One or three of RaggedTensors if tokenize_with_offsets is False or True,
respectively. These are
tokens: A RaggedTensor of shape `[batch_size, (pieces)]` and type `int32`.
`tokens[i,j]` contains the j-th piece in the i-th input.
start_offsets, limit_offsets: If `tokenize_with_offsets` is True,
RaggedTensors of type `int64` with the same indices as tokens.
Element `[i,j]` contains the byte offset at the start, or past the
end, resp., for the j-th piece in the i-th input.
"""
if self._strip_diacritics:
if self.tokenize_with_offsets:
raise ValueError("`tokenize_with_offsets` is not supported yet when "
"`strip_diacritics` is set to True (b/181866850).")
inputs = text.normalize_utf8(inputs, "NFD")
inputs = tf.strings.regex_replace(inputs, r"\p{Mn}", "")
if self._lower_case:
inputs = text.case_fold_utf8(inputs)
# Prepare to reshape the result to work around broken shape inference.
batch_size = tf.shape(inputs)[0]
def _reshape(rt):
values = rt.values
row_splits = rt.row_splits
row_splits = tf.reshape(row_splits, [batch_size + 1])
return tf.RaggedTensor.from_row_splits(values, row_splits)
# Call the tokenizer.
if self.tokenize_with_offsets:
tokens, start_offsets, limit_offsets = (
self._tokenizer.tokenize_with_offsets(inputs))
return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)
else:
tokens = self._tokenizer.tokenize(inputs)
return _reshape(tokens)
def get_config(self):
    # Skip in tf.saved_model.save(); fail if called directly.
raise NotImplementedError("TODO(b/170480226): implement")
def get_special_tokens_dict(self):
"""Returns dict of token ids, keyed by standard names for their purpose.
Returns:
A dict from Python strings to Python integers. Each key is a standard
name for a special token describing its use. (For example, "padding_id"
is what Sentencepiece calls "<pad>" but others may call "[PAD]".)
The corresponding value is the integer token id. If a special token
is not found, its entry is omitted from the dict.
The supported keys and tokens are:
* start_of_sequence_id: looked up from "[CLS]"
* end_of_segment_id: looked up from "[SEP]"
* padding_id: looked up from "<pad>"
* mask_id: looked up from "[MASK]"
* vocab_size: one past the largest token id used
"""
return self._special_tokens_dict
def _create_special_tokens_dict(self):
special_tokens = dict(
start_of_sequence_id=b"[CLS]",
end_of_segment_id=b"[SEP]",
padding_id=b"<pad>",
mask_id=b"[MASK]")
with tf.init_scope():
if tf.executing_eagerly():
special_token_ids = self._tokenizer.string_to_id(
tf.constant(list(special_tokens.values()), tf.string))
inverse_tokens = self._tokenizer.id_to_string(special_token_ids)
vocab_size = self._tokenizer.vocab_size()
else:
# A blast from the past: non-eager init context while building Model.
# This can happen with Estimator or tf.compat.v1.disable_v2_behavior().
logging.warning(
"Non-eager init context; computing SentencepieceTokenizer's "
"special_tokens_dict in tf.compat.v1.Session")
with tf.Graph().as_default():
local_tokenizer = self._create_tokenizer()
special_token_ids_tensor = local_tokenizer.string_to_id(
tf.constant(list(special_tokens.values()), tf.string))
inverse_tokens_tensor = local_tokenizer.id_to_string(
special_token_ids_tensor)
vocab_size_tensor = local_tokenizer.vocab_size()
with tf.compat.v1.Session() as sess:
special_token_ids, inverse_tokens, vocab_size = sess.run(
[special_token_ids_tensor, inverse_tokens_tensor,
vocab_size_tensor])
result = dict(
vocab_size=int(vocab_size) # Numpy to Python.
)
for name, token_id, inverse_token in zip(special_tokens,
special_token_ids,
inverse_tokens):
if special_tokens[name] == inverse_token:
result[name] = int(token_id)
else:
logging.warning(
"Could not find %s as token \"%s\" in sentencepiece model, "
"got \"%s\"", name, special_tokens[name], inverse_token)
return result
class BertPackInputs(tf.keras.layers.Layer):
"""Packs tokens into model inputs for BERT."""
def __init__(self,
seq_length,
*,
start_of_sequence_id=None,
end_of_segment_id=None,
padding_id=None,
special_tokens_dict=None,
truncator="round_robin",
**kwargs):
"""Initializes with a target `seq_length`, relevant token ids and truncator.
Args:
seq_length: The desired output length. Must not exceed the max_seq_length
that was fixed at training time for the BERT model receiving the inputs.
start_of_sequence_id: The numeric id of the token that is to be placed
at the start of each sequence (called "[CLS]" for BERT).
end_of_segment_id: The numeric id of the token that is to be placed
at the end of each input segment (called "[SEP]" for BERT).
padding_id: The numeric id of the token that is to be placed into the
unused positions after the last segment in the sequence
(called "[PAD]" for BERT).
special_tokens_dict: Optionally, a dict from Python strings to Python
integers that contains values for `start_of_sequence_id`,
`end_of_segment_id` and `padding_id`. (Further values in the dict are
        silently ignored.) If this is passed, separate *_id arguments must be
omitted.
truncator: The algorithm to truncate a list of batched segments to fit a
per-example length limit. The value can be either `round_robin` or
`waterfall`:
(1) For "round_robin" algorithm, available space is assigned
one token at a time in a round-robin fashion to the inputs that still
need some, until the limit is reached. It currently only supports
one or two segments.
(2) For "waterfall" algorithm, the allocation of the budget is done
using a "waterfall" algorithm that allocates quota in a
left-to-right manner and fills up the buckets until we run out of
          budget. It supports an arbitrary number of segments.
**kwargs: standard arguments to `Layer()`.
Raises:
ImportError: if importing `tensorflow_text` failed.
"""
_check_if_tf_text_installed()
super().__init__(**kwargs)
self.seq_length = seq_length
if truncator not in ("round_robin", "waterfall"):
raise ValueError("Only 'round_robin' and 'waterfall' algorithms are "
"supported, but got %s" % truncator)
self.truncator = truncator
self._init_token_ids(
start_of_sequence_id=start_of_sequence_id,
end_of_segment_id=end_of_segment_id,
padding_id=padding_id,
special_tokens_dict=special_tokens_dict)
def _init_token_ids(
self, *,
start_of_sequence_id,
end_of_segment_id,
padding_id,
special_tokens_dict):
usage = ("Must pass either all of start_of_sequence_id, end_of_segment_id, "
"padding_id as arguments, or else a special_tokens_dict "
"with those keys.")
special_tokens_args = [start_of_sequence_id, end_of_segment_id, padding_id]
if special_tokens_dict is None:
if any(x is None for x in special_tokens_args):
        raise ValueError(usage)
self.start_of_sequence_id = int(start_of_sequence_id)
self.end_of_segment_id = int(end_of_segment_id)
self.padding_id = int(padding_id)
else:
if any(x is not None for x in special_tokens_args):
        raise ValueError(usage)
self.start_of_sequence_id = int(
special_tokens_dict["start_of_sequence_id"])
self.end_of_segment_id = int(special_tokens_dict["end_of_segment_id"])
self.padding_id = int(special_tokens_dict["padding_id"])
def get_config(self) -> Dict[str, Any]:
config = super().get_config()
config["seq_length"] = self.seq_length
config["start_of_sequence_id"] = self.start_of_sequence_id
config["end_of_segment_id"] = self.end_of_segment_id
config["padding_id"] = self.padding_id
config["truncator"] = self.truncator
return config
def call(self, inputs: Union[tf.RaggedTensor, List[tf.RaggedTensor]]):
"""Adds special tokens to pack a list of segments into BERT input Tensors.
Args:
inputs: A Python list of one or two RaggedTensors, each with the batched
        values of one input segment. The j-th segment of the i-th input example
consists of slice `inputs[j][i, ...]`.
Returns:
A nest of Tensors for use as input to the BERT TransformerEncoder.
"""
# BertPackInputsSavedModelWrapper relies on only calling bert_pack_inputs()
return BertPackInputs.bert_pack_inputs(
inputs, self.seq_length,
start_of_sequence_id=self.start_of_sequence_id,
end_of_segment_id=self.end_of_segment_id,
padding_id=self.padding_id,
truncator=self.truncator)
@staticmethod
def bert_pack_inputs(inputs: Union[tf.RaggedTensor, List[tf.RaggedTensor]],
seq_length: Union[int, tf.Tensor],
start_of_sequence_id: Union[int, tf.Tensor],
end_of_segment_id: Union[int, tf.Tensor],
padding_id: Union[int, tf.Tensor],
truncator="round_robin"):
"""Freestanding equivalent of the BertPackInputs layer."""
_check_if_tf_text_installed()
# Sanitize inputs.
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if not inputs:
raise ValueError("At least one input is required for packing")
input_ranks = [rt.shape.rank for rt in inputs]
if None in input_ranks or len(set(input_ranks)) > 1:
raise ValueError("All inputs for packing must have the same known rank, "
"found ranks " + ",".join(input_ranks))
# Flatten inputs to [batch_size, (tokens)].
if input_ranks[0] > 2:
inputs = [rt.merge_dims(1, -1) for rt in inputs]
# In case inputs weren't truncated (as they should have been),
# fall back to some ad-hoc truncation.
num_special_tokens = len(inputs) + 1
if truncator == "round_robin":
trimmed_segments = text.RoundRobinTrimmer(seq_length -
num_special_tokens).trim(inputs)
elif truncator == "waterfall":
trimmed_segments = text.WaterfallTrimmer(
seq_length - num_special_tokens).trim(inputs)
else:
raise ValueError("Unsupported truncator: %s" % truncator)
# Combine segments.
segments_combined, segment_ids = text.combine_segments(
trimmed_segments,
start_of_sequence_id=start_of_sequence_id,
end_of_segment_id=end_of_segment_id)
# Pad to dense Tensors.
input_word_ids, _ = text.pad_model_inputs(segments_combined, seq_length,
pad_value=padding_id)
input_type_ids, input_mask = text.pad_model_inputs(segment_ids, seq_length,
pad_value=0)
# Work around broken shape inference.
output_shape = tf.stack([
inputs[0].nrows(out_type=tf.int32), # batch_size
tf.cast(seq_length, dtype=tf.int32)])
def _reshape(t):
return tf.reshape(t, output_shape)
# Assemble nest of input tensors as expected by BERT TransformerEncoder.
return dict(input_word_ids=_reshape(input_word_ids),
input_mask=_reshape(input_mask),
input_type_ids=_reshape(input_type_ids))
class FastWordpieceBertTokenizer(tf.keras.layers.Layer):
"""A bert tokenizer keras layer using text.FastWordpieceTokenizer.
See details: "Fast WordPiece Tokenization" (https://arxiv.org/abs/2012.15524)
"""
def __init__(self,
*,
vocab_file: str,
lower_case: bool,
tokenize_with_offsets: bool = False,
**kwargs):
"""Initializes a FastWordpieceBertTokenizer layer.
Args:
vocab_file: A Python string with the path of the vocabulary file. This is
a text file with newline-separated wordpiece tokens. This layer loads
a list of tokens from it to create text.FastWordpieceTokenizer.
lower_case: A Python boolean forwarded to text.BasicTokenizer. If true,
input text is converted to lower case (where applicable) before
tokenization. This must be set to match the way in which the vocab_file
was created.
tokenize_with_offsets: A Python boolean. If true, this layer calls
FastWordpieceTokenizer.tokenize_with_offsets() instead of plain
.tokenize() and outputs a triple of (tokens, start_offsets,
        limit_offsets) instead of just tokens.
**kwargs: standard arguments to Layer().
"""
super().__init__(**kwargs)
logging.info("Initialize a FastWordpieceBertTokenizer.")
self.tokenize_with_offsets = tokenize_with_offsets
self._basic_tokenizer = bert_tokenizer.BasicTokenizer(lower_case=lower_case)
# Read the vocab file into a list of tokens to create `fast_wp_tokenizer`.
self._vocab = [line.rstrip() for line in tf.io.gfile.GFile(vocab_file)]
self._fast_wp_tokenizer = text.FastWordpieceTokenizer(
vocab=self._vocab, token_out_type=tf.int32, no_pretokenization=True)
self._special_tokens_dict = self._create_special_tokens_dict()
@property
def vocab_size(self):
return len(self._vocab)
def get_config(self):
    # Skip in tf.saved_model.save(); fail if called directly.
# We cannot just put the original, user-supplied vocab file name into
# the config, because the path has to change as the SavedModel is copied
# around.
raise NotImplementedError("Not implemented yet.")
def get_special_tokens_dict(self):
"""Returns dict of token ids, keyed by standard names for their purpose.
Returns:
A dict from Python strings to Python integers. Each key is a standard
name for a special token describing its use. (For example, "padding_id"
is what BERT traditionally calls "[PAD]" but others may call "<pad>".)
The corresponding value is the integer token id. If a special token
is not found, its entry is omitted from the dict.
The supported keys and tokens are:
* start_of_sequence_id: looked up from "[CLS]"
* end_of_segment_id: looked up from "[SEP]"
      * padding_id: looked up from "[PAD]"
* mask_id: looked up from "[MASK]"
* vocab_size: one past the largest token id used
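    For example, with a standard BERT uncased vocab this might return
    (ids are illustrative): {"start_of_sequence_id": 101,
    "end_of_segment_id": 102, "padding_id": 0, "mask_id": 103,
    "vocab_size": 30522}.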
"""
return self._special_tokens_dict
def _create_special_tokens_dict(self):
"""Creates dict of token ids, keyed by standard names for their purpose."""
special_tokens = {"vocab_size": self.vocab_size}
def add_special_token(key, token):
try:
token_id = self._vocab.index(token)
special_tokens[key] = token_id
except ValueError:
        # Similar to nlp.modeling.layers.BertTokenizer: if a special token
# is not found, its entry is omitted from the dict.
logging.warning("Could not find %s as token \"%s\" in vocab file", key,
token)
add_special_token("start_of_sequence_id", "[CLS]")
add_special_token("end_of_segment_id", "[SEP]")
add_special_token("padding_id", "[PAD]")
add_special_token("mask_id", "[MASK]")
return special_tokens
def _tokenize_with_offsets(self, text_input: tf.Tensor):
tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)
wordpieces, wp_begin, wp_end = (
self._fast_wp_tokenizer.tokenize_with_offsets(tokens))
begin_expanded = tf.expand_dims(begin, axis=2)
final_begin = begin_expanded + wp_begin
final_end = begin_expanded + wp_end
return wordpieces, final_begin, final_end
def _tokenize(self, text_input: tf.Tensor):
tokens = self._basic_tokenizer.tokenize(text_input)
return self._fast_wp_tokenizer.tokenize(tokens)
def call(self, inputs: tf.Tensor):
"""Calls text.BertTokenizer on inputs.
Args:
inputs: A string Tensor of shape [batch_size].
Returns:
      One RaggedTensor if tokenize_with_offsets is False, or a tuple of three
      RaggedTensors if it is True. These are
tokens: A RaggedTensor of shape [batch_size, (words), (pieces_per_word)]
and type int32. tokens[i,j,k] contains the k-th wordpiece of the
j-th word in the i-th input.
start_offsets, limit_offsets: If tokenize_with_offsets is True,
RaggedTensors of type int64 with the same indices as tokens.
Element [i,j,k] contains the byte offset at the start, or past the
end, resp., for the k-th wordpiece of the j-th word in the i-th input.
"""
# Prepare to reshape the result to work around broken shape inference.
batch_size = tf.shape(inputs)[0]
def _reshape(rt):
values = rt.values
row_splits = rt.row_splits
row_splits = tf.reshape(row_splits, [batch_size + 1])
return tf.RaggedTensor.from_row_splits(values, row_splits)
if self.tokenize_with_offsets:
tokens, start_offsets, limit_offsets = self._tokenize_with_offsets(inputs)
return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)
else:
tokens = self._tokenize(inputs)
return _reshape(tokens)
| 32,516 | 43.001353 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer_encoder_block_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers.transformer_encoder_block import TransformerEncoderBlock
@parameterized.named_parameters(('base', TransformerEncoderBlock))
class TransformerEncoderBlockLayerTest(
tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
output_tensor = test_layer([input_data, mask_data], output_range=1)
self.assertAllClose(new_output_tensor, output_tensor, atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer(input_data, output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer(input_data, output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
output_tensor = test_layer([input_data, mask_data], output_range=1)
self.assertAllClose(new_output_tensor, output_tensor, atol=5e-5, rtol=0.003)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
def test_separate_qkv(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
class TransformerEncoderBlockLayerTestWithoutParams(
tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockLayerTestWithoutParams, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_raises_invalid_arg_error_when_q_kv_dims_are_different(self):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=True)
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
with self.assertRaises(tf.errors.InvalidArgumentError):
test_layer(inputs)
@parameterized.named_parameters(('output_range_not_none', 2),
('output_range_none', None))
def test_needs_diff_q_kv_att_layer_norm_to_be_true_for_diff_q_and_kv_dims(
self, output_range):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=True)
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
with self.assertRaises(tf.errors.InvalidArgumentError):
test_layer(inputs, output_range=output_range)
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
diff_q_kv_att_layer_norm=True,
norm_first=True)
# Forward path.
test_layer(inputs)
@parameterized.named_parameters(('norm_first_is_true', True),
('norm_first_is_false', False))
def test_use_query_residual_false_removes_add_op(self, norm_first):
graph_with_res = tf.Graph()
with graph_with_res.as_default():
layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=norm_first)
inputs = tf.keras.Input(shape=(None, None, 2))
outputs = layer(inputs)
tf.keras.Model(inputs=inputs, outputs=outputs)
graph_without_res = tf.Graph()
with graph_without_res.as_default():
layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=norm_first,
use_query_residual=False)
inputs = tf.keras.Input(shape=(None, None, 2))
outputs = layer(inputs)
tf.keras.Model(inputs=inputs, outputs=outputs)
graph_with_res_names = {x.name for x in graph_with_res.get_operations()}
graph_without_res_names = {
x.name for x in graph_without_res.get_operations()
}
self.assertIn('transformer_encoder_block/add',
list(graph_with_res_names - graph_without_res_names)[0])
self.assertEmpty(graph_without_res_names - graph_with_res_names)
@parameterized.named_parameters(('key_dim_is_none', None, 128, 2, 128 // 2),
('key_dim_is_not_none', 30, 128, 2, 30))
def test_key_dim(self, key_dim, q_tensor_last_dim, some_num_attention_heads,
expected):
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
key_dim=key_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(expected,
test_layer._attention_layer.get_config()['key_dim'])
@parameterized.named_parameters(
('output_last_dim_is_none_use_query_residual_false', False, None, 128,
128),
('output_last_dim_is_none_use_query_residual_true', True, None, 128, 128),
('output_last_dim_is_not_none', False, 30, 128, 30))
def test_output_last_dim(self, use_query_residual, output_last_dim,
q_tensor_last_dim, expected):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
# Must be false for multi-head output to be different from
# first input's last dim
use_query_residual=use_query_residual,
output_last_dim=output_last_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
output = test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(output.numpy().shape[-1], expected)
@parameterized.named_parameters(('value_dim_is_none', None, 128, 2, 128 // 2),
('value_dim_is_not_none', 30, 128, 2, 30))
def test_value_dim(self, value_dim, q_tensor_last_dim,
some_num_attention_heads, expected):
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
value_dim=value_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(expected,
test_layer._attention_layer.get_config()['value_dim'])
class TransformerArgumentTest(tf.test.TestCase, parameterized.TestCase):
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_mask]
output = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_norm_first_false_and_diff_q_kv_att_layer_norm_true_raises(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
with self.assertRaises(ValueError):
TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=False,
diff_q_kv_att_layer_norm=True)
def test_diff_q_kv_att_layer_norm_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=False)
self.assertIn('diff_q_kv_att_layer_norm', encoder.get_config())
self.assertFalse(encoder.get_config()['diff_q_kv_att_layer_norm'])
def test_diff_q_kv_att_layer_norm_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=True,
diff_q_kv_att_layer_norm=True)
self.assertIn('diff_q_kv_att_layer_norm', encoder.get_config())
self.assertTrue(encoder.get_config()['diff_q_kv_att_layer_norm'])
def test_use_query_residual_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('use_query_residual', encoder.get_config())
self.assertTrue(encoder.get_config()['use_query_residual'])
def test_use_query_residual_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
use_query_residual=False)
self.assertIn('use_query_residual', encoder.get_config())
self.assertFalse(encoder.get_config()['use_query_residual'])
def test_key_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('key_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['key_dim'])
def test_key_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
key_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
key_dim=key_dim)
self.assertIn('key_dim', encoder.get_config())
self.assertEqual(key_dim, encoder.get_config()['key_dim'])
def test_value_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('value_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['value_dim'])
def test_value_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
value_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
value_dim=value_dim)
self.assertIn('value_dim', encoder.get_config())
self.assertEqual(value_dim, encoder.get_config()['value_dim'])
def test_output_last_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('output_last_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['output_last_dim'])
def test_output_last_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
output_last_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
output_last_dim=output_last_dim)
self.assertIn('output_last_dim', encoder.get_config())
self.assertEqual(output_last_dim, encoder.get_config()['output_last_dim'])
def test_get_config(self):
num_attention_heads = 2
encoder_block = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.),
use_query_residual=False,
key_dim=20,
value_dim=30,
output_last_dim=40,
diff_q_kv_att_layer_norm=True)
encoder_block_config = encoder_block.get_config()
new_encoder_block = TransformerEncoderBlock.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
@parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},
{'attention_axes': [2]}, {'attention_axes': [1, 2]})
def test_several_attention_axes(self, attention_axes):
test_layer = TransformerEncoderBlock(
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
num_attention_heads=10,
attention_axes=attention_axes)
num_rows = 21
num_cols = 13
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(num_rows, num_cols, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.parameters(
{
'output_dropout': 0.1,
'attention_dropout': 0.2,
'inner_dropout': 0.3
}, {
'output_dropout': 0.0,
'attention_dropout': 0.2,
'inner_dropout': 0.3
}, {
'output_dropout': 0.1,
'attention_dropout': 0.0,
'inner_dropout': 0.3
}, {
'output_dropout': 0.1,
'attention_dropout': 0.2,
'inner_dropout': 0.0
})
def test_dropout_config(self, output_dropout, attention_dropout,
inner_dropout):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=32,
inner_activation='relu',
output_dropout=output_dropout,
attention_dropout=attention_dropout,
inner_dropout=inner_dropout)
seq_len = 21
hidden_size = 512
input_tensor = tf.keras.Input(shape=(seq_len, hidden_size))
_ = test_layer(input_tensor)
true_output_dropout = test_layer._output_dropout.get_config()['rate']
true_attention_dropout = test_layer._attention_dropout.get_config()['rate']
true_inner_dropout = test_layer._inner_dropout_layer.get_config()['rate']
self.assertEqual(true_output_dropout, output_dropout)
self.assertEqual(true_attention_dropout, attention_dropout)
self.assertEqual(true_inner_dropout, inner_dropout)
@parameterized.named_parameters(
(
'return_attention_scores_is_false',
False,
),
(
'return_attention_scores_is_true',
True,
),
)
def test_return_attention_scores(self, return_attention_scores):
num_attention_heads = 7
sequence_length = 21
width = 80
test_layer = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=2048,
inner_activation='relu',
return_attention_scores=return_attention_scores)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
expected_layer_output_shape = [None, sequence_length, width]
expected_attention_scores_shape = [
None, num_attention_heads, sequence_length, sequence_length
]
if return_attention_scores:
self.assertIsInstance(output_tensor, tuple)
self.assertLen(output_tensor, 2)
# First is the standard output.
self.assertEqual(output_tensor[0].shape.as_list(),
expected_layer_output_shape)
# Second is the attention scores.
self.assertEqual(output_tensor[1].shape.as_list(),
expected_attention_scores_shape)
else:
# Only the standard layer output.
self.assertEqual(output_tensor.shape.as_list(),
expected_layer_output_shape)
if __name__ == '__main__':
tf.test.main()
| 27,534 | 38.561782 | 90 | py |
models | models-master/official/nlp/modeling/layers/moe.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Experts layers and their routing mechanisms."""
import dataclasses
from typing import Callable, Optional, Tuple
import tensorflow as tf
from official.modeling import tf_utils
_InitializerType = tf.keras.initializers.Initializer
_DEFAULT_KERNEL_INITIALIZER = tf.keras.initializers.TruncatedNormal(stddev=2e-2)
_DEFAULT_BIAS_INITIALIZER = tf.keras.initializers.Zeros()
################## Routers (gating functions) ##################
def _router_z_loss(router_logits: tf.Tensor) -> float:
"""Computes router z-loss.
The router z-loss was introduced in Designing Effective Sparse Expert Models
(https://arxiv.org/abs/2202.08906). It encourages router logits to remain
small in an effort to improve stability.
Args:
router_logits: <float32>[num_groups, tokens_per_group, num_experts] router
logits.
Returns:
Scalar router z-loss <float32>.
"""
num_groups = tf.shape(router_logits)[0]
tokens_per_group = router_logits.shape[1]
log_z = tf.math.reduce_logsumexp(router_logits, axis=-1)
z_loss = log_z**2
return tf.math.reduce_sum(z_loss) / tf.cast(
num_groups * tokens_per_group, tf.float32)
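# A small worked example of the z-loss (values are illustrative): for uniform
# logits tf.zeros([1, 4, 8]) -- 1 group, 4 tokens, 8 experts -- every token
# contributes (log 8)**2, so _router_z_loss returns (log 8)**2 ~= 4.32.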
@dataclasses.dataclass
class RouterMask:
"""Dispatch and combine arrays for expert routing with masked matmuls.
Attributes:
dispatch_mask:
<float>[num_groups, tokens_per_group, num_experts, expert_capacity]
dispatch array that is 1 if the token gets routed to the
corresponding expert, and 0 otherwise.
combine_array:
<float>[num_groups, tokens_per_group, num_experts, expert_capacity]
combine array used for combining expert outputs and
scaling with router probability.
"""
dispatch_mask: tf.Tensor
combine_array: tf.Tensor
RouterOutput = RouterMask
class Router(tf.keras.layers.Layer):
"""Abstract base router class, defining router API and inner workings.
Computations are performed in float32 for stability, and returned after
conversion according to the precision policy. See the discussion of
"selective precision" in https://arxiv.org/abs/2101.03961.
Uses Keras add_loss() and add_metric() APIs.
Attributes:
num_experts: Number of experts, used to check consistency with
FeedForwardExperts.
jitter_noise: Amplitude of jitter noise applied to router logits.
router_weights: Dense layer that computes logits for all tokens, which are
then used as expert or token weights.
"""
def __init__(
self,
num_experts: int,
*,
jitter_noise: float = 0.0,
use_bias: bool = True,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
router_z_loss_weight: float = 0.0,
export_metrics: bool = True,
name: str = "router",
**kwargs):
"""Init.
Args:
num_experts: Number of experts.
jitter_noise: Amplitude of jitter noise applied to router logits.
use_bias: Whether or not to use the bias term in computing the router
weights.
kernel_initializer: Kernel initializer for router weights.
bias_initializer: Bias initializer for router weights.
router_z_loss_weight: Weight for router_z_loss. Use non-zero values if
running into training instability (esp. with dtype 'bfloat16' or lower).
export_metrics: Whether to export metrics using Keras add_metric API.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.num_experts = num_experts # Used to check consistency with
# FeedForwardExperts.
self.jitter_noise = jitter_noise
self.router_z_loss_weight = router_z_loss_weight
self._export_metrics = export_metrics
self.router_weights = tf.keras.layers.Dense(
num_experts,
use_bias=use_bias,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
bias_initializer=tf_utils.clone_initializer(bias_initializer),
name="router_weights",
dtype=tf.float32)
def call(self,
inputs: tf.Tensor,
*,
expert_capacity: int,
training: Optional[bool] = None) -> RouterOutput:
"""Computes dispatch and combine arrays for routing to experts.
Args:
inputs: Inputs to send to experts of shape
<float>[num_groups, tokens_per_group, hidden_dim].
expert_capacity: Each group will send this many tokens to each expert.
training: If true, apply jitter noise during routing. If not provided
taken from tf.keras.backend.
Returns:
Router indices or mask arrays (depending on router type).
"""
if training is None:
training = tf.keras.backend.learning_phase()
# inputs shape <float>[num_groups, tokens_per_group, hidden_dim]
router_probs, router_logits = self._compute_router_probabilities(
inputs, apply_jitter=training)
# router_probs <float32>[num_groups, tokens_per_group, num_experts]
# router_logits <float>[num_groups, tokens_per_group, num_experts]
unscaled_router_z_loss = _router_z_loss(router_logits)
router_z_loss = self.router_z_loss_weight * unscaled_router_z_loss
self.add_loss(router_z_loss)
if self._export_metrics:
self.add_metric(unscaled_router_z_loss, name="unscaled_router_z_loss")
self.add_metric(router_z_loss, name="router_z_loss")
routing_instructions = self._compute_routing_instructions(
router_probs, expert_capacity)
return routing_instructions
def _compute_router_probabilities(
self, inputs: tf.Tensor,
apply_jitter: bool) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes router probabilities from input tokens.
Args:
inputs: Inputs from which router probabilities are computed, shape
<float>[num_groups, tokens_per_group, hidden_dim].
apply_jitter: If true, apply jitter noise.
Returns:
- <float32>[num_groups, tokens_per_group, num_experts] probabilities for
each token and expert. Used for routing tokens to experts.
- <float32>[num_groups, tokens_per_group, num_experts] raw router logits.
Used for computing router z-loss.
"""
if apply_jitter and self.jitter_noise > 0:
inputs *= tf.random.uniform(
tf.shape(inputs),
minval=1.0 - self.jitter_noise,
maxval=1.0 + self.jitter_noise,
dtype=inputs.dtype)
# inputs <float>, router_logits <float32>
router_logits = self.router_weights(inputs)
router_probs = tf.keras.activations.softmax(router_logits, axis=-1)
return router_probs, router_logits
def _compute_routing_instructions(self, router_probs: tf.Tensor,
expert_capacity: int) -> RouterOutput:
"""Computes instructions for routing inputs to experts."""
raise NotImplementedError(
"Router is an abstract class that should be subclassed.")
class MaskedRouter(Router):
"""Abstract base router class for masked matmul dispatch routers.
MaskedRouter(s) return RouterMask(s) containing a dispatch mask and combine
array for sending and receiving (via masked matmuls) inputs and outputs to and
from experts.
Routing using masked matmuls is generally faster than scatter-based routing on
TPUs.
Uses Keras add_loss() and add_metric() APIs.
"""
def _compute_routing_instructions(self, router_probs: tf.Tensor,
expert_capacity: int) -> RouterMask:
"""Computes masks for the top-k experts per token.
Args:
router_probs: <float32>[num_groups, tokens_per_group, num_experts]
probabilities used to determine the routing of tokens to the experts.
expert_capacity: Each group will send this many tokens to each expert.
Returns:
Router mask arrays.
"""
raise NotImplementedError(
"MaskedRouter is an abstract class that should be subclassed.")
class ExpertsChooseMaskedRouter(MaskedRouter):
"""Masked matmul router using experts choose tokens assignment.
This router uses the same mechanism as in Mixture-of-Experts with Expert
Choice (https://arxiv.org/abs/2202.09368): each expert selects its top
expert_capacity tokens. An individual token may be processed by multiple
experts or none at all.
Note: "experts choose routing" should not be used in decoder blocks because it
breaks the autoregressive behavior, leading to a mismatch between training
(teacher forcing) and inference (autoregressive decoding).
Uses Keras add_loss() and add_metric() APIs.
"""
def _compute_routing_instructions(self, router_probs: tf.Tensor,
expert_capacity: int) -> RouterMask:
"""Computes masks for the highest probability token per expert.
Args:
router_probs: <float32>[num_groups, tokens_per_group, num_experts]
probabilities used to determine the routing of tokens to the experts.
expert_capacity: Each group will send this many tokens to each expert.
Returns:
Dispatch and combine arrays for routing with masked matmuls.
"""
num_groups = tf.shape(router_probs)[0]
tokens_per_group = router_probs.shape[1]
router_probs_t = tf.transpose(router_probs, perm=[0, 2, 1])
# router_probs_t: <float32>[num_groups, num_experts, tokens_per_group]
# Top expert_capacity router probability and corresponding token indices for
# each expert.
# Shapes [num_groups, num_experts, expert_capacity]
_, expert_index = tf.math.top_k(
router_probs_t, k=expert_capacity, sorted=False)
# Convert to one-hot mask of expert indices for each token in each group.
# Shape: [num_groups, tokens_per_group, num_experts, expert_capacity].
dispatch_mask = tf.one_hot(
expert_index, tokens_per_group, axis=1, dtype=router_probs.dtype)
# The combine array will be used for combining expert outputs, scaled by the
# router probabilities.
# Shape: [num_groups, num_experts, tokens_per_group, expert_capacity]
combine_array = tf.expand_dims(router_probs, axis=3) * dispatch_mask
# Add load balancing loss.
# Each expert is choosing tokens until it reaches full capacity, so we don't
    # need an auxiliary load balancing loss for expert choice routing.
if self._export_metrics:
self.add_metric(0.0, name="load_balancing_loss")
# Gather expert metrics.
# Number of tokens that were dispatched to at least one expert.
num_tokens = num_groups * tokens_per_group
num_tokens_dispatched_somewhere = tf.math.reduce_sum(tf.math.reduce_max(
dispatch_mask, axis=(-1, -2)))
fraction_tokens_left_behind = 1.0 - tf.cast(
num_tokens_dispatched_somewhere, tf.float32) / tf.cast(
num_tokens, tf.float32)
# Total number of tokens that were dispatched (one token could be
# dispatched to multiple experts).
num_tokens_dispatched = tf.math.reduce_sum(dispatch_mask)
# Of the tokens dispatched, how confident was the router in its routing?
router_confidence = tf.math.reduce_sum(
combine_array) / num_tokens_dispatched
    expert_usage = 1.0  # Experts fully utilized with "experts choose tokens".
self.add_metric(fraction_tokens_left_behind,
name="fraction_tokens_left_behind")
self.add_metric(router_confidence, name="router_confidence")
self.add_metric(expert_usage, name="expert_usage")
# Return to default dtype now that router computation is complete.
dispatch_mask = tf.cast(dispatch_mask, self.compute_dtype)
combine_array = tf.cast(combine_array, self.compute_dtype)
output = RouterMask(dispatch_mask, combine_array)
return output
################## Model layers ##################
class FeedForward(tf.keras.layers.Layer):
"""Feed-forward layer - position independent, dense, nonlinear transformation.
Typically used in an MLP Transformer block.
"""
def __init__(
self,
d_ff: int,
*,
inner_dropout: float = 0.0,
output_dropout: float = 0.0,
activation: Callable[[tf.Tensor], tf.Tensor] = tf.keras.activations.gelu,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
name: str = "feed_forward",
**kwargs):
"""Initializes layer.
Args:
d_ff: Dimension of feed-forward layer.
inner_dropout: The dropout probability to be applied after intermediate
activations.
output_dropout: The dropout probability to be applied after output layer.
activation: (Nonlinear) transform applied in layer.
kernel_initializer: Initialization scheme for kernel.
bias_initializer: Initialization scheme for bias.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.activation = activation
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.intermediate_layer = tf.keras.layers.Dense(
d_ff,
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="intermediate")
self.inner_dropout_layer = tf.keras.layers.Dropout(
inner_dropout)
self.output_dropout_layer = tf.keras.layers.Dropout(output_dropout)
def build(self, input_shape: Tuple[int, int, int]):
"""Creates the input shape dependent output weight variables."""
self.output_layer = tf.keras.layers.Dense(
input_shape[-1],
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="output")
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies layer to inputs.
Args:
inputs: Batch of input embeddings, of shape
<float>[batch_size, seq_len, hidden_dim].
training: Only apply dropout during training.
Returns:
Transformed inputs with the same shape as inputs
<float>[batch_size, seq_len, hidden_dim].
"""
x = self.intermediate_layer(inputs)
x = self.activation(x)
x = self.inner_dropout_layer(x, training=training)
x = self.output_layer(x)
x = self.output_dropout_layer(x, training=training)
return x
class FeedForwardExperts(tf.keras.layers.Layer):
"""Feed-forward layer with multiple experts.
Note that call() takes inputs with shape
[num_groups, num_experts, expert_capacity, hidden_dim]
which is different from the usual [batch_size, seq_len, hidden_dim] used by
the FeedForward layer.
The experts are independent FeedForward layers of the
same shape, i.e. the kernel doesn't have shape [hidden_dim, out_dim], but
[num_experts, hidden_dim, out_dim].
"""
def __init__(
self,
num_experts: int,
d_ff: int,
*,
inner_dropout: float = 0.0,
output_dropout: float = 0.0,
activation: Callable[[tf.Tensor], tf.Tensor] = tf.keras.activations.gelu,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
name: str = "experts",
**kwargs):
"""Initializes layer.
Args:
num_experts: Number of experts (i.e. number of independent feed-forward
blocks).
d_ff: Dimension of feed-forward layer of each expert.
inner_dropout: The dropout probability to be applied after intermediate
activations.
output_dropout: The dropout probability to be applied after output layer.
activation: (Nonlinear) transform applied in layer.
kernel_initializer: Initialization scheme for kernel.
bias_initializer: Initialization scheme for bias.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.num_experts = num_experts
self.activation = activation
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.intermediate_layer = tf.keras.layers.EinsumDense(
"gech,ehf->gecf",
output_shape=(self.num_experts, None, d_ff),
bias_axes="ef",
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="intermediate")
self.inner_dropout_layer = tf.keras.layers.Dropout(
inner_dropout)
self.output_dropout_layer = tf.keras.layers.Dropout(output_dropout)
def build(self, input_shape: Tuple[int, int, int, int]):
"""Creates the input shape dependent output weight variables."""
if input_shape[1] != self.num_experts:
raise ValueError(
f"Input shape {input_shape} is inconsistent with num_experts "
f"{self.num_experts}.")
self.output_layer = tf.keras.layers.EinsumDense(
"gecf,efh->gech",
output_shape=(self.num_experts, None, input_shape[-1]),
bias_axes="eh",
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="output")
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies layer to inputs.
Args:
inputs: Inputs of shape
<float>[num_groups, num_experts, expert_capacity, hidden_dim].
training: Only apply dropout during training.
Returns:
Transformed inputs with the same shape as inputs
<float>[num_groups, num_experts, expert_capacity, hidden_dim].
"""
x = self.intermediate_layer(inputs)
x = self.activation(x)
x = self.inner_dropout_layer(x, training=training)
x = self.output_layer(x)
x = self.output_dropout_layer(x, training=training)
return x
class MoeLayer(tf.keras.layers.Layer):
"""Sparse MoE layer with per-token routing.
In this TF implementation, all experts need to fit onto a single device
allowing for batch parallelism only.
Uses Keras add_loss() and add_metric() APIs.
Attributes:
num_experts: Number of experts (i.e. number of independent feed-forward
blocks).
"""
def __init__(
self,
experts: FeedForwardExperts,
router: MaskedRouter,
*,
train_capacity_factor: float = 1.0,
eval_capacity_factor: float = 1.0,
examples_per_group: float = 1.0,
name: str = "moe",
**kwargs):
"""Init.
Args:
experts: Instance of FeedForwardExperts. Needs to have the same
num_experts as the router.
router: Instance of MaskedRouter to route the tokens to
the different experts.
train_capacity_factor: Scaling factor to increase the expert token
capacity during training. This factor plays an analogous, but slightly
different, role depending on the routing assignment algorithm:
- For "tokens choose" routing, the capacity factor only affects the
maximum number of tokens that an expert will process. It does not
affect how many experts a given token is routed to; see the
num_selected_experts attributes of "tokens choose" routers.
- For "experts choose" routing, because experts always fill their
buffer, increasing the capacity factor will increase the number of
tokens that an expert will process AND will indirectly increase the
number of experts that a given token is routed to.
eval_capacity_factor: As above, but used during evaluation.
examples_per_group: Number of examples to form a group. Router then
performs top_k token selection for each expert on a per group basis.
E.g. when `examples_per_group=4.0`, tokens are assigned to experts in
groups formed from 4 examples. When `examples_per_group=0.5`,
each example is split into 2 groups.
`examples_per_group` must divide the local batch size.
A larger group size will result in slower but more accurate top-k and
sorting computations, whereas a smaller group size will result in faster
but more approximate (and potentially less stable) routing choices.
In practice, we find that imperfect routing choices are tolerable and
recommend choosing a group size on the order of 4096 tokens, although
this number will vary based on model configuration and size.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self._experts = experts
self._router = router
self.num_experts = experts.num_experts
assert experts.num_experts == router.num_experts
self._train_capacity_factor = train_capacity_factor
self._eval_capacity_factor = eval_capacity_factor
self._examples_per_group = examples_per_group
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies MoeLayer.
Args:
inputs: Batch of input embeddings of shape
<float>[batch_size, seq_length, hidden_dim].
training: Only apply dropout and jitter noise during training. If not
provided taken from tf.keras.backend.
Returns:
Transformed inputs with same shape as inputs:
<float>[batch_size, seq_length, hidden_dim].
Raises:
ValueError if we cannot find a group_size satisfying given requirements.
"""
if training is None:
training = tf.keras.backend.learning_phase()
# inputs shape [batch_size, seq_length, hidden_dim]
batch_size, seq_length, hidden_dim = inputs.shape
if batch_size is not None:
if self._examples_per_group > batch_size:
raise ValueError(
f"examples_per_group={self._examples_per_group} is larger than the "
"number of examples available in the local (per-device) batch_size="
f"{batch_size}. Either decrease examples_per_group or increase the "
"batch_size.")
tokens_per_group = int(seq_length * self._examples_per_group)
if training:
capacity_factor = self._train_capacity_factor
else:
capacity_factor = self._eval_capacity_factor
# Each group will send expert_capacity tokens to each expert.
expert_capacity = int(
round(capacity_factor * tokens_per_group / self.num_experts))
# Reshape batch and sequence/token dimensions for expert routing.
x = tf.reshape(inputs, (-1, tokens_per_group, hidden_dim))
x = self._mask_and_dispatch_to_experts(x, expert_capacity, training)
# Return to original input shape.
x = tf.reshape(x, (-1, seq_length, hidden_dim))
return x
def _mask_and_dispatch_to_experts(self, inputs: tf.Tensor,
expert_capacity: int,
training: bool) -> tf.Tensor:
"""Wraps expert masked routing and dispatching algorithm.
This algorithm takes the following steps:
(1) Compute dispatch mask and combine array using self._router.
(2) Dispatch inputs to experts based on dispatch mask.
(3) Recombine individual expert outputs using combine array.
Args:
inputs: <float>[num_groups, tokens_per_group, hidden_dim] inputs to
send to experts.
expert_capacity: Each group will send this many tokens to each expert.
training: If true, apply jitter noise during routing and dropout
during expert computation.
Returns:
<float>[num_groups, num_tokens_per_group, hidden_dim] outputs from
experts.
"""
# Shape [num_groups, tokens_per_group, num_experts, expert_capacity]
router_mask = self._router(
inputs,
expert_capacity=expert_capacity,
training=training)
# Shape [num_groups, num_experts, expert_capacity, hidden_dim]
expert_inputs = tf.einsum(
"gtec,gth->gech",
router_mask.dispatch_mask,
inputs)
expert_outputs = self._experts(expert_inputs, training=training)
# Shape [num_groups, tokens_per_group, hidden_dim]
combined_outputs = tf.einsum(
"gtec,gech->gth",
router_mask.combine_array,
expert_outputs)
return combined_outputs
class MoeLayerWithBackbone(tf.keras.layers.Layer):
"""Sparse MoE layer plus a FeedForward layer evaluated for all tokens.
Uses Keras add_loss() and add_metric() APIs.
"""
def __init__(
self,
moe: MoeLayer,
backbone_d_ff: int,
*,
inner_dropout: float = 0.0,
output_dropout: float = 0.0,
activation: Callable[[tf.Tensor],
tf.Tensor] = tf.keras.activations.gelu,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
name: str = "moe_with_backbone",
**kwargs):
"""Init.
Args:
moe: Instance of MoeLayer with experts and router.
backbone_d_ff: Dimension of feed-forward layer of a lightweight backbone,
which is evaluated for all tokens.
inner_dropout: The dropout probability to be applied after intermediate
activations for the backbone.
output_dropout: The dropout probability to be applied after the output
of the backbone.
activation: (Nonlinear) transform applied in the backbone.
kernel_initializer: Initialization scheme for kernels in the backbone.
bias_initializer: Initialization scheme for biases in the backbone.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self._moe = moe
self._backbone = FeedForward(
backbone_d_ff,
inner_dropout=inner_dropout,
output_dropout=output_dropout,
activation=activation,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
bias_initializer=tf_utils.clone_initializer(bias_initializer),
name="backbone")
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies MoeLayerWithBackbone layer.
Args:
inputs: Batch of input embeddings of shape
<float>[batch_size, seq_length, hidden_dim].
training: Only apply dropout and jitter noise during training. If not
provided taken from tf.keras.backend.
Returns:
Transformed inputs with same shape as inputs:
<float>[batch_size, seq_length, hidden_dim].
"""
return self._backbone(
inputs, training=training) + self._moe(
inputs, training=training)
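# A hedged usage sketch (assumes a `MoeLayer` instance named `moe` was built
# elsewhere; shapes below are illustrative only):
#
#   layer = MoeLayerWithBackbone(moe=moe, backbone_d_ff=256)
#   outputs = layer(tf.ones([2, 16, 64]), training=False)  # keeps input shape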
| 27,430 | 36.993075 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based transformer block layer."""
# pylint: disable=g-classes-have-attributes
from absl import logging
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import multi_channel_attention
from official.nlp.modeling.layers import transformer_encoder_block
from official.nlp.modeling.layers.util import tf_function_if_eager
@tf.keras.utils.register_keras_serializable(package="Text")
class Transformer(transformer_encoder_block.TransformerEncoderBlock):
"""Transformer layer.
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762).
**Warning: this layer is deprecated. Please don't use it. Use the
`TransformerEncoderBlock` layer instead.**
Args:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
output_range: the sequence output range, [0, output_range) by slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for kernel.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
super().__init__(
num_attention_heads=num_attention_heads,
inner_dim=intermediate_size,
inner_activation=intermediate_activation,
output_dropout=dropout_rate,
attention_dropout=attention_dropout_rate,
output_range=output_range,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
use_bias=use_bias,
norm_first=norm_first,
norm_epsilon=norm_epsilon,
inner_dropout=intermediate_dropout,
attention_initializer=attention_initializer,
**kwargs)
logging.warning("The `Transformer` layer is deprecated. Please directly "
"use `TransformerEncoderBlock`.")
def get_config(self):
return {
"num_attention_heads": self._num_heads,
"intermediate_size": self._inner_dim,
"intermediate_activation": self._inner_activation,
"dropout_rate": self._output_dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"output_range": self._output_range,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._inner_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
}
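# A hedged usage sketch for the deprecated layer above (illustrative shapes;
# new code should construct TransformerEncoderBlock directly instead):
#
#   block = Transformer(num_attention_heads=8, intermediate_size=2048,
#                       intermediate_activation="relu")
#   outputs = block(tf.ones([2, 32, 512]))  # [batch, seq, width] preserved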
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class CompiledTransformer(Transformer):
@tf_function_if_eager(experimental_compile=True)
def call(self, inputs):
return super().call(inputs)
@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerDecoderBlock(tf.keras.layers.Layer):
"""Single transformer layer for decoder.
It has three sub-layers:
(1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention mechanism.
(3) a positionwise fully connected feed-forward network.
Args:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
multi_channel_cross_attention: Whether to use `MultiChannelAttention` for
cross-attention between target sequences and source sequences.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for kernel.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
multi_channel_cross_attention=False,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
super().__init__(**kwargs)
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_activation = tf.keras.activations.get(
intermediate_activation)
self.dropout_rate = dropout_rate
self.attention_dropout_rate = attention_dropout_rate
self.multi_channel_cross_attention = multi_channel_cross_attention
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
if self.multi_channel_cross_attention:
self._cross_attention_cls = multi_channel_attention.MultiChannelAttention
else:
self._cross_attention_cls = attention.MultiHeadAttention
def build(self, input_shape):
target_tensor_shape = tf.TensorShape(input_shape[0])
if len(target_tensor_shape.as_list()) != 3:
raise ValueError("TransformerLayer expects a three-dimensional input of "
"shape [batch, sequence, width].")
hidden_size = target_tensor_shape[2]
if hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self.num_attention_heads))
self.attention_head_size = int(hidden_size) // self.num_attention_heads
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Self attention.
self.self_attention = attention.CachedAttention(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=tf_utils.clone_initializer(
self._attention_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="self_attention",
**common_kwargs)
self.self_attention_output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="output",
**common_kwargs)
self.self_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.self_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Encoder-decoder attention.
self.encdec_attention = self._cross_attention_cls(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
output_shape=hidden_size,
use_bias=self._use_bias,
kernel_initializer=tf_utils.clone_initializer(
self._attention_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="attention/encdec",
**common_kwargs)
self.encdec_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.encdec_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="attention/encdec_output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Feed-forward projection.
self.intermediate_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self.intermediate_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="intermediate",
**common_kwargs)
self.intermediate_activation_layer = tf.keras.layers.Activation(
self.intermediate_activation)
self._intermediate_dropout_layer = tf.keras.layers.Dropout(
rate=self._intermediate_dropout)
self.output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="output",
**common_kwargs)
self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
self.output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32")
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self.intermediate_size,
"intermediate_activation": self.intermediate_activation,
"dropout_rate": self.dropout_rate,
"attention_dropout_rate": self.attention_dropout_rate,
"multi_channel_cross_attention": self.multi_channel_cross_attention,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def common_layers_with_encoder(self):
"""Gets layer objects that can make a Transformer encoder block."""
return [
self.self_attention, self.self_attention_layer_norm,
self.intermediate_dense, self.output_dense, self.output_layer_norm
]
def call(self, inputs, cache=None, decode_loop_step=None):
if self.multi_channel_cross_attention:
if len(inputs) != 5:
raise ValueError(
"TransformerDecoderBlock must have 5 inputs, when it uses "
"multi_channel_cross_attention. But it got: %d" % len(inputs))
elif len(inputs) != 4:
raise ValueError(
"TransformerDecoderBlock must have 4 inputs, but it got: %d" %
len(inputs))
input_tensor, memory, attention_mask, self_attention_mask = inputs[:4]
source_tensor = input_tensor
if self._norm_first:
input_tensor = self.self_attention_layer_norm(input_tensor)
self_attention_output, cache = self.self_attention(
query=input_tensor,
value=input_tensor,
attention_mask=self_attention_mask,
cache=cache,
decode_loop_step=decode_loop_step)
self_attention_output = self.self_attention_dropout(self_attention_output)
if self._norm_first:
self_attention_output = source_tensor + self_attention_output
else:
self_attention_output = self.self_attention_layer_norm(
input_tensor + self_attention_output)
if self._norm_first:
source_self_attention_output = self_attention_output
self_attention_output = self.encdec_attention_layer_norm(
self_attention_output)
cross_attn_inputs = dict(
query=self_attention_output,
value=memory,
attention_mask=attention_mask)
if self.multi_channel_cross_attention:
# Accesses the 5-th input tensor for the doc-attention probabilities.
cross_attn_inputs["context_attention_weights"] = inputs[-1]
attention_output = self.encdec_attention(**cross_attn_inputs)
attention_output = self.encdec_attention_dropout(attention_output)
if self._norm_first:
attention_output = source_self_attention_output + attention_output
else:
attention_output = self.encdec_attention_layer_norm(
self_attention_output + attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self.output_layer_norm(attention_output)
intermediate_output = self.intermediate_dense(attention_output)
intermediate_output = self.intermediate_activation_layer(
intermediate_output)
intermediate_output = self._intermediate_dropout_layer(intermediate_output)
layer_output = self.output_dense(intermediate_output)
layer_output = self.output_dropout(layer_output)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self.output_layer_norm(layer_output + attention_output)
return layer_output, cache
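# A hedged usage sketch for TransformerDecoderBlock (illustrative shapes; the
# masks here are all-ones placeholders rather than real causal/padding masks):
#
#   block = TransformerDecoderBlock(
#       num_attention_heads=8, intermediate_size=2048,
#       intermediate_activation="relu")
#   target = tf.ones([2, 10, 512])      # decoder input
#   memory = tf.ones([2, 20, 512])      # encoder output
#   cross_mask = tf.ones([2, 10, 20])   # target-to-source attention mask
#   self_mask = tf.ones([2, 10, 10])    # target self-attention mask
#   outputs, cache = block([target, memory, cross_mask, self_mask])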
| 19,414 | 42.925339 | 80 | py |
models | models-master/official/nlp/modeling/layers/block_diag_feedforward_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based gated feedforward layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import block_diag_feedforward
class BlockDiagFeedforwardTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(BlockDiagFeedforwardTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.parameters(
(1, True, "float32"),
(1, True, "mixed_float16"),
(1, False, "float32"),
(1, False, "mixed_float16"),
(2, True, "float32"),
(2, True, "mixed_float16"),
(2, False, "float32"),
(2, False, "mixed_float16"),
)
def test_layer_creation(self, num_blocks, apply_mixing, dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
intermediate_size=128,
intermediate_activation="relu",
dropout=0.1,
num_blocks=num_blocks,
apply_mixing=apply_mixing,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = block_diag_feedforward.BlockDiagFeedforward(**kwargs)
sequence_length = 64
width = 128
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
    # The output shape of the feedforward layer should match the input shape.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.parameters(
(1, True, "float32"),
(1, True, "mixed_float16"),
(1, False, "float32"),
(1, False, "mixed_float16"),
(2, True, "float32"),
(2, True, "mixed_float16"),
(2, False, "float32"),
(2, False, "mixed_float16"),
)
def test_layer_invocation(self, num_blocks, apply_mixing, dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
intermediate_size=16,
intermediate_activation="relu",
dropout=0.1,
num_blocks=num_blocks,
apply_mixing=apply_mixing,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = block_diag_feedforward.BlockDiagFeedforward(**kwargs)
sequence_length = 16
width = 32
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_data = model.predict(input_data)
self.assertEqual(output_data.shape, (batch_size, sequence_length, width))
def test_get_config(self):
kwargs = dict(
intermediate_size=16,
intermediate_activation="relu",
dropout=0.1,
num_blocks=2,
apply_mixing=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = block_diag_feedforward.BlockDiagFeedforward(**kwargs)
new_layer = block_diag_feedforward.BlockDiagFeedforward.from_config(
test_layer.get_config())
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == "__main__":
tf.test.main()
| 4,171 | 34.355932 | 80 | py |
models | models-master/official/nlp/modeling/layers/position_embedding_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based positional embedding layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import position_embedding
class PositionEmbeddingLayerTest(tf.test.TestCase):
def test_static_layer_output_shape(self):
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
test_layer = position_embedding.PositionEmbedding(
max_length=sequence_length)
width = 30
input_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [None, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
# The default output dtype for this layer should be tf.float32.
self.assertEqual(tf.float32, output_tensor.dtype)
def test_non_default_axis_static(self):
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
test_layer = position_embedding.PositionEmbedding(
max_length=sequence_length, seq_axis=2)
width = 30
input_tensor = tf.keras.Input(shape=(width, sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [None, width, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
# The default output dtype for this layer should be tf.float32.
self.assertEqual(tf.float32, output_tensor.dtype)
def test_float16_dtype(self):
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
test_layer = position_embedding.PositionEmbedding(
max_length=sequence_length, dtype="float16")
width = 30
input_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [None, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
    # Since the layer was built with dtype float16, the output dtype should
    # be tf.float16.
self.assertEqual(tf.float16, output_tensor.dtype)
def test_dynamic_layer_output_shape(self):
max_sequence_length = 40
test_layer = position_embedding.PositionEmbedding(
max_length=max_sequence_length)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
# When using dynamic positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions - but may be None if
# the input shape is None there.
expected_output_shape = [None, None, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
def test_non_default_axis_dynamic(self):
max_sequence_length = 60
test_layer = position_embedding.PositionEmbedding(
max_length=max_sequence_length, seq_axis=2)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, None, width))
output_tensor = test_layer(input_tensor)
# When using dynamic positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions - but may be None if
# the input shape is None there.
expected_output_shape = [None, None, None, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
def test_dynamic_layer_slicing(self):
max_sequence_length = 40
test_layer = position_embedding.PositionEmbedding(
max_length=max_sequence_length)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
# Create input data that is shorter than max_sequence_length, which should
# trigger a down-slice.
input_length = 17
# Note: This test explicitly uses a batch size of 1. This is to get around
# Keras' restriction on Model invocations: inputs are expected to have the
# same batch cardinality as outputs. In practice, this layer should be used
# inside a model, where it can be projected when added to another tensor.
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
class RelativePositionEmbeddingLayerTest(tf.test.TestCase):
def test_relative_tensor_input(self):
hidden_size = 8
test_layer = position_embedding.RelativePositionEmbedding(
hidden_size=hidden_size)
# create a 3-dimensional input for test_layer to infer length as 1.
input_tensor = tf.constant([[[0] * hidden_size]])
output_tensor = test_layer(input_tensor)
    # The expected output is the theoretical result of the input based on the
    # sine-cosine relative position embedding formula.
expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]])
self.assertAllEqual(output_tensor, expected_output_tensor)
def test_relative_length_input(self):
hidden_size = 8
    # When no tensor is passed as input, we explicitly specify the length
    # when calling test_layer.
test_layer = position_embedding.RelativePositionEmbedding(
hidden_size=hidden_size)
input_tensor = None
output_tensor = test_layer(input_tensor, length=1)
    # The expected output is the theoretical result of the input based on the
    # sine-cosine relative position embedding formula.
expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]])
self.assertAllEqual(output_tensor, expected_output_tensor)
class RelativePositionBiasTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(("bidirectional", True),
("unidirectional", False))
def test_relative_position_bias(self, bidirectional):
query = tf.zeros((4, 4, 2))
key = tf.zeros((4, 2, 2))
l = position_embedding.RelativePositionBias(
num_heads=3,
bidirectional=bidirectional,
name="foo")
self.assertEqual(l(query, key).shape, (4, 3, 4, 2))
self.assertLen(l.trainable_variables, 1)
self.assertEqual(l.trainable_variables[0].name, "foo/rel_embedding:0")
def test_relative_position_bucket(self):
context_position = tf.range(3)[:, None]
memory_position = tf.range(2)[None, :]
relative_position = memory_position - context_position
outputs = position_embedding._relative_position_bucket(relative_position)
self.assertAllEqual(outputs.numpy(), np.array([[0, 17], [1, 0], [2, 1]]))
outputs = position_embedding._relative_position_bucket(
relative_position, bidirectional=False)
self.assertAllEqual(outputs.numpy(), np.array([[0, 0], [1, 0], [2, 1]]))
if __name__ == "__main__":
tf.test.main()
| 8,009 | 41.606383 | 79 | py |
models | models-master/official/nlp/modeling/layers/reuse_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attention layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import reuse_attention as attention
class ReuseMultiHeadAttentionTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("key_value_same_proj", None, None, [40, 80]),
("key_value_different_proj", 32, 60, [40, 60]),
)
def test_non_masked_attention(self, value_dim, output_shape, output_dims):
"""Test that the attention layer can be created without a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12,
key_dim=64,
value_dim=value_dim,
output_shape=output_shape)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
value = tf.keras.Input(shape=(20, 80))
output = test_layer(query=query, value=value)
self.assertEqual(output.shape.as_list(), [None] + output_dims)
def test_non_masked_self_attention(self):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_attention_scores(self):
"""Test attention outputs with coefficients."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output, coef = test_layer(query, query, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])
def test_attention_scores_with_values(self):
"""Test attention outputs with coefficients."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
value = tf.keras.Input(shape=(60, 80))
output, coef = test_layer(query, value, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 60])
@parameterized.named_parameters(
("with_bias", True, 0), ("no_bias", False, 0),
("reuse_all_with_bias", True, -1), ("reuse_all_no_bias", False, -1),
("reuse_partial_with_bias", True, 1),
("reuse_partial_no_bias", False, 1))
def test_masked_attention(self, use_bias, reuse_attention):
"""Test with a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2, use_bias=use_bias,
reuse_attention=reuse_attention)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = tf.keras.Input(shape=(4, 8))
value = tf.keras.Input(shape=(2, 8))
mask_tensor = tf.keras.Input(shape=(4, 2))
reuse_attention_scores = tf.keras.Input(shape=(2, 4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor,
reuse_attention_scores=reuse_attention_scores)
# Create a model containing the test layer.
model = tf.keras.Model(
[query, value, mask_tensor, reuse_attention_scores], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
reuse_scores = np.random.random_sample((batch_size, 2, 4, 2))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict(
[from_data, to_data, mask_data, reuse_scores])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict(
[from_data, to_data, null_mask_data, reuse_scores])
# Because one data is masked and one is not, the outputs should not be the
# same.
if reuse_attention == -1:
self.assertAllEqual(masked_output_data, unmasked_output_data)
else:
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Tests the layer with three inputs: Q, K, V.
key = tf.keras.Input(shape=(2, 8))
output = test_layer(query, value=value, key=key, attention_mask=mask_tensor,
reuse_attention_scores=reuse_attention_scores)
model = tf.keras.Model(
[query, value, key, mask_tensor, reuse_attention_scores], output)
masked_output_data = model.predict(
[from_data, to_data, to_data, mask_data, reuse_scores])
unmasked_output_data = model.predict(
[from_data, to_data, to_data, null_mask_data, reuse_scores])
# Because one data is masked and one is not, the outputs should not be the
# same.
if reuse_attention == -1:
self.assertAllEqual(masked_output_data, unmasked_output_data)
else:
self.assertNotAllClose(masked_output_data, unmasked_output_data)
if reuse_attention > 0:
self.assertLen(test_layer._output_dense, 2)
if use_bias:
if reuse_attention == 0:
self.assertLen(test_layer._query_dense.trainable_variables, 2)
self.assertLen(test_layer._output_dense[0].trainable_variables, 2)
if len(test_layer._output_dense) == 2:
self.assertLen(test_layer._output_dense[1].trainable_variables, 1)
else:
if reuse_attention == 0:
self.assertLen(test_layer._query_dense.trainable_variables, 1)
self.assertLen(test_layer._output_dense[0].trainable_variables, 1)
if len(test_layer._output_dense) == 2:
self.assertLen(test_layer._output_dense[1].trainable_variables, 1)
def test_initializer(self):
"""Test with a specified initializer."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12,
key_dim=64,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_masked_attention_with_scores(self):
"""Test with a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = tf.keras.Input(shape=(4, 8))
value = tf.keras.Input(shape=(2, 8))
mask_tensor = tf.keras.Input(shape=(4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor)
# Create a model containing the test layer.
model = tf.keras.Model([query, value, mask_tensor], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict([from_data, to_data, mask_data])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict([from_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Create a model containing attention scores.
output, scores = test_layer(
query=query, value=value, attention_mask=mask_tensor,
return_attention_scores=True)
model = tf.keras.Model([query, value, mask_tensor], [output, scores])
masked_output_data_score, masked_score = model.predict(
[from_data, to_data, mask_data])
unmasked_output_data_score, unmasked_score = model.predict(
[from_data, to_data, null_mask_data])
self.assertNotAllClose(masked_output_data_score, unmasked_output_data_score)
self.assertAllClose(masked_output_data, masked_output_data_score)
self.assertAllClose(unmasked_output_data, unmasked_output_data_score)
self.assertNotAllClose(masked_score, unmasked_score)
@parameterized.named_parameters(
("4d_inputs_1freebatch_mask2", [3, 4], [3, 2], [4, 2],
(2,)), ("4d_inputs_1freebatch_mask3", [3, 4], [3, 2], [3, 4, 2], (2,)),
("4d_inputs_1freebatch_mask4", [3, 4], [3, 2], [3, 2, 4, 2],
(2,)), ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)),
("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3)),
("5D_inputs_2D_attention_fullmask", [5, 3, 4], [5, 3, 2], [5, 3, 4, 3, 2],
(2, 3)))
def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes):
"""Test with a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2, attention_axes=attention_axes)
batch_size, hidden_size = 3, 8
# Generate data for the input (non-mask) tensors.
query_shape = [batch_size] + q_dims + [hidden_size]
value_shape = [batch_size] + v_dims + [hidden_size]
mask_shape = [batch_size] + mask_dims
query = 10 * np.random.random_sample(query_shape)
value = 10 * np.random.random_sample(value_shape)
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=mask_shape).astype("bool")
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones(mask_shape)
# Because one data is masked and one is not, the outputs should not be the
# same.
query_tensor = tf.keras.Input(query_shape[1:], name="query")
value_tensor = tf.keras.Input(value_shape[1:], name="value")
mask_tensor = tf.keras.Input(mask_shape[1:], name="mask")
output = test_layer(query=query_tensor, value=value_tensor,
attention_mask=mask_tensor)
model = tf.keras.Model([query_tensor, value_tensor, mask_tensor], output)
self.assertNotAllClose(
model.predict([query, value, mask_data]),
model.predict([query, value, null_mask_data]))
def test_dropout(self):
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2, dropout=0.5)
# Generate data for the input (non-mask) tensors.
from_data = tf.keras.backend.ones(shape=(32, 4, 8))
to_data = tf.keras.backend.ones(shape=(32, 2, 8))
train_out = test_layer(from_data, to_data, None, None, None, True)
test_out = test_layer(from_data, to_data, None, None, None, False)
    # With dropout enabled, the output in training mode should differ from the
    # output in inference mode.
self.assertNotAllClose(
tf.keras.backend.eval(train_out),
tf.keras.backend.eval(test_out))
def test_non_masked_self_attention_with_reuse(self):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64, reuse_attention=True)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
reuse_scores = tf.keras.Input(shape=(12, 40, 40))
output = test_layer(query, query, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
@parameterized.named_parameters(
("no_reuse_with_pe_max_seq_length_20", False, 20),
("reuse_all_with_pe_max_seq_length_20", True, 20),
("reuse_partial_with_pe_max_seq_length_20", 5, 20),
("no_reuse_with_pe_max_seq_length_40", False, 40),
("reuse_all_with_pe_max_seq_length_40", True, 40),
("reuse_partial_with_pe_max_seq_length_40", 5, 40))
def test_non_masked_self_attention_with_relative_pe(self, reuse_attention,
pe_max_seq_length):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64, reuse_attention=reuse_attention,
use_relative_pe=True, pe_max_seq_length=pe_max_seq_length)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
reuse_scores = tf.keras.Input(shape=(12, 40, 40))
output = test_layer(query, query, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
query = tf.keras.Input(shape=(30, 80))
reuse_scores = tf.keras.Input(shape=(12, 30, 30))
output = test_layer(query, query, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 30, 80])
query = tf.keras.Input(shape=(30, 80))
key = tf.keras.Input(shape=(20, 80))
reuse_scores = tf.keras.Input(shape=(12, 30, 20))
output = test_layer(query, key, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 30, 80])
query = tf.keras.Input(shape=(50, 80))
key = tf.keras.Input(shape=(60, 80))
reuse_scores = tf.keras.Input(shape=(12, 50, 60))
output = test_layer(query, key, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 50, 80])
if __name__ == "__main__":
tf.test.main()
| 14,319 | 45.95082 | 80 | py |
models | models-master/official/nlp/modeling/layers/factorized_embedding.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A factorized embedding layer."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import on_device_embedding
@tf.keras.utils.register_keras_serializable(package='Text')
class FactorizedEmbedding(on_device_embedding.OnDeviceEmbedding):
"""A factorized embeddings layer for supporting larger embeddings.
Arguments:
vocab_size: Number of elements in the vocabulary.
embedding_width: Width of word embeddings.
output_dim: The output dimension of this layer.
initializer: The initializer to use for the embedding weights. Defaults to
"glorot_uniform".
use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding
lookup. Defaults to False (that is, using tf.gather). Setting this option
to True may improve performance, especially on small vocabulary sizes, but
will generally require more memory.
    scale_factor: Whether to scale the output embeddings. Defaults to None (that
      is, not to scale). Setting this option to a float will multiply the
      output embeddings by scale_factor.
"""
def __init__(self,
vocab_size: int,
embedding_width: int,
output_dim: int,
initializer='glorot_uniform',
use_one_hot=False,
scale_factor=None,
**kwargs):
super().__init__(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=use_one_hot,
scale_factor=scale_factor,
**kwargs)
self._output_dim = output_dim
def get_config(self):
config = {'output_dim': self._output_dim}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=self._output_dim,
bias_axes=None,
kernel_initializer=tf_utils.clone_initializer(self._initializer),
name='embedding_projection')
super().build(input_shape)
def call(self, inputs):
output = super().call(inputs)
return self._embedding_projection(output)
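# A hedged usage sketch (vocabulary and dimension values are arbitrary):
#
#   layer = FactorizedEmbedding(vocab_size=30522, embedding_width=128,
#                               output_dim=768)
#   ids = tf.constant([[5, 9, 42]])
#   embeddings = layer(ids)  # [1, 3, 768]: 128-wide lookup projected to 768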
| 2,892 | 36.571429 | 80 | py |
models | models-master/official/nlp/modeling/layers/mat_mul_with_margin.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dot product with margin layer."""
# pylint: disable=g-classes-have-attributes
from typing import Tuple
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Text')
class MatMulWithMargin(tf.keras.layers.Layer):
"""This layer computs a dot product matrix given two encoded inputs.
Args:
logit_scale: The scaling factor of dot products when doing training.
logit_margin: The margin value between the positive and negative examples
when doing training.
"""
def __init__(self,
logit_scale=1.0,
logit_margin=0.0,
**kwargs):
super().__init__(**kwargs)
self.logit_scale = logit_scale
self.logit_margin = logit_margin
def call(self, left_encoded: tf.Tensor,
right_encoded: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
batch_size = tf_utils.get_shape_list(
left_encoded, name='sequence_output_tensor')[0]
# Left -> Right dot product.
left_dot_products = tf.matmul(
left_encoded, right_encoded, transpose_b=True)
self.left_logits = self.logit_scale * (
left_dot_products - self.logit_margin * tf.eye(batch_size))
# Right -> Left dot product.
self.right_logits = tf.transpose(self.left_logits)
return (self.left_logits, self.right_logits)
def get_config(self):
config = {
'logit_scale': self.logit_scale,
'logit_margin': self.logit_margin}
config.update(super().get_config())
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
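# A hedged usage sketch (batch size and encoding width are arbitrary; the
# margin is subtracted only from the diagonal, i.e. the matching pairs):
#
#   layer = MatMulWithMargin(logit_scale=1.0, logit_margin=0.3)
#   left = tf.random.normal([4, 16])
#   right = tf.random.normal([4, 16])
#   left_logits, right_logits = layer(left, right)  # each of shape [4, 4]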
| 2,250 | 31.157143 | 77 | py |
models | models-master/official/nlp/modeling/layers/relative_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based relative attention layers."""
import math
import string
import tensorflow as tf
_CHR_IDX = string.ascii_lowercase
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
return equation, bias_axes, len(output_str)
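# For example (a hypothetical call mirroring how the attention layers
# project a [B, T, dim] input to [B, T, num_heads, key_dim]):
#
#   _build_proj_equation(free_dims=2, bound_dims=1, output_dims=2)
#   # returns ("abc,cde->abde", "de", 4)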
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
def _rel_shift(x, klen=-1):
"""Performs relative shift to form the relative attention score."""
x = tf.transpose(x, perm=[2, 3, 0, 1])
x_size = tf.shape(x)
x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])
x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])
x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1])
x = tf.transpose(x, perm=[2, 3, 0, 1])
return x
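# A small, hypothetical illustration of `_rel_shift` (the names below are not
# part of this module): scores computed against a relative position encoding of
# length L are realigned so each query position sees the correct relative
# offsets, then truncated to the key length `klen`:
#
#   x = tf.random.normal([1, 2, 3, 6])   # [batch, heads, qlen=3, rel_len=6]
#   y = _rel_shift(x, klen=3)            # [1, 2, 3, 3]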
@tf.keras.utils.register_keras_serializable(package="Text")
class MultiHeadRelativeAttention(tf.keras.layers.MultiHeadAttention):
"""A multi-head attention layer with relative attention + position encoding.
This layer shares the same input/output projections as the common
`tf.keras.layers.MultiHeadAttention` layer.
When it calculates attention logits, position encoding is projected to form
  relative keys. The logits are composed of shifted relative logits and content
  logits.
  **Note**: This layer is currently experimental.
Attributes:
kernel_initializer: The kernel initializer. Defaults to variance_scaling.
Call args:
query: Query `Tensor` of shape `[B, T, dim]`.
value: Value `Tensor` of shape `[B, S, dim]`.
content_attention_bias: Bias `Tensor` for content based attention of shape
`[num_heads, dim]`.
positional_attention_bias: Bias `Tensor` for position based attention of
shape `[num_heads, dim]`.
key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
relative_position_encoding: Relative positional encoding `Tensor` of shape
`[B, L, dim]`.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet of shape `[B, S, S + M]`.
segment_encoding: Optional `Tensor` representing the segmentation encoding
as used in XLNet of shape `[2, num_heads, dim]`.
    segment_attention_bias: Optional trainable bias parameter added to the query
      head when calculating the segment-based attention score used in XLNet of
      shape `[num_heads, dim]`.
state: Optional `Tensor` of shape `[B, M, E]` where M is the length of the
state or memory. If passed, this is also attended over as in Transformer
XL.
attention_mask: A boolean mask of shape `[B, T, S]` that prevents attention
to certain positions.
"""
def __init__(self,
kernel_initializer="variance_scaling",
**kwargs):
super().__init__(kernel_initializer=kernel_initializer,
**kwargs)
def _build_from_signature(self, query, value, key=None):
super(MultiHeadRelativeAttention, self)._build_from_signature(
query=query,
value=value,
key=key)
if hasattr(value, "shape"):
value_shape = tf.TensorShape(value.shape)
else:
value_shape = value
if key is None:
key_shape = value_shape
elif hasattr(key, "shape"):
key_shape = tf.TensorShape(key.shape)
else:
key_shape = key
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
with tf.init_scope():
einsum_equation, _, output_rank = _build_proj_equation(
key_shape.rank - 1, bound_dims=1, output_dims=2)
self._encoding_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=None,
name="encoding",
**common_kwargs)
def compute_attention(self,
query,
key,
value,
position,
content_attention_bias,
positional_attention_bias,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
attention_mask=None):
"""Computes the attention.
This function defines the computation inside `call` with projected
multihead Q, K, V, R inputs.
Args:
query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.
key: Projected key `Tensor` of shape `[B, S + M, N, key_dim]`.
value: Projected value `Tensor` of shape `[B, S + M, N, key_dim]`.
position: Projected position `Tensor` of shape `[B, L, N, key_dim]`.
content_attention_bias: Trainable bias parameter added to the query head
when calculating the content-based attention score.
positional_attention_bias: Trainable bias parameter added to the query
head when calculating the position-based attention score.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet.
segment_encoding: Optional trainable `Tensor` representing the
segmentation encoding as used in XLNet.
      segment_attention_bias: Optional trainable bias parameter added to the
        query head when calculating the segment-based attention score used in
        XLNet.
attention_mask: (default None) Optional mask that is added to attention
logits. If state is not None, the mask source sequence dimension should
extend M.
Returns:
attention_output: Multi-headed output of attention computation of shape
`[B, S, N, key_dim]`.
"""
content_attention = tf.einsum(self._dot_product_equation,
key,
query + content_attention_bias)
positional_attention = tf.einsum(self._dot_product_equation,
position,
query + positional_attention_bias)
positional_attention = _rel_shift(
positional_attention, klen=tf.shape(content_attention)[3])
if segment_matrix is not None:
segment_attention = tf.einsum("bind,snd->bnis",
query + segment_attention_bias,
segment_encoding)
target_shape = tf.shape(positional_attention)
segment_attention = tf.where(
tf.broadcast_to(tf.expand_dims(segment_matrix, 1), target_shape),
tf.broadcast_to(segment_attention[:, :, :, 1:], target_shape),
tf.broadcast_to(segment_attention[:, :, :, :1], target_shape))
attention_sum = (
content_attention + positional_attention + segment_attention)
else:
attention_sum = content_attention + positional_attention
attention_scores = tf.multiply(
attention_sum, 1.0 / math.sqrt(float(self._key_dim)))
attention_scores = self._masked_softmax(attention_scores, attention_mask)
attention_output = self._dropout_layer(attention_scores)
attention_output = tf.einsum(self._combine_equation,
attention_output,
value)
return attention_output
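  # Shape sketch for compute_attention (hypothetical sizes): with B=2, N=4,
  # T=S=8, H=16 and a relative encoding of length L=16, content_attention and
  # the shifted positional_attention are both [B, N, T, S] = [2, 4, 8, 8]; the
  # sum is scaled by 1/sqrt(H), soft-maxed over S, and combined with the value
  # projection [B, S, N, H] to produce an output of shape [B, T, N, H].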
def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
query,
value,
content_attention_bias,
positional_attention_bias,
key=None,
relative_position_encoding=None,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
state=None,
attention_mask=None):
"""Compute multi-head relative attention over inputs.
Size glossary:
* Number of heads (H): the number of attention heads.
* Value size (V): the size of each value embedding per head.
* Key size (K): the size of each key embedding per head. Equally, the size
of each query embedding per head. Typically K <= V.
* Batch dimensions (B).
* Query (target) attention axes shape (T).
* Value (source) attention axes shape (S), the rank must match the target.
* Encoding length (L): The relative positional encoding length.
Args:
query: attention input.
value: attention input.
content_attention_bias: A trainable bias parameter added to the query head
when calculating the content-based attention score.
positional_attention_bias: A trainable bias parameter added to the query
head when calculating the position-based attention score.
key: attention input.
relative_position_encoding: relative positional encoding for key and
value.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet.
segment_encoding: Optional `Tensor` representing the segmentation encoding
as used in XLNet.
      segment_attention_bias: Optional trainable bias parameter added to the
        query head when calculating the segment-based attention score used in
        XLNet.
state: (default None) optional state. If passed, this is also attended
over as in TransformerXL.
attention_mask: (default None) Optional mask that is added to attention
logits. If state is not None, the mask source sequence dimension should
extend M.
Returns:
attention_output: The result of the computation, of shape [B, T, E],
where `T` is for target sequence shapes and `E` is the query input last
dimension if `output_shape` is `None`. Otherwise, the multi-head outputs
are projected to the shape specified by `output_shape`.
"""
if not self._built_from_signature:
self._build_from_signature(query, value, key=key)
if key is None:
key = value
if state is not None and state.shape.ndims > 1:
value = tf.concat([state, value], 1)
key = tf.concat([state, key], 1)
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S + M, N, H]
key = self._key_dense(key)
# `value` = [B, S + M, N, H]
value = self._value_dense(value)
# `position` = [B, L, N, H]
position = self._encoding_dense(relative_position_encoding)
attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=attention_mask)
# `attention_output` = [B, S, N, H]
attention_output = self._output_dense(attention_output)
return attention_output
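# A hedged usage sketch (illustrative shapes; the attention biases are normally
# trainable weights owned by an enclosing layer such as a Transformer-XL block,
# and the relative encoding here spans 2 * seq_length offsets):
#
#   layer = MultiHeadRelativeAttention(num_heads=4, key_dim=16)
#   x = tf.ones([2, 8, 64])              # [B, S, dim]
#   pos_encoding = tf.ones([2, 16, 64])  # [B, L, dim]
#   content_bias = tf.zeros([4, 16])
#   positional_bias = tf.zeros([4, 16])
#   y = layer(query=x, value=x,
#             content_attention_bias=content_bias,
#             positional_attention_bias=positional_bias,
#             relative_position_encoding=pos_encoding)  # [2, 8, 64]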
@tf.keras.utils.register_keras_serializable(package="Text")
class TwoStreamRelativeAttention(MultiHeadRelativeAttention):
"""Two-stream relative self-attention for XLNet.
In XLNet, each token has two associated vectors at each self-attention layer,
the content stream (h) and the query stream (g).
The content stream is the self-attention stream as in Transformer XL and
represents the context and content (the token itself).
The query stream only has access to contextual information and the position,
but not the content.
This layer shares the same build signature as
`tf.keras.layers.MultiHeadAttention` but has different input/output
projections.
  **Note: This layer is currently experimental.**
Call args:
content_stream: `Tensor` of shape `[B, T, dim]`.
content_attention_bias: Bias `Tensor` for content based attention of shape
`[num_heads, dim]`.
positional_attention_bias: Bias `Tensor` for position based attention of
shape `[num_heads, dim]`.
query_stream: `Tensor` of shape `[B, P, dim]`.
target_mapping: `Tensor` of shape `[B, P, S]`.
relative_position_encoding: Relative positional encoding `Tensor` of shape
`[B, L, dim]`.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet of shape `[B, S, S + M]`.
segment_encoding: Optional `Tensor` representing the segmentation
encoding as used in XLNet of shape `[2, num_heads, dim]`.
segment_attention_bias: Optional trainable bias parameter added to the
      query head when calculating the segment-based attention score used in
XLNet of shape `[num_heads, dim]`.
state: Optional `Tensor` of shape [B, M, E] where M is the length of the
state or memory.
If passed, this is also attended over as in Transformer XL.
content_attention_mask: a boolean mask of shape `[B, T, S]` that
prevents attention to certain positions for content attention computation.
query_attention_mask: a boolean mask of shape `[B, T, S]` that
      prevents attention to certain positions for query attention computation.
"""
def call(self,
content_stream,
content_attention_bias,
positional_attention_bias,
query_stream,
relative_position_encoding,
target_mapping=None,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
state=None,
content_attention_mask=None,
query_attention_mask=None):
"""Compute multi-head relative attention over inputs.
Size glossary:
* Number of heads (H): the number of attention heads.
* Value size (V): the size of each value embedding per head.
* Key size (K): the size of each key embedding per head. Equally, the size
of each query embedding per head. Typically K <= V.
* Number of predictions (P): the number of predictions.
* Batch dimensions (B).
* Query (target) attention axes shape (T).
* Value (source) attention axes shape (S), the rank must match the target.
* Encoding length (L): The relative positional encoding length.
Args:
content_stream: The content representation, commonly referred to as h.
This serves a similar role to the standard hidden states in
Transformer-XL.
content_attention_bias: A trainable bias parameter added to the query head
when calculating the content-based attention score.
positional_attention_bias: A trainable bias parameter added to the query
head when calculating the position-based attention score.
query_stream: The query representation, commonly referred to as g. This
only has access to contextual information and position, but not content.
If not provided, then this is MultiHeadRelativeAttention with
self-attention.
relative_position_encoding: relative positional encoding for key and
value.
target_mapping: Optional `Tensor` representing the target mapping used in
partial prediction.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet.
segment_encoding: Optional `Tensor` representing the segmentation encoding
as used in XLNet.
segment_attention_bias: Optional trainable bias parameter added to the
query head when calculating the segment-based attention score.
state: (default None) optional state. If passed, this is also attended
over as in TransformerXL and XLNet.
content_attention_mask: (default None) Optional mask that is added to
        content attention logits. If state is not None, the mask's source
        sequence dimension should also cover the memory length M.
query_attention_mask: (default None) Optional mask that is added to query
        attention logits. If state is not None, the mask's source sequence
        dimension should also cover the memory length M.
Returns:
content_attention_output, query_attention_output: the results of the
computation, both of shape [B, T, E]. `T` is for target sequence shapes,
`E` is the query input last dimension if `output_shape` is `None`.
Otherwise, the multi-head outputs are projected to the shape specified
by `output_shape`.
"""
if not self._built_from_signature:
self._build_from_signature(content_stream, content_stream, content_stream)
if state is not None and state.shape.ndims > 1:
content_and_memory_stream = tf.concat([state, content_stream], 1)
else:
content_and_memory_stream = content_stream
# `query` = [B, T, N, H]
query = self._query_dense(content_stream)
# `key` = [B, S + M, N, H]
key = self._key_dense(content_and_memory_stream)
# `value` = [B, S + M, N, H]
value = self._value_dense(content_and_memory_stream)
# `position` = [B, L, N, H]
position = self._encoding_dense(relative_position_encoding)
content_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=content_attention_mask)
# `content_attention_output` = [B, S, N, H]
content_attention_output = self._output_dense(content_attention_output)
query_attention_output = None
if query_stream is not None:
query = self._query_dense(query_stream)
if target_mapping is not None:
query = tf.einsum("bmnd,bml->blnd", query, target_mapping)
query_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=query_attention_mask)
query_attention_output = tf.einsum("blnd,bml->bmnd",
query_attention_output,
target_mapping)
else:
query_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=query_attention_mask)
query_attention_output = self._output_dense(query_attention_output)
return content_attention_output, query_attention_output
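# Illustrative usage sketch for the two-stream layer above (an
# assumption-labeled example, not library code): both streams share projections
# built from the content stream, and the relative position encoding length is
# chosen as 2 * seq_length so the relative shift has enough positions to slice
# from.
def _example_two_stream_usage():
  """Illustrative smoke test; returns a pair of [batch, seq, hidden] tensors."""
  num_heads, key_dim, hidden = 4, 16, 64
  batch, seq_length = 2, 8
  layer = TwoStreamRelativeAttention(num_heads=num_heads, key_dim=key_dim)
  content_stream = tf.random.normal([batch, seq_length, hidden])
  query_stream = tf.random.normal([batch, seq_length, hidden])
  relative_position_encoding = tf.random.normal(
      [batch, 2 * seq_length, hidden])
  content_attention_bias = tf.random.normal([num_heads, key_dim])
  positional_attention_bias = tf.random.normal([num_heads, key_dim])
  return layer(
      content_stream=content_stream,
      content_attention_bias=content_attention_bias,
      positional_attention_bias=positional_attention_bias,
      query_stream=query_stream,
      relative_position_encoding=relative_position_encoding)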
| 20,547 | 40.178357 | 91 | py |
models | models-master/official/nlp/modeling/layers/rezero_transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based rezero-transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import rezero_transformer
class TransformerWithReZeroLayerTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerWithReZeroLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
@parameterized.named_parameters(('no_share_attn_ffn', False),
('share_attn_ffn', True))
def test_layer_invocation_with_float16_dtype(self, share_rezero):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
share_rezero=share_rezero)
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_rezero_without_layer_norm(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
use_layer_norm=False)
input_length, width = 16, 30
input_tensor = tf.keras.Input(shape=(input_length, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_data = np.random.rand(2, input_length, width)
test_layer._rezero_a.assign(1.0)
test_layer.reset_rezero()
output_data = model.predict(input_data)
self.assertAllClose(input_data, output_data)
def test_rezero_with_layer_norm(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
use_layer_norm=True)
input_length, width = 16, 30
input_tensor = tf.keras.Input(shape=(input_length, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_data = np.random.rand(2, input_length, width) + 2.0
output_data = model.predict(input_data)
input_data_normed = (input_data -
np.mean(input_data, axis=-1, keepdims=True)) / (
np.std(input_data, axis=-1, keepdims=True))
self.assertAllClose(input_data_normed, output_data)
def test_layer_output_range(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
    # embedding.
new_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(new_output_tensor, output_tensor[:, 0:1, :])
output_tensor = test_layer([input_data, mask_data], output_range=1)
self.assertAllClose(new_output_tensor, output_tensor, atol=5e-5, rtol=0.003)
def test_separate_qkv(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=2,
intermediate_size=128,
intermediate_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
if __name__ == '__main__':
tf.test.main()
| 5,751 | 38.129252 | 80 | py |
models | models-master/official/nlp/modeling/layers/gaussian_process_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Gaussian process functions."""
import os
import shutil
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import gaussian_process
def exact_gaussian_kernel(x1, x2):
"""Computes exact Gaussian kernel value(s) for tensors x1 and x2."""
x1_squared = tf.reduce_sum(tf.square(x1), list(range(1, len(x1.shape))))
x2_squared = tf.reduce_sum(tf.square(x2), list(range(1, len(x2.shape))))
square = (x1_squared[:, tf.newaxis] + x2_squared[tf.newaxis, :] -
2 * tf.matmul(x1, x2, transpose_b=True))
return tf.math.exp(-square / 2.)
def _generate_normal_data(num_sample, num_dim, loc):
"""Generates random data sampled from i.i.d. normal distribution."""
return np.random.normal(
size=(num_sample, num_dim), loc=loc, scale=1. / np.sqrt(num_dim))
def _generate_rbf_data(x_data, orthogonal=True):
"""Generates high-dim data that is the eigen components of a RBF kernel."""
k_rbf = exact_gaussian_kernel(x_data, x_data)
x_orth, x_diag, _ = np.linalg.svd(k_rbf)
if orthogonal:
return x_orth
return np.diag(np.sqrt(x_diag)).dot(x_orth.T)
def _make_minibatch_iterator(data_numpy, batch_size, num_epoch):
"""Makes a tf.data.Dataset for given batch size and num epoches."""
dataset = tf.data.Dataset.from_tensor_slices(data_numpy)
dataset = dataset.repeat(num_epoch).batch(batch_size)
return iter(dataset)
def _compute_posterior_kernel(x_tr, x_ts, kernel_func, ridge_penalty):
"""Computes the posterior covariance matrix of a Gaussian process."""
num_sample = x_tr.shape[0]
k_tt_inv = tf.linalg.inv(
kernel_func(x_tr, x_tr) + ridge_penalty * np.eye(num_sample))
k_ts = kernel_func(x_tr, x_ts)
k_ss = kernel_func(x_ts, x_ts)
return k_ss - tf.matmul(k_ts, tf.matmul(k_tt_inv, k_ts), transpose_a=True)
class GaussianProcessTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(GaussianProcessTest, self).setUp()
self.num_data_dim = 10
self.num_inducing = 1024
self.num_train_sample = 1024
self.num_test_sample = 256
self.prec_tolerance = {'atol': 1e-3, 'rtol': 5e-2}
self.cov_tolerance = {'atol': 5e-2, 'rtol': 2.}
self.rbf_kern_func = exact_gaussian_kernel
self.x_tr = _generate_normal_data(
self.num_train_sample, self.num_data_dim, loc=0.)
self.x_ts = _generate_normal_data(
self.num_test_sample, self.num_data_dim, loc=1.)
def test_layer_build(self):
"""Tests if layer.built=True after building."""
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(units=1)
rfgp_model.build(input_shape=self.x_tr.shape)
self.assertTrue(rfgp_model.built)
@parameterized.named_parameters(('rbf_data', False),
('orthogonal_data', True))
def test_laplace_covariance_minibatch(self, generate_orthogonal_data):
"""Tests if model correctly learns population-lvel precision matrix."""
batch_size = 50
epochs = 1000
x_data = _generate_rbf_data(self.x_ts, generate_orthogonal_data)
data_iterator = _make_minibatch_iterator(x_data, batch_size, epochs)
# Estimates precision matrix using minibatch.
cov_estimator = gaussian_process.LaplaceRandomFeatureCovariance(
momentum=0.999, ridge_penalty=0)
for minibatch_data in data_iterator:
_ = cov_estimator(minibatch_data, training=True)
# Evaluation
prec_mat_expected = x_data.T.dot(x_data)
prec_mat_computed = (
cov_estimator.precision_matrix.numpy() * self.num_test_sample)
np.testing.assert_allclose(prec_mat_computed, prec_mat_expected,
**self.prec_tolerance)
def test_random_feature_prior_approximation(self):
"""Tests random feature GP's ability in approximating exact GP prior."""
num_inducing = 10240
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(
units=1,
num_inducing=num_inducing,
normalize_input=False,
gp_kernel_type='gaussian',
return_random_features=True)
# Extract random features.
_, _, gp_feature = rfgp_model(self.x_tr, training=True)
gp_feature_np = gp_feature.numpy()
prior_kernel_computed = gp_feature_np.dot(gp_feature_np.T)
prior_kernel_expected = self.rbf_kern_func(self.x_tr, self.x_tr)
np.testing.assert_allclose(prior_kernel_computed, prior_kernel_expected,
**self.cov_tolerance)
def test_random_feature_posterior_approximation(self):
"""Tests random feature GP's ability in approximating exact GP posterior."""
# Set momentum = 0.5 so posterior precision matrix is 0.5 * (I + K).
gp_cov_momentum = 0.5
gp_cov_ridge_penalty = 1.
num_inducing = 1024
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(
units=1,
num_inducing=num_inducing,
normalize_input=False,
gp_kernel_type='gaussian',
gp_cov_momentum=gp_cov_momentum,
gp_cov_ridge_penalty=gp_cov_ridge_penalty)
# Computes posterior covariance on test data.
_, _ = rfgp_model(self.x_tr, training=True)
_, gp_cov_ts = rfgp_model(self.x_ts, training=False)
# Scale up covariance estimate since prec matrix is down-scaled by momentum.
post_kernel_computed = gp_cov_ts * gp_cov_momentum
post_kernel_expected = _compute_posterior_kernel(self.x_tr, self.x_ts,
self.rbf_kern_func,
gp_cov_ridge_penalty)
np.testing.assert_allclose(post_kernel_computed, post_kernel_expected,
**self.cov_tolerance)
def test_random_feature_linear_kernel(self):
"""Tests if linear kernel indeed leads to an identity mapping."""
# Specify linear kernel
gp_kernel_type = 'linear'
normalize_input = False
scale_random_features = False
use_custom_random_features = True
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(
units=1,
normalize_input=normalize_input,
gp_kernel_type=gp_kernel_type,
scale_random_features=scale_random_features,
use_custom_random_features=use_custom_random_features,
return_random_features=True)
_, _, gp_feature = rfgp_model(self.x_tr, training=True)
# Check if linear kernel leads to identity mapping.
np.testing.assert_allclose(gp_feature, self.x_tr, **self.prec_tolerance)
def test_no_matrix_update_during_test(self):
"""Tests if the precision matrix is not updated during testing."""
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(units=1)
# Training.
_, gp_covmat_null = rfgp_model(self.x_tr, training=True)
precision_mat_before_test = rfgp_model._gp_cov_layer.precision_matrix
# Testing.
_ = rfgp_model(self.x_ts, training=False)
precision_mat_after_test = rfgp_model._gp_cov_layer.precision_matrix
self.assertAllClose(
gp_covmat_null, tf.eye(self.num_train_sample), atol=1e-4)
self.assertAllClose(
precision_mat_before_test, precision_mat_after_test, atol=1e-4)
def test_state_saving_and_loading(self):
"""Tests if the loaded model returns same results."""
input_data = np.random.random((1, 2))
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(units=1)
inputs = tf.keras.Input((2,), batch_size=1)
outputs = rfgp_model(inputs)
model = tf.keras.Model(inputs, outputs)
gp_output, gp_covmat = model.predict(input_data)
# Save and then load the model.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, 'rfgp_model')
model.save(saved_model_dir)
new_model = tf.keras.models.load_model(saved_model_dir)
gp_output_new, gp_covmat_new = new_model.predict(input_data)
self.assertAllClose(gp_output, gp_output_new, atol=1e-4)
self.assertAllClose(gp_covmat, gp_covmat_new, atol=1e-4)
class MeanFieldLogitsTest(tf.test.TestCase):
def testMeanFieldLogitsLikelihood(self):
"""Tests if scaling is correct under different likelihood."""
batch_size = 10
num_classes = 12
variance = 1.5
mean_field_factor = 2.
rng = np.random.RandomState(0)
tf.random.set_seed(1)
logits = rng.randn(batch_size, num_classes)
covmat = tf.linalg.diag([variance] * batch_size)
logits_logistic = gaussian_process.mean_field_logits(
logits, covmat, mean_field_factor=mean_field_factor)
self.assertAllClose(logits_logistic, logits / 2., atol=1e-4)
def testMeanFieldLogitsTemperatureScaling(self):
"""Tests using mean_field_logits as temperature scaling method."""
batch_size = 10
num_classes = 12
rng = np.random.RandomState(0)
tf.random.set_seed(1)
logits = rng.randn(batch_size, num_classes)
# Test if there's no change to logits when mean_field_factor < 0.
logits_no_change = gaussian_process.mean_field_logits(
logits, covariance_matrix=None, mean_field_factor=-1)
# Test if mean_field_logits functions as a temperature scaling method when
# mean_field_factor > 0, with temperature = sqrt(1. + mean_field_factor).
logits_scale_by_two = gaussian_process.mean_field_logits(
logits, covariance_matrix=None, mean_field_factor=3.)
self.assertAllClose(logits_no_change, logits, atol=1e-4)
self.assertAllClose(logits_scale_by_two, logits / 2., atol=1e-4)
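# Illustrative reference calculation mirroring the test above (a sketch, not
# part of the test suite): with `covariance_matrix=None`, `mean_field_logits`
# reduces to temperature scaling with temperature sqrt(1 + mean_field_factor),
# so a factor of 3 simply halves the logits.
def _mean_field_scaling_example():
  logits = tf.constant([[2.0, -1.0, 0.5]])
  # Expected result: logits / sqrt(1 + 3) = [[1.0, -0.5, 0.25]].
  return gaussian_process.mean_field_logits(
      logits, covariance_matrix=None, mean_field_factor=3.)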
if __name__ == '__main__':
tf.test.main()
| 10,091 | 36.656716 | 80 | py |
models | models-master/official/nlp/modeling/layers/reuse_transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based TransformerEncoder block layer."""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import reuse_attention as attention
class ReuseTransformer(tf.keras.layers.Layer):
"""Transformer layer.
This layer implements the ReuseTransformer Encoder from
"Leveraging redundancy in attention with Reuse Transformers".
(https://arxiv.org/abs/2110.06821)
"""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
head_size=None,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
reuse_attention=0,
use_relative_pe=False,
pe_max_seq_length=512,
layer_idx=None,
max_reuse_layer_idx=None,
**kwargs):
"""Initializes `ReuseTransformer`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
head_size: Projection size of heads.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
      reuse_attention: An integer specifying the number of heads to reuse;
        -1 reuses all heads.
use_relative_pe: whether to use relative position bias.
      pe_max_seq_length: used to set the size of the relative position encodings.
layer_idx: the idx of this layer.
      max_reuse_layer_idx: if passed, layers with an idx greater than this
        value will not reuse attention scores from previous layers.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._head_size = head_size
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._reuse_attention = reuse_attention
self._use_relative_pe = use_relative_pe
self._pe_max_seq_length = pe_max_seq_length
self._layer_idx = layer_idx
self._max_reuse_layer_idx = max_reuse_layer_idx
# Overwrite for the first layer and layers greater than max_reuse_layer_idx.
if self._layer_idx is not None and (
self._layer_idx == 0 or (self._max_reuse_layer_idx is not None and
self._max_reuse_layer_idx < self._layer_idx)):
self._reuse_attention = 0
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if self._head_size is None:
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
else:
self._attention_head_size = self._head_size
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = attention.ReuseMultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
attention_axes=self._attention_axes,
reuse_attention=self._reuse_attention,
use_relative_pe=self._use_relative_pe,
pe_max_seq_length=self._pe_max_seq_length,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(
rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super(ReuseTransformer, self).build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"head_size":
self._head_size,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"reuse_attention":
self._reuse_attention,
"use_relative_pe": self._use_relative_pe,
"pe_max_seq_length": self._pe_max_seq_length,
"max_reuse_layer_idx": self._max_reuse_layer_idx,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes": self._attention_axes,
}
base_config = super(ReuseTransformer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors.
`input tensor` as the single sequence of embeddings.
[`input tensor`, `attention mask`] to have the additional attention
mask.
      [`query tensor`, `attention mask`, `attention scores`] to also pass
      attention scores for reuse. If `reuse_attention` is not 0, `attention
      scores` must be provided (a None value raises a ValueError); if
      `reuse_attention` is 0, any passed scores are ignored.
Returns:
      An output tensor with the same dimensions as the input/query tensor,
      together with the attention scores.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
reuse_attention_scores = None
elif len(inputs) == 3:
input_tensor, attention_mask, reuse_attention_scores = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, attention_mask, reuse_attention_scores = (inputs, None,
None)
key_value = None
if self._reuse_attention != 0 and reuse_attention_scores is None:
raise ValueError(
"reuse_attention_scores cannot be None when reuse_attention != 0.")
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
if reuse_attention_scores is not None:
reuse_attention_scores = reuse_attention_scores[:, :,
0:self._output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask,
reuse_attention_scores=reuse_attention_scores,
return_attention_scores=True)
attention_output, attention_scores = attention_output
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output, attention_scores
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(layer_output + attention_output)
return layer_output, attention_scores
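# Illustrative usage sketch (an assumption-labeled example, not part of the
# layer implementation above): the first block computes attention scores from
# scratch (reuse_attention=0) and the second block reuses all of them
# (reuse_attention=-1), following the three-tensor input format described in
# the `call` docstring.
def _example_reuse_chain():
  """Illustrative smoke test; returns a [batch, seq_length, hidden] tensor."""
  hidden_size, num_heads = 64, 4
  inputs = tf.random.normal([2, 10, hidden_size])
  mask = tf.ones([2, 10, 10])
  first = ReuseTransformer(
      num_attention_heads=num_heads, inner_dim=128,
      inner_activation="relu", reuse_attention=0)
  second = ReuseTransformer(
      num_attention_heads=num_heads, inner_dim=128,
      inner_activation="relu", reuse_attention=-1)
  outputs, scores = first([inputs, mask])
  outputs, _ = second([outputs, mask, scores])
  return outputs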
| 15,687 | 42.457064 | 80 | py |
models | models-master/official/nlp/modeling/layers/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers are the fundamental building blocks for NLP models.
They can be used to assemble new `tf.keras` layers or models.
"""
# pylint: disable=wildcard-import
from official.nlp.modeling.layers import util
from official.nlp.modeling.layers.attention import *
from official.nlp.modeling.layers.bigbird_attention import BigBirdAttention
from official.nlp.modeling.layers.bigbird_attention import BigBirdMasks
from official.nlp.modeling.layers.block_diag_feedforward import BlockDiagFeedforward
from official.nlp.modeling.layers.cls_head import *
from official.nlp.modeling.layers.factorized_embedding import FactorizedEmbedding
from official.nlp.modeling.layers.gated_feedforward import GatedFeedforward
from official.nlp.modeling.layers.gaussian_process import RandomFeatureGaussianProcess
from official.nlp.modeling.layers.kernel_attention import KernelAttention
from official.nlp.modeling.layers.kernel_attention import KernelMask
from official.nlp.modeling.layers.masked_lm import MaskedLM
from official.nlp.modeling.layers.masked_softmax import MaskedSoftmax
from official.nlp.modeling.layers.mat_mul_with_margin import MatMulWithMargin
from official.nlp.modeling.layers.mixing import FourierTransformLayer
from official.nlp.modeling.layers.mixing import HartleyTransformLayer
from official.nlp.modeling.layers.mixing import LinearTransformLayer
from official.nlp.modeling.layers.mixing import MixingMechanism
from official.nlp.modeling.layers.mobile_bert_layers import MobileBertEmbedding
from official.nlp.modeling.layers.mobile_bert_layers import MobileBertMaskedLM
from official.nlp.modeling.layers.mobile_bert_layers import MobileBertTransformer
from official.nlp.modeling.layers.moe import ExpertsChooseMaskedRouter
from official.nlp.modeling.layers.moe import FeedForwardExperts
from official.nlp.modeling.layers.moe import MoeLayer
from official.nlp.modeling.layers.moe import MoeLayerWithBackbone
from official.nlp.modeling.layers.multi_channel_attention import *
from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding
from official.nlp.modeling.layers.pack_optimization import PackBertEmbeddings
from official.nlp.modeling.layers.pack_optimization import StridedTransformerEncoderBlock
from official.nlp.modeling.layers.pack_optimization import StridedTransformerScaffold
from official.nlp.modeling.layers.per_dim_scale_attention import PerDimScaleAttention
from official.nlp.modeling.layers.position_embedding import PositionEmbedding
from official.nlp.modeling.layers.position_embedding import RelativePositionBias
from official.nlp.modeling.layers.position_embedding import RelativePositionEmbedding
from official.nlp.modeling.layers.relative_attention import MultiHeadRelativeAttention
from official.nlp.modeling.layers.relative_attention import TwoStreamRelativeAttention
from official.nlp.modeling.layers.reuse_attention import ReuseMultiHeadAttention
from official.nlp.modeling.layers.reuse_transformer import ReuseTransformer
from official.nlp.modeling.layers.rezero_transformer import ReZeroTransformer
from official.nlp.modeling.layers.routing import *
from official.nlp.modeling.layers.self_attention_mask import *
from official.nlp.modeling.layers.spectral_normalization import *
from official.nlp.modeling.layers.talking_heads_attention import TalkingHeadsAttention
from official.nlp.modeling.layers.text_layers import BertPackInputs
from official.nlp.modeling.layers.text_layers import BertTokenizer
from official.nlp.modeling.layers.text_layers import FastWordpieceBertTokenizer
from official.nlp.modeling.layers.text_layers import SentencepieceTokenizer
from official.nlp.modeling.layers.tn_transformer_expand_condense import TNTransformerExpandCondense
from official.nlp.modeling.layers.transformer import Transformer
from official.nlp.modeling.layers.transformer import TransformerDecoderBlock
from official.nlp.modeling.layers.transformer_encoder_block import TransformerEncoderBlock
from official.nlp.modeling.layers.transformer_scaffold import TransformerScaffold
from official.nlp.modeling.layers.transformer_xl import TransformerXL
from official.nlp.modeling.layers.transformer_xl import TransformerXLBlock
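# Example (illustrative, assuming the package layout above): these layers are
# typically consumed through the package namespace, e.g.
#
#   from official.nlp.modeling import layers
#   block = layers.TransformerEncoderBlock(
#       num_attention_heads=8, inner_dim=2048, inner_activation="gelu")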
| 4,780 | 62.746667 | 99 | py |
models | models-master/official/nlp/modeling/layers/transformer_scaffold_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import transformer_scaffold
# Test class that wraps a standard attention layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyAttention')
class ValidatedAttentionLayer(attention.MultiHeadAttention):
def __init__(self, call_list, **kwargs):
super(ValidatedAttentionLayer, self).__init__(**kwargs)
self.list = call_list
def call(self, query, value, attention_mask=None):
self.list.append(True)
return super(ValidatedAttentionLayer, self).call(
query, value, attention_mask=attention_mask)
def get_config(self):
config = super(ValidatedAttentionLayer, self).get_config()
config['call_list'] = []
return config
# Test class implements a simple feedforward layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyFeedforward')
class ValidatedFeedforwardLayer(tf.keras.layers.Layer):
def __init__(self, call_list, activation, **kwargs):
super(ValidatedFeedforwardLayer, self).__init__(**kwargs)
self.list = call_list
self.activation = activation
def build(self, input_shape):
hidden_size = input_shape[-1]
self._feedforward_dense = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
activation=self.activation,
name='feedforward')
def call(self, inputs):
self.list.append(True)
return self._feedforward_dense(inputs)
def get_config(self):
config = super(ValidatedFeedforwardLayer, self).get_config()
config['call_list'] = []
config['activation'] = self.activation
return config
class TransformerLayerTest(tf.test.TestCase):
def tearDown(self):
super(TransformerLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_creation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_creation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
feedforward_layer = ValidatedFeedforwardLayer(**feedforward_layer_cfg)
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=feedforward_layer,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_invocation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_float16_dtype(self):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_transform_with_initializer(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0])
def test_layer_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
# Serialize the model config. Pass the serialized data through json to
# ensure that we can serialize this layer to disk.
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_with_feedforward_cls_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
new_feedforward_call_list = new_model.get_layer(
name='transformer_scaffold')._feedforward_block.list
self.assertNotEmpty(new_feedforward_call_list)
self.assertTrue(new_feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
if __name__ == '__main__':
tf.test.main()
| 19,910 | 38.349802 | 80 | py |
models | models-master/official/nlp/modeling/layers/masked_softmax.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based softmax layer with optional masking."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
def _large_compatible_negative(tensor_type):
"""Large negative number as Tensor.
  This function is necessary because the standard additive mask value used in
  this module (-1e9) cannot be represented using `tf.float16`.
Args:
tensor_type: A dtype to determine the type.
Returns:
A large negative number.
"""
if tensor_type == tf.float16:
return tf.float16.min
return -1e9
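# Small illustration added for clarity (the helper name is an assumption, not
# part of the original module): -1e9 lies outside the tf.float16 range (about
# +/- 65504), so casting it overflows to -inf, whereas tf.float16.min stays
# finite and therefore makes a safe additive mask value.
def _float16_overflow_example():
  return tf.cast(-1e9, tf.float16)  # Overflows to -inf under float16.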
@tf.keras.utils.register_keras_serializable(package='Text')
class MaskedSoftmax(tf.keras.layers.Layer):
"""Performs a softmax with optional masking on a tensor.
Args:
mask_expansion_axes: Any axes that should be padded on the mask tensor.
normalization_axes: On which axes the softmax should perform.
"""
def __init__(self,
mask_expansion_axes=None,
normalization_axes=None,
**kwargs):
self._mask_expansion_axes = mask_expansion_axes
if normalization_axes is None:
self._normalization_axes = (-1,)
else:
self._normalization_axes = normalization_axes
super().__init__(**kwargs)
def call(self, scores, mask=None):
if mask is not None:
for _ in range(len(scores.shape) - len(mask.shape)):
mask = tf.expand_dims(mask, axis=self._mask_expansion_axes)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -1.e9 for masked positions.
adder = (1.0 - tf.cast(mask, scores.dtype)) * _large_compatible_negative(
scores.dtype)
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
scores += adder
if len(self._normalization_axes) == 1:
return tf.nn.softmax(scores, axis=self._normalization_axes[0])
else:
return tf.math.exp(scores - tf.math.reduce_logsumexp(
scores, axis=self._normalization_axes, keepdims=True))
def get_config(self):
config = {
'mask_expansion_axes': self._mask_expansion_axes,
'normalization_axes': self._normalization_axes
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
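# Usage sketch added for illustration; the shapes and the helper name are
# assumptions, not part of the layer's API. With `mask_expansion_axes=[1]`, a
# rank-2 mask is expanded until its rank matches the scores and then broadcast
# before the softmax, so masked positions receive (near-)zero probability.
def _masked_softmax_usage_example():
  scores = tf.random.normal([2, 4, 8])  # e.g. [batch, from_len, to_len]
  # 1.0 marks positions to attend to; the last two positions are masked out.
  mask = tf.concat([tf.ones([2, 6]), tf.zeros([2, 2])], axis=1)
  layer = MaskedSoftmax(mask_expansion_axes=[1])
  return layer(scores, mask)  # Softmax over the last axis.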
| 2,990 | 33.77907 | 79 | py |
models | models-master/official/nlp/modeling/layers/masked_lm_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for masked language model network."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import masked_lm
from official.nlp.modeling.networks import bert_encoder
class MaskedLMTest(tf.test.TestCase):
def create_layer(self,
vocab_size,
hidden_size,
output='predictions',
xformer_stack=None):
# First, create a transformer stack that we can use to get the LM's
# vocabulary weight.
if xformer_stack is None:
xformer_stack = bert_encoder.BertEncoder(
vocab_size=vocab_size,
num_layers=1,
hidden_size=hidden_size,
num_attention_heads=4,
)
# Create a maskedLM from the transformer stack.
test_layer = masked_lm.MaskedLM(
embedding_table=xformer_stack.get_embedding_table(), output=output)
return test_layer
def test_layer_creation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_layer = self.create_layer(
vocab_size=vocab_size, hidden_size=hidden_size)
# Make sure that the output tensor of the masked LM is the right shape.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions=masked_positions)
expected_output_shape = [None, num_predictions, vocab_size]
self.assertEqual(expected_output_shape, output.shape.as_list())
def test_layer_invocation_with_external_logits(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
xformer_stack = bert_encoder.BertEncoder(
vocab_size=vocab_size,
num_layers=1,
hidden_size=hidden_size,
num_attention_heads=4,
)
test_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
xformer_stack=xformer_stack,
output='predictions')
logit_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
xformer_stack=xformer_stack,
output='logits')
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions)
logit_output = logit_layer(lm_input_tensor, masked_positions)
logit_output = tf.keras.layers.Activation(tf.nn.log_softmax)(logit_output)
logit_layer.set_weights(test_layer.get_weights())
model = tf.keras.Model([lm_input_tensor, masked_positions], output)
logits_model = tf.keras.Model(([lm_input_tensor, masked_positions]),
logit_output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
sequence_length, size=(batch_size, num_predictions))
# ref_outputs = model.predict([lm_input_data, masked_position_data])
# outputs = logits_model.predict([lm_input_data, masked_position_data])
ref_outputs = model([lm_input_data, masked_position_data])
outputs = logits_model([lm_input_data, masked_position_data])
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_predictions, vocab_size)
self.assertEqual(expected_output_shape, ref_outputs.shape)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertAllClose(ref_outputs, outputs)
def test_layer_invocation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_layer = self.create_layer(
vocab_size=vocab_size, hidden_size=hidden_size)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions)
model = tf.keras.Model([lm_input_tensor, masked_positions], output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
_ = model.predict([lm_input_data, masked_position_data])
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = self.create_layer(vocab_size=8, hidden_size=8, output='bad')
if __name__ == '__main__':
tf.test.main()
| 5,551 | 37.825175 | 79 | py |
models | models-master/official/nlp/modeling/layers/attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
import math
import tensorflow as tf
EinsumDense = tf.keras.layers.EinsumDense
MultiHeadAttention = tf.keras.layers.MultiHeadAttention
@tf.keras.utils.register_keras_serializable(package="Text")
class CachedAttention(tf.keras.layers.MultiHeadAttention):
"""Attention layer with cache used for autoregressive decoding.
Arguments are the same as `tf.keras.layers.MultiHeadAttention` layer.
"""
def _update_cache(self, key, value, cache, decode_loop_step):
"""Updates cache states and gets full-length key/value tensors."""
# Combines cached keys and values with new keys and values.
if decode_loop_step is not None:
# TPU special case.
key_seq_dim = cache["key"].shape.as_list()[1]
indices = tf.reshape(
tf.one_hot(decode_loop_step, key_seq_dim, dtype=key.dtype),
[1, key_seq_dim, 1, 1])
key = cache["key"] + key * indices
value_seq_dim = cache["value"].shape.as_list()[1]
indices = tf.reshape(
tf.one_hot(decode_loop_step, value_seq_dim, dtype=value.dtype),
[1, value_seq_dim, 1, 1])
value = cache["value"] + value * indices
else:
key = tf.concat([tf.cast(cache["key"], key.dtype), key], axis=1)
value = tf.concat([tf.cast(cache["value"], value.dtype), value], axis=1)
# Update cache
cache["key"] = key
cache["value"] = value
return key, value
def call(self,
query,
value,
key=None,
attention_mask=None,
cache=None,
decode_loop_step=None,
return_attention_scores=False):
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
    # `query` = [B, F, N, H]
query = self._query_dense(query)
# `key` = [B, T, N, H]
key = self._key_dense(key)
# `value` = [B, T, N, H]
value = self._value_dense(value)
if cache:
key, value = self._update_cache(key, value, cache, decode_loop_step)
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum(self._dot_product_equation, key, query)
# Normalize the attention scores to probabilities.
# `attention_scores` = [B, N, F, T]
attention_scores = self._masked_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_scores = self._dropout_layer(attention_scores)
    # `attention_output` = [B, F, N, H]
attention_output = tf.einsum(self._combine_equation, attention_scores,
value)
attention_output = self._output_dense(attention_output)
if return_attention_scores:
return attention_output, attention_scores, cache
return attention_output, cache
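# Decoding-step sketch added for illustration; the shapes, sizes, and helper
# name are assumptions rather than part of the API. The caller owns the cache:
# a dict of zero-initialized "key"/"value" tensors of shape
# [batch, max_decode_length, num_heads, key_dim] that is passed back in (and
# returned updated) at every autoregressive decoding step.
def _cached_attention_decode_step_example():
  batch, max_decode_length, num_heads, key_dim = 2, 8, 4, 16
  layer = CachedAttention(num_heads=num_heads, key_dim=key_dim)
  # One new token per step during autoregressive decoding.
  query = tf.random.normal([batch, 1, num_heads * key_dim])
  cache = {
      "key": tf.zeros([batch, max_decode_length, num_heads, key_dim]),
      "value": tf.zeros([batch, max_decode_length, num_heads, key_dim]),
  }
  output, cache = layer(query, query, cache=cache, decode_loop_step=0)
  return output, cache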
| 3,896 | 35.083333 | 78 | py |
models | models-master/official/nlp/modeling/layers/text_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests bert.text_layers."""
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from sentencepiece import SentencePieceTrainer
from official.nlp.modeling.layers import text_layers
# This test covers the in-process behavior of a BertTokenizer layer.
# For saving, restoring, and the restored behavior (incl. shape inference),
# see nlp/tools/export_tfhub_lib_test.py.
class BertTokenizerTest(tf.test.TestCase):
def _make_vocab_file(self, vocab, filename="vocab.txt"):
path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.
filename)
with tf.io.gfile.GFile(path, "w") as f:
f.write("\n".join(vocab + [""]))
return path
def test_uncased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[6], [4, 5], [4]]]))
bert_tokenize.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4, 5], [8]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [5, 7], [9]]]))
self.assertEqual(bert_tokenize.vocab_size.numpy(), 8)
# Repeat the above and test that case matters with lower_case=False.
def test_cased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "ABC"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=False, tokenize_with_offsets=True)
inputs = tf.constant(["abc def", "ABC DEF"])
token_ids, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[7], [1]]]))
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [7]]]))
def test_special_tokens_complete(self):
vocab_file = self._make_vocab_file(
["foo", "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "xy"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=1,
start_of_sequence_id=3,
end_of_segment_id=4,
mask_id=5,
vocab_size=7))
def test_special_tokens_partial(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[CLS]", "[SEP]"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=0,
start_of_sequence_id=1,
end_of_segment_id=2,
vocab_size=3)) # No mask_id,
def test_special_tokens_in_estimator(self):
"""Tests getting special tokens without an Eager init context."""
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
def input_fn():
with tf.init_scope():
self.assertFalse(tf.executing_eagerly())
# Build a preprocessing Model.
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)
bert_tokenizer = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
special_tokens_dict = bert_tokenizer.get_special_tokens_dict()
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = bert_tokenizer(sentences)
packed_inputs = text_layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(tokens)
preprocessing = tf.keras.Model(sentences, packed_inputs)
# Map the dataset.
ds = tf.data.Dataset.from_tensors(
(tf.constant(["abc", "DEF"]), tf.constant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return tf_estimator.EstimatorSpec(mode=mode,
predictions=features["input_word_ids"])
estimator = tf_estimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],
[2, 4, 5, 3]]))
# This test covers the in-process behavior of a SentencepieceTokenizer layer.
class SentencepieceTokenizerTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Make a sentencepiece model.
tmp_dir = self.get_temp_dir()
tempfile.mkdtemp(dir=tmp_dir)
vocab = ["a", "b", "c", "d", "e", "abc", "def", "ABC", "DEF"]
model_prefix = os.path.join(tmp_dir, "spm_model")
input_text_file_path = os.path.join(tmp_dir, "train_input.txt")
with tf.io.gfile.GFile(input_text_file_path, "w") as f:
f.write(" ".join(vocab + ["\n"]))
# Add 7 more tokens: <pad>, <unk>, [CLS], [SEP], [MASK], <s>, </s>.
full_vocab_size = len(vocab) + 7
flags = dict(
model_prefix=model_prefix,
model_type="word",
input=input_text_file_path,
pad_id=0, unk_id=1, control_symbols="[CLS],[SEP],[MASK]",
vocab_size=full_vocab_size,
bos_id=full_vocab_size-2, eos_id=full_vocab_size-1)
SentencePieceTrainer.Train(
" ".join(["--{}={}".format(k, v) for k, v in flags.items()]))
self._spm_path = model_prefix + ".model"
def test_uncased(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = sentencepiece_tokenizer(inputs)
self.assertAllEqual(
token_ids,
tf.ragged.constant([[8, 12], [8, 12, 11]]))
sentencepiece_tokenizer.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = sentencepiece_tokenizer(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(
start_offsets, tf.ragged.constant([[0, 3], [0, 3, 7]]))
self.assertAllEqual(
limit_offsets, tf.ragged.constant([[3, 7], [3, 7, 9]]))
self.assertEqual(sentencepiece_tokenizer.vocab_size.numpy(), 16)
# Repeat the above and test that case matters with lower_case=False.
def test_cased(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=False,
nbest_size=0,
tokenize_with_offsets=False)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = sentencepiece_tokenizer(inputs)
self.assertAllEqual(
token_ids,
tf.ragged.constant([[8, 12], [5, 6, 11]]))
sentencepiece_tokenizer.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = sentencepiece_tokenizer(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(
start_offsets,
tf.ragged.constant([[0, 3], [0, 3, 7]]))
self.assertAllEqual(
limit_offsets,
tf.ragged.constant([[3, 7], [3, 7, 9]]))
def test_special_tokens(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
self.assertDictEqual(sentencepiece_tokenizer.get_special_tokens_dict(),
dict(padding_id=0,
start_of_sequence_id=2,
end_of_segment_id=3,
mask_id=4,
vocab_size=16))
def test_special_tokens_in_estimator(self):
"""Tests getting special tokens without an Eager init context."""
def input_fn():
with tf.init_scope():
self.assertFalse(tf.executing_eagerly())
# Build a preprocessing Model.
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
special_tokens_dict = sentencepiece_tokenizer.get_special_tokens_dict()
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = sentencepiece_tokenizer(sentences)
packed_inputs = text_layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(tokens)
preprocessing = tf.keras.Model(sentences, packed_inputs)
# Map the dataset.
ds = tf.data.Dataset.from_tensors(
(tf.constant(["abc", "DEF"]), tf.constant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return tf_estimator.EstimatorSpec(mode=mode,
predictions=features["input_word_ids"])
estimator = tf_estimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 8, 3, 0],
[2, 12, 3, 0]]))
def test_strip_diacritics(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=True,
nbest_size=0,
strip_diacritics=True)
inputs = tf.constant(["a b c d e", "ă ḅ č ḓ é"])
token_ids = sentencepiece_tokenizer(inputs)
self.assertAllEqual(
token_ids,
tf.ragged.constant([[7, 9, 10, 11, 13], [7, 9, 10, 11, 13]]))
def test_fail_on_tokenize_with_offsets_and_strip_diacritics(self):
# Raise an error in init().
with self.assertRaises(ValueError):
text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
tokenize_with_offsets=True,
lower_case=True,
nbest_size=0,
strip_diacritics=True)
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=True,
nbest_size=0,
strip_diacritics=True)
sentencepiece_tokenizer.tokenize_with_offsets = True
# Raise an error in call():
inputs = tf.constant(["abc def", "ABC DEF d", "Äffin"])
with self.assertRaises(ValueError):
sentencepiece_tokenizer(inputs)
def test_serialize_deserialize(self):
self.skipTest("b/170480226")
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=False,
nbest_size=0,
tokenize_with_offsets=False,
name="sentencepiece_tokenizer_layer")
config = sentencepiece_tokenizer.get_config()
new_tokenizer = text_layers.SentencepieceTokenizer.from_config(config)
self.assertEqual(config, new_tokenizer.get_config())
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = sentencepiece_tokenizer(inputs)
token_ids_2 = new_tokenizer(inputs)
self.assertAllEqual(token_ids, token_ids_2)
# TODO(b/170480226): Remove once tf_hub_export_lib_test.py covers saving.
def test_saving(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
inputs = tf.keras.layers.Input([], dtype=tf.string)
outputs = sentencepiece_tokenizer(inputs)
model = tf.keras.Model(inputs, outputs)
export_path = tempfile.mkdtemp(dir=self.get_temp_dir())
model.save(export_path, signatures={})
class BertPackInputsTest(tf.test.TestCase):
def test_round_robin_correct_outputs(self):
bpi = text_layers.BertPackInputs(
10,
start_of_sequence_id=1001,
end_of_segment_id=1002,
padding_id=999,
truncator="round_robin")
# Single input, rank 2.
bert_inputs = bpi(
tf.ragged.constant([[11, 12, 13],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],
[1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
# Two inputs, rank 3. Truncation does not respect word boundaries.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211, 212], [213]],
[[221, 222], [223, 224, 225], [226, 227, 228]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 213, 1002, 999],
[1001, 121, 122, 123, 124, 1002, 221, 222, 223, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1]]))
# Three inputs. rank 3.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211, 212], [213]],
[[221, 222], [223, 224, 225], [226, 227, 228]]]),
tf.ragged.constant([[[311, 312], [313]],
[[321, 322], [323, 324, 325], [326, 327, 328]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 1002, 211, 212, 1002, 311, 312, 1002],
[1001, 121, 122, 1002, 221, 222, 1002, 321, 322, 1002]]))
def test_waterfall_correct_outputs(self):
bpi = text_layers.BertPackInputs(
10,
start_of_sequence_id=1001,
end_of_segment_id=1002,
padding_id=999,
truncator="waterfall")
# Single input, rank 2.
bert_inputs = bpi(
tf.ragged.constant([[11, 12, 13],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],
[1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
# Two inputs, rank 3. Truncation does not respect word boundaries.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211, 212], [213]],
[[221, 222], [223, 224, 225], [226, 227, 228]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 213, 1002, 999],
[1001, 121, 122, 123, 124, 125, 126, 127, 1002, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]))
# Three inputs, rank 3. Truncation does not respect word boundaries.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211], [212]],
[[221, 222], [223, 224, 225], [226, 227, 228]]]),
tf.ragged.constant([[[311, 312], [313]],
[[321, 322], [323, 324, 325], [326, 327]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 1002, 311, 1002],
[1001, 121, 122, 123, 124, 125, 126, 1002, 1002, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 2]]))
def test_special_tokens_dict(self):
special_tokens_dict = dict(start_of_sequence_id=1001,
end_of_segment_id=1002,
padding_id=999,
extraneous_key=666)
bpi = text_layers.BertPackInputs(10,
special_tokens_dict=special_tokens_dict)
bert_inputs = bpi(
tf.ragged.constant([[11, 12, 13],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],
[1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))
# This test covers the in-process behavior of FastWordpieceBertTokenizer layer.
class FastWordPieceBertTokenizerTest(tf.test.TestCase):
def _make_vocab_file(self, vocab, filename="vocab.txt"):
path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.
filename)
with tf.io.gfile.GFile(path, "w") as f:
f.write("\n".join(vocab + [""]))
return path
def test_uncased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[6], [4, 5], [4]]]))
bert_tokenize.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4, 5], [8]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [5, 7], [9]]]))
self.assertEqual(bert_tokenize.vocab_size, 8)
# Repeat the above and test that case matters with lower_case=False.
def test_cased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "ABC"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=False, tokenize_with_offsets=True)
inputs = tf.constant(["abc def", "ABC DEF"])
token_ids, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[7], [1]]]))
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [7]]]))
def test_special_tokens_complete(self):
vocab_file = self._make_vocab_file(
["foo", "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "xy"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=1,
start_of_sequence_id=3,
end_of_segment_id=4,
mask_id=5,
vocab_size=7))
def test_special_tokens_partial(self):
# [UNK] token is required by fast wordpiece tokenizer.
vocab_file = self._make_vocab_file(
["[PAD]", "[CLS]", "[SEP]", "[UNK]"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=0,
start_of_sequence_id=1,
end_of_segment_id=2,
vocab_size=4)) # No mask_id,
def test_special_tokens_in_estimator(self):
"""Tests getting special tokens without an Eager init context."""
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
def input_fn():
with tf.init_scope():
self.assertFalse(tf.executing_eagerly())
# Build a preprocessing Model.
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)
bert_tokenizer = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
special_tokens_dict = bert_tokenizer.get_special_tokens_dict()
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = bert_tokenizer(sentences)
packed_inputs = text_layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(tokens)
preprocessing = tf.keras.Model(sentences, packed_inputs)
# Map the dataset.
ds = tf.data.Dataset.from_tensors(
(tf.constant(["abc", "DEF"]), tf.constant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return tf_estimator.EstimatorSpec(mode=mode,
predictions=features["input_word_ids"])
estimator = tf_estimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],
[2, 4, 5, 3]]))
if __name__ == "__main__":
tf.test.main()
| 24,237 | 42.90942 | 80 | py |
models | models-master/official/nlp/modeling/layers/mat_mul_with_margin_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mat_mul_with_margin layer."""
import tensorflow as tf
from official.nlp.modeling.layers import mat_mul_with_margin
class MatMulWithMarginTest(tf.test.TestCase):
def test_layer_invocation(self):
"""Validate that the Keras object can be created and invoked."""
input_width = 512
test_layer = mat_mul_with_margin.MatMulWithMargin()
# Create a 2-dimensional input (the first dimension is implicit).
left_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
right_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
left_logits, right_logits = test_layer(left_encoded, right_encoded)
# Validate that the outputs are of the expected shape.
expected_output_shape = [None, None]
self.assertEqual(expected_output_shape, left_logits.shape.as_list())
self.assertEqual(expected_output_shape, right_logits.shape.as_list())
def test_serialize_deserialize(self):
# Create a layer object that sets all of its config options.
layer = mat_mul_with_margin.MatMulWithMargin()
# Create another layer object from the first object's config.
new_layer = mat_mul_with_margin.MatMulWithMargin.from_config(
layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(layer.get_config(), new_layer.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,022 | 37.903846 | 79 | py |
models | models-master/official/nlp/modeling/layers/masked_softmax_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based masked softmax layer."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import masked_softmax
class MaskedSoftmaxLayerTest(tf.test.TestCase):
def test_non_masked_softmax(self):
test_layer = masked_softmax.MaskedSoftmax()
input_tensor = tf.keras.Input(shape=(4, 8))
output = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output)
input_data = 10 * np.random.random_sample((3, 4, 8))
output_data = model.predict(input_data)
expected_data = tf.nn.softmax(input_data)
self.assertAllClose(expected_data, output_data)
def test_masked_softmax(self):
test_layer = masked_softmax.MaskedSoftmax()
input_tensor = tf.keras.Input(shape=(4, 8))
mask_tensor = tf.keras.Input(shape=(4, 8))
output = test_layer(input_tensor, mask_tensor)
model = tf.keras.Model([input_tensor, mask_tensor], output)
input_data = 10 * np.random.random_sample((3, 4, 8))
mask_data = np.random.randint(2, size=(3, 4, 8))
output_data = model.predict([input_data, mask_data])
expected_zeros = np.greater(mask_data, 0)
is_zeros = np.greater(output_data, 0)
self.assertAllEqual(expected_zeros, is_zeros)
def test_masked_softmax_with_none_mask(self):
test_layer = masked_softmax.MaskedSoftmax()
input_tensor = tf.keras.Input(shape=(4, 8))
output = test_layer(input_tensor, None)
model = tf.keras.Model(input_tensor, output)
input_data = 10 * np.random.random_sample((3, 4, 8))
output_data = model.predict(input_data)
expected_data = tf.nn.softmax(input_data)
self.assertAllClose(expected_data, output_data)
def test_softmax_with_axes_expansion(self):
test_layer = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1])
input_tensor = tf.keras.Input(shape=(4, 8))
mask_tensor = tf.keras.Input(shape=(8))
output = test_layer(input_tensor, mask_tensor)
model = tf.keras.Model([input_tensor, mask_tensor], output)
input_data = 10 * np.random.random_sample((3, 4, 8))
mask_data = np.random.randint(2, size=(3, 8))
output_data = model.predict([input_data, mask_data])
expanded_mask = np.expand_dims(mask_data, axis=1) * np.ones_like(input_data)
expected_zeros = np.greater(expanded_mask, 0)
is_zeros = np.greater(output_data, 0)
self.assertAllEqual(expected_zeros, is_zeros)
def test_masked_softmax_high_dims(self):
test_layer = masked_softmax.MaskedSoftmax(
mask_expansion_axes=[1], normalization_axes=[6, 7])
input_shape = [2, 3, 4, 5, 6, 7, 8]
mask_shape = [5, 6, 7, 8]
input_tensor = tf.keras.Input(shape=input_shape)
mask_tensor = tf.keras.Input(shape=mask_shape)
output = test_layer(input_tensor, mask_tensor)
model = tf.keras.Model([input_tensor, mask_tensor], output)
input_data = 10 * np.random.random_sample([3] + input_shape)
mask_data = np.random.randint(2, size=[3] + mask_shape)
output_data = model.predict([input_data, mask_data])
expanded_mask = np.expand_dims(mask_data, axis=1)
expanded_mask = np.expand_dims(expanded_mask, axis=1)
expanded_mask = np.expand_dims(
expanded_mask, axis=1) * np.ones_like(input_data)
expected_zeros = np.greater(expanded_mask, 0)
is_zeros = np.greater(output_data, 0)
self.assertAllEqual(expected_zeros, is_zeros)
def test_serialize_deserialize(self):
test_layer = masked_softmax.MaskedSoftmax(
mask_expansion_axes=[1], normalization_axes=[6, 7])
new_layer = masked_softmax.MaskedSoftmax.from_config(
test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,393 | 38.232143 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer_xl_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Transformer XL."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.nlp.modeling.layers import transformer_xl
def create_mock_transformer_xl_data(
batch_size,
num_heads,
head_size,
hidden_size,
seq_length,
memory_length=0,
num_predictions=2,
two_stream=False,
num_layers=1,
include_biases=True,
include_state=False,
include_mask=False,
include_segment=False):
"""Creates mock testing data.
Args:
batch_size: `int`, the batch size.
num_heads: `int`, number of attention heads.
head_size: `int`, the size of each attention head.
hidden_size: `int`, the layer's hidden size.
seq_length: `int`, Sequence length of the input.
memory_length: optional `int`, the length of the state. Defaults to 0.
num_predictions: `int`, the number of predictions used in two stream
attention.
two_stream: `bool`, whether or not to generate two stream data.
num_layers: `int`, the number of Transformer XL blocks.
include_biases: optional `bool`, whether or not to include attention biases.
include_state: optional `bool`, whether or not to include state data.
include_mask: optional `bool`, whether or not to include mask data.
include_segment: optional `bool`, whether or not to include segment data.
Returns:
A dictionary with `str` as keys and `Tensor` as values.
"""
encoding_shape = (batch_size, seq_length * 2, hidden_size)
data = dict(
relative_position_encoding=tf.random.normal(shape=encoding_shape),
content_stream=tf.random.normal(
shape=(batch_size, seq_length, hidden_size)))
if include_biases:
attention_bias_shape = (num_heads, head_size)
data.update(dict(
content_attention_bias=tf.random.normal(shape=attention_bias_shape),
segment_attention_bias=tf.random.normal(shape=attention_bias_shape),
positional_attention_bias=tf.random.normal(shape=attention_bias_shape)))
if two_stream:
data.update(dict(
query_stream=tf.random.normal(
shape=(batch_size, num_predictions, hidden_size)),
target_mapping=tf.random.normal(
shape=(batch_size, num_predictions, seq_length))))
if include_state:
total_seq_length = seq_length + memory_length
if num_layers > 1:
state_shape = (num_layers, batch_size, memory_length, hidden_size)
else:
state_shape = (batch_size, memory_length, hidden_size)
data.update(dict(
state=tf.random.normal(shape=state_shape)))
else:
total_seq_length = seq_length
if include_mask:
mask_shape = (batch_size, num_heads, seq_length, total_seq_length)
mask_data = np.random.randint(2, size=mask_shape).astype("float32")
data["content_attention_mask"] = mask_data
if two_stream:
data["query_attention_mask"] = mask_data
if include_segment:
# A transformer XL block takes an individual segment "encoding" from the
# entirety of the Transformer XL segment "embedding".
if num_layers > 1:
segment_encoding_shape = (num_layers, 2, num_heads, head_size)
segment_encoding_name = "segment_embedding"
else:
segment_encoding_shape = (2, num_heads, head_size)
segment_encoding_name = "segment_encoding"
segment_matrix = np.random.randint(
2, size=(batch_size, seq_length, total_seq_length))
data["segment_matrix"] = tf.math.equal(segment_matrix, 1)
data[segment_encoding_name] = tf.random.normal(shape=segment_encoding_shape)
return data
class TransformerXLBlockTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
memory_length=[0, 4],
two_stream=[True, False],
state=[True, False],
mask=[True, False],
segment=[True, False]))
def test_transformer_xl_block(
self,
two_stream,
memory_length,
state,
mask,
segment):
"""Tests combinations of Transformer XL block calculations."""
batch_size, num_heads, head_size, seq_length = 2, 12, 64, 8
hidden_size, num_predictions, inner_size = 24, 8, 12
data = create_mock_transformer_xl_data(
include_biases=True,
num_heads=num_heads,
head_size=head_size,
hidden_size=hidden_size,
seq_length=seq_length,
batch_size=batch_size,
memory_length=memory_length,
num_predictions=num_predictions,
two_stream=two_stream,
include_state=state,
include_mask=mask,
include_segment=segment)
test_layer = transformer_xl.TransformerXLBlock(
vocab_size=32000,
hidden_size=hidden_size,
num_attention_heads=num_heads,
head_size=head_size,
inner_size=inner_size,
dropout_rate=0.,
attention_dropout_rate=0.,
two_stream=two_stream)
output = test_layer(**data)
content_attention = output["content_attention"]
self.assertEqual(content_attention.shape,
[batch_size, seq_length, hidden_size])
if two_stream:
self.assertIn("query_attention", output)
self.assertEqual(output["query_attention"].shape,
[batch_size, num_predictions, hidden_size])
else:
self.assertNotIn("query_attention", output)
def test_get_config(self):
transformer_xl_block = transformer_xl.TransformerXLBlock(
vocab_size=32000,
head_size=64,
num_attention_heads=2,
hidden_size=10,
inner_size=50,
dropout_rate=0.,
attention_dropout_rate=0.,
two_stream=False)
transformer_xl_block_config = transformer_xl_block.get_config()
new_block = transformer_xl.TransformerXLBlock.from_config(
transformer_xl_block_config)
self.assertEqual(transformer_xl_block_config, new_block.get_config())
class TransformerXLTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
two_stream=[True, False],
memory_length=[0, 4],
reuse_length=[0, 4],
tie_attention_biases=[True, False],
state=[True, False],
mask=[True, False],
segment=[True, False]))
def test_transformer_xl(
self,
two_stream,
memory_length,
reuse_length,
tie_attention_biases,
state,
mask,
segment):
batch_size, num_heads, head_size, seq_length = 2, 12, 64, 8
hidden_size, num_predictions, inner_size = 24, 8, 12
num_layers = 3
data = create_mock_transformer_xl_data(
include_biases=False,
num_heads=num_heads,
head_size=head_size,
hidden_size=hidden_size,
seq_length=seq_length,
batch_size=batch_size,
memory_length=memory_length,
num_predictions=num_predictions,
two_stream=two_stream,
num_layers=num_layers,
include_state=state,
include_mask=mask,
include_segment=segment)
transformer_xl_layer = transformer_xl.TransformerXL(
vocab_size=32000,
num_layers=num_layers,
head_size=head_size,
hidden_size=hidden_size,
num_attention_heads=num_heads,
inner_size=inner_size,
dropout_rate=0.,
attention_dropout_rate=0.,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
two_stream=two_stream,
tie_attention_biases=tie_attention_biases,
memory_length=memory_length,
reuse_length=reuse_length,
inner_activation="relu")
attention_output, cached_memory_states = transformer_xl_layer(**data)
if two_stream:
self.assertEqual(attention_output.shape,
[batch_size, num_predictions, hidden_size])
else:
self.assertEqual(attention_output.shape,
[batch_size, seq_length, hidden_size])
self.assertLen(cached_memory_states, num_layers)
def test_get_config(self):
transformer_xl_layer = transformer_xl.TransformerXL(
vocab_size=32000,
num_layers=12,
hidden_size=36,
head_size=12,
num_attention_heads=12,
inner_size=12,
dropout_rate=0.,
attention_dropout_rate=0.,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
two_stream=False,
tie_attention_biases=True,
memory_length=0,
reuse_length=0,
inner_activation="relu")
transformer_xl_config = transformer_xl_layer.get_config()
new_transformer_xl = transformer_xl.TransformerXL.from_config(
transformer_xl_config)
self.assertEqual(transformer_xl_config, new_transformer_xl.get_config())
if __name__ == "__main__":
np.random.seed(0)
tf.random.set_seed(0)
tf.test.main()
| 9,386 | 33.134545 | 80 | py |
models | models-master/official/nlp/modeling/layers/gated_feedforward_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based gated feedforward layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import gated_feedforward
class GatedFeedforwardTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(GatedFeedforwardTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.parameters(
(True, 1, "after_residual", "float32"),
(True, 1, "after_residual", "mixed_float16"),
(False, 4, "before_residual", "float32"),
(False, 4, "before_residual", "mixed_float16"),
(True, 4, "after_residual", "float32"),
(True, 4, "after_residual", "mixed_float16"),
(False, 1, "before_residual", "float32"),
(False, 1, "before_residual", "mixed_float16"),
)
def test_layer_creation(self, use_gate, num_blocks, dropout_position, dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
inner_dim=128,
inner_activation="relu",
dropout=0.1,
use_gate=use_gate,
num_blocks=num_blocks,
dropout_position=dropout_position,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = gated_feedforward.GatedFeedforward(**kwargs)
sequence_length = 64
width = 128
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.parameters(
(True, 1, "after_residual", "float32"),
(True, 1, "after_residual", "mixed_float16"),
(False, 4, "before_residual", "float32"),
(False, 4, "before_residual", "mixed_float16"),
(True, 4, "after_residual", "float32"),
(True, 4, "after_residual", "mixed_float16"),
(False, 1, "before_residual", "float32"),
(False, 1, "before_residual", "mixed_float16"),
)
def test_layer_invocation(self, use_gate, num_blocks, dropout_position,
dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
inner_dim=16,
inner_activation="relu",
dropout=0.1,
use_gate=use_gate,
num_blocks=num_blocks,
dropout_position=dropout_position,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = gated_feedforward.GatedFeedforward(**kwargs)
sequence_length = 16
width = 32
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_data = model.predict(input_data)
self.assertEqual(output_data.shape, (batch_size, sequence_length, width))
def test_serialize_deserialize(self):
kwargs = dict(
inner_dim=16,
inner_activation="relu",
dropout=0.1,
use_gate=False,
num_blocks=4,
dropout_position="after_residual",
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = gated_feedforward.GatedFeedforward(**kwargs)
new_layer = gated_feedforward.GatedFeedforward.from_config(
test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == "__main__":
tf.test.main()
| 4,484 | 36.066116 | 80 | py |
models | models-master/official/nlp/modeling/layers/routing.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for Mixture of Experts (MoE) routing.
For MoE routing, we need to split a set of tokens into subsets of tokens.
Later on, different subsets of tokens can potentially go to different experts.
"""
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="Text")
class TokenImportanceWithMovingAvg(tf.keras.layers.Layer):
"""Routing based on per-token importance value."""
def __init__(self,
vocab_size,
init_importance,
moving_average_beta=0.995,
**kwargs):
self._vocab_size = vocab_size
self._init_importance = init_importance
self._moving_average_beta = moving_average_beta
super().__init__(**kwargs)
def build(self, input_shape):
self._importance_embedding = self.add_weight(
name="importance_embed",
        shape=(self._vocab_size,),
initializer=tf.keras.initializers.Constant(self._init_importance),
trainable=False)
def get_config(self):
config = {
"vocab_size":
self._vocab_size,
"init_importance":
self._init_importance,
"moving_average_beta":
self._moving_average_beta,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def update_token_importance(self, token_ids, importance):
token_ids = tf.reshape(token_ids, shape=[-1])
importance = tf.reshape(importance, shape=[-1])
beta = self._moving_average_beta
old_importance = tf.gather(self._importance_embedding, token_ids)
self._importance_embedding.assign(tf.tensor_scatter_nd_update(
self._importance_embedding,
tf.expand_dims(token_ids, axis=1),
old_importance * beta + tf.cast(importance * (1.0 - beta),
dtype=tf.float32)))
def call(self, inputs):
return tf.gather(self._importance_embedding, inputs)
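# Usage sketch added for illustration; the vocabulary size, token ids, and
# helper name are assumptions. The layer keeps one non-trainable importance
# value per vocabulary id, updated as an exponential moving average:
#   new_importance = old_importance * beta + observed_importance * (1 - beta)
def _token_importance_usage_example():
  layer = TokenImportanceWithMovingAvg(
      vocab_size=100, init_importance=0.0, moving_average_beta=0.9)
  token_ids = tf.constant([[5, 7], [9, 11]])
  _ = layer(token_ids)  # The first call builds the importance table.
  layer.update_token_importance(
      token_ids=token_ids, importance=tf.ones([2, 2]))
  return layer(token_ids)  # Each updated entry is 0.0 * 0.9 + 1.0 * 0.1 = 0.1.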
@tf.keras.utils.register_keras_serializable(package="Text")
class SelectTopK(tf.keras.layers.Layer):
"""Select top-k + random-k tokens according to importance."""
def __init__(self,
top_k=None,
random_k=None,
**kwargs):
self._top_k = top_k
self._random_k = random_k
super().__init__(**kwargs)
def get_config(self):
config = {
"top_k":
self._top_k,
"random_k":
self._random_k,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
if self._random_k is None:
      # Pure top-k, no randomness.
pos = tf.argsort(inputs, direction="DESCENDING")
selected = tf.slice(pos, [0, 0], [-1, self._top_k])
not_selected = tf.slice(pos, [0, self._top_k], [-1, -1])
elif self._top_k is None:
# Pure randomness, no top-k.
pos = tf.argsort(tf.random.uniform(shape=tf.shape(inputs)),
direction="DESCENDING")
selected = tf.slice(pos, [0, 0], [-1, self._random_k])
not_selected = tf.slice(pos, [0, self._random_k], [-1, -1])
else:
# Top-k plus randomness.
pos = tf.argsort(inputs, direction="DESCENDING")
selected_top_k = tf.slice(pos, [0, 0], [-1, self._top_k])
pos_left = tf.slice(pos, [0, self._top_k], [-1, -1])
# Randomly shuffle pos_left
sort_index = tf.argsort(
tf.random.uniform(shape=tf.shape(pos_left)),
direction="DESCENDING")
pos_left = tf.gather(pos_left, sort_index, batch_dims=1, axis=1)
selected_rand = tf.slice(pos_left, [0, 0], [-1, self._random_k])
not_selected = tf.slice(pos_left, [0, self._random_k], [-1, -1])
selected = tf.concat([selected_top_k, selected_rand], axis=1)
# Return the indices of selected and not-selected tokens.
return selected, not_selected
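# Usage sketch added for illustration; the scores and the helper name are
# assumptions. Given per-token importance scores, the layer returns the
# positions of the selected tokens and of the remaining tokens.
def _select_top_k_usage_example():
  importance = tf.constant([[0.1, 0.9, 0.5, 0.2],
                            [0.7, 0.3, 0.8, 0.1]])
  selected, not_selected = SelectTopK(top_k=2)(importance)
  # selected == [[1, 2], [2, 0]]; not_selected holds the remaining positions.
  return selected, not_selected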
| 4,459 | 34.396825 | 75 | py |
models | models-master/official/nlp/modeling/layers/tn_expand_condense_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ExpandCondense tensor network layer."""
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers.tn_expand_condense import TNExpandCondense
class TNLayerTest(tf.test.TestCase, parameterized.TestCase):
"""Unit tests for ExpandCondense TN layer.
"""
def setUp(self):
super().setUp()
self.labels = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))), axis=0)
def _build_model(self, data, proj_multiple=2):
model = tf.keras.models.Sequential()
model.add(
TNExpandCondense(
proj_multiplier=proj_multiple,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
return model
@parameterized.parameters((768, 6), (1024, 2))
def test_train(self, input_dim, proj_multiple):
tf.keras.utils.set_random_seed(0)
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model for 5 epochs
history = model.fit(data, self.labels, epochs=5, batch_size=32)
# Check that loss decreases and accuracy increases
self.assertGreater(history.history['loss'][0], history.history['loss'][-1])
self.assertLess(
history.history['accuracy'][0], history.history['accuracy'][-1])
@parameterized.parameters((768, 6), (1024, 2))
def test_weights_change(self, input_dim, proj_multiple):
tf.keras.utils.set_random_seed(0)
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
before = model.get_weights()
model.fit(data, self.labels, epochs=5, batch_size=32)
after = model.get_weights()
# Make sure every layer's weights changed
for i, _ in enumerate(before):
self.assertTrue((after[i] != before[i]).any())
@parameterized.parameters((768, 6), (1024, 2))
def test_output_shape(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
input_shape = data.shape
actual_output_shape = model(data).shape
expected_output_shape = model.compute_output_shape(input_shape)
self.assertEqual(expected_output_shape, actual_output_shape)
@parameterized.parameters((768, 6), (1024, 2))
def test_expandcondense_num_parameters(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
proj_size = proj_multiple * data.shape[-1]
model = tf.keras.models.Sequential()
model.add(
TNExpandCondense(
proj_multiplier=proj_multiple,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
w1_params = data.shape[-1]**2
w2_params = 128 * 128 * (proj_size // data.shape[-1])
w3_params = 128 * 128 * (proj_size // data.shape[-1])
w4_params = (data.shape[-1] // 128) * 128 * data.shape[-1]
bias_params = ((data.shape[-1] // 128) * 128 *
(proj_size // data.shape[-1]))
expected_num_parameters = (w1_params + w2_params + w3_params +
w4_params) + bias_params
self.assertEqual(expected_num_parameters, model.count_params())
@parameterized.parameters((912, 6), (200, 2))
def test_incorrect_sizes(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
with self.assertRaises(AssertionError):
model = self._build_model(data, proj_multiple)
model.compile(optimizer='adam', loss='binary_crossentropy')
@parameterized.parameters((768, 6), (1024, 2))
def test_config(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
expected_num_parameters = model.layers[0].count_params()
# Serialize model and use config to create new layer
model_config = model.get_config()
layer_config = model_config['layers'][1]['config']
new_model = TNExpandCondense.from_config(layer_config)
# Build the layer so we can count params below
new_model.build(layer_config['batch_input_shape'])
# Check that original layer had same num params as layer built from config
self.assertEqual(expected_num_parameters, new_model.count_params())
@parameterized.parameters((768, 6), (1024, 2))
def test_model_save(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model for 5 epochs
model.fit(data, self.labels, epochs=5, batch_size=32)
save_path = os.path.join(self.get_temp_dir(), 'test_model')
model.save(save_path)
loaded_model = tf.keras.models.load_model(save_path)
# Compare model predictions and loaded_model predictions
self.assertAllEqual(model.predict(data), loaded_model.predict(data))
if __name__ == '__main__':
tf.test.main()
| 5,890 | 35.81875 | 79 | py |
models | models-master/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for masked LM loss."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy
class ClassificationLossTest(tf.test.TestCase):
def create_lm_model(self,
vocab_size,
sequence_length,
hidden_size,
num_predictions,
output="predictions"):
# First, create a transformer stack that we can use to get the LM's
# vocabulary weight.
xformer_stack = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=1,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_attention_heads=4,
)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
_ = xformer_stack([word_ids, mask, type_ids])
    # Create a masked LM from the transformer stack.
test_layer = layers.MaskedLM(
embedding_table=xformer_stack.get_embedding_table(), output=output)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions=masked_lm_positions)
return tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
def test_loss_3d_input(self):
"""Test overall loss with a 3-dimensional input, from a masked LM."""
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
model = self.create_lm_model(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Get the output of the masked LM.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
output_data = model.predict([lm_input_data, masked_position_data])
# Calculate loss.
labels = np.random.randint(vocab_size, size=(batch_size, num_predictions))
weights = np.random.randint(2, size=(batch_size, num_predictions))
per_example_loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=weights)
# Total loss data should have one value, and that value shouldn't be zero
# in this case (as we're using random data).
expected_shape = [] # Scalar
self.assertEqual(expected_shape, per_example_loss_data.shape.as_list())
self.assertNotAllClose(
tf.zeros_like(per_example_loss_data), per_example_loss_data)
def test_loss_weights_3d_input(self):
"""Test masked loss with a 3-dimensional input, from a masked LM."""
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
model = self.create_lm_model(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Get the output of the masked LM.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
output_data = model.predict([lm_input_data, masked_position_data])
# Calculate a fully masked weight tensor. This should give a loss of zero.
labels = np.random.randint(vocab_size, size=(batch_size, num_predictions))
null_weights = np.zeros((batch_size, num_predictions))
weighted_loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=null_weights)
# Because the tensor is fully masked, the loss should be 0.
self.assertAllClose(0, weighted_loss_data)
def test_mismatched_predictions_and_labels_ranks_squeezes(self):
"""Test that the loss asserts when rank(predictions)-1 != rank(labels)."""
batch_size = 3
output_data = np.random.random_sample((batch_size, 10))
labels = np.random.randint(10, size=(batch_size, 1))
    # This test only verifies that the squeeze succeeds.
_ = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels)
def test_mismatched_weights_and_labels_ranks_fail(self):
"""Test that the loss asserts when rank(predictions) != rank(labels)."""
batch_size = 3
output_data = np.random.random_sample((batch_size, 10, 15))
labels = np.random.randint(10, size=(batch_size, 10))
weights = np.random.randint(2, size=(batch_size))
with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"):
_ = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=weights)
def test_tf_tensor_inputs(self):
"""Test that tf.Tensors can be used as inputs to the loss function."""
batch_size = 3
output_data = tf.convert_to_tensor(
np.random.random_sample((batch_size, 10, 15)))
labels = tf.convert_to_tensor(np.random.randint(10, size=(batch_size, 10)))
weights = tf.convert_to_tensor(np.random.randint(2, size=(batch_size, 10)))
# We're not trying to validate numerical correctness, just ensure that
# we can in fact pass tensors to these functions without causing runtime
# errors from the shape checking code.
_ = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=weights)
def test_legacy_lm_loss_compatibility(self):
"""Test to validate computational correctness during refactors."""
# This is the empirical output of a masked LM with the following parameters:
# batch_size = 3
# vocab_size = 5
# sequence_length = 4
# num_predictions = 2
output_data = np.array(
[[[-2.5286622, -1.0963473, -1.4925185, -2.4451098, -1.2923571],
[-2.7117882, -1.1205841, -4.02187, -0.9966936, -1.5119683]],
[[-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741],
[-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741]],
[[-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509],
[-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509]]])
labels = np.array([[4, 0], [2, 2], [2, 1]])
# Validate that overall loss calculations are the same.
weights = np.array([[1, 0], [0, 0], [0, 0]])
loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data,
labels=labels,
weights=weights,
from_logits=True)
expected_loss_data = 1.2923441
self.assertAllClose(expected_loss_data, loss_data, rtol=1e-3)
def test_legacy_classification_loss_compatibility(self):
"""Test to validate computational correctness during refactors."""
# This is the empirical output of a classifier with the following params:
# batch_size = 2
# num_classes = 3
output_data = np.array([[-1.6094601e-03, -1.0966038e+01, -6.4434357e+00],
[-1.6975292e-03, -6.4009643e+00, -1.0226612e+01]])
labels = np.array([2, 1])
# Validate that overall loss calculations are the same.
weights = None
loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data,
labels=labels,
weights=weights,
from_logits=True)
expected_loss_data = 6.4222
self.assertAllClose(expected_loss_data, loss_data, rtol=1e-3)
if __name__ == "__main__":
tf.test.main()
| 8,464 | 40.699507 | 81 | py |
models | models-master/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Weighted sparse categorical cross-entropy losses."""
import tensorflow as tf
def _adjust_labels(labels, predictions):
"""Adjust the 'labels' tensor by squeezing it if needed."""
labels = tf.cast(labels, tf.int32)
if len(predictions.shape) == len(labels.shape):
labels = tf.squeeze(labels, [-1])
return labels, predictions
def _validate_rank(labels, predictions, weights):
if weights is not None and len(weights.shape) != len(labels.shape):
raise RuntimeError(
("Weight and label tensors were not of the same rank. weights.shape "
"was %s, and labels.shape was %s.") %
        (weights.shape, labels.shape))
if (len(predictions.shape) - 1) != len(labels.shape):
raise RuntimeError(
("Weighted sparse categorical crossentropy expects `labels` to have a "
"rank of one less than `predictions`. labels.shape was %s, and "
"predictions.shape was %s.") % (labels.shape, predictions.shape))
def loss(labels, predictions, weights=None, from_logits=False):
"""Calculate a per-batch sparse categorical crossentropy loss.
  This loss function assumes that the predictions are post-softmax unless
  `from_logits=True` is passed.
Args:
labels: The labels to evaluate against. Should be a set of integer indices
ranging from 0 to (vocab_size-1).
    predictions: The network predictions. Softmax should already be applied
      unless `from_logits` is True.
weights: An optional weight array of the same shape as the 'labels' array.
If None, all examples will be used.
from_logits: Whether the input predictions are logits.
Returns:
A loss scalar.
Raises:
RuntimeError if the passed tensors do not have the same rank.
"""
# When using these functions with the Keras core API, we will need to squeeze
# the labels tensor - Keras adds a spurious inner dimension.
labels, predictions = _adjust_labels(labels, predictions)
_validate_rank(labels, predictions, weights)
example_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions, from_logits=from_logits)
if weights is None:
return tf.reduce_mean(example_losses)
weights = tf.cast(weights, predictions.dtype)
return tf.math.divide_no_nan(
tf.reduce_sum(example_losses * weights), tf.reduce_sum(weights))
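# Illustrative usage sketch (not part of the original module; the shapes and
# values below are assumptions chosen only for demonstration):
#
#   import numpy as np
#   logits = np.random.random_sample((2, 3, 5))   # [batch, length, vocab]
#   labels = np.random.randint(5, size=(2, 3))    # [batch, length]
#   weights = np.array([[1, 1, 0], [1, 0, 0]])    # zero out padding positions
#   scalar_loss = loss(labels=labels, predictions=logits,
#                      weights=weights, from_logits=True)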
| 2,859 | 38.722222 | 79 | py |
models | models-master/official/nlp/tasks/electra_task_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.electra_task."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import electra
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.tasks import electra_task
class ElectraPretrainTaskTest(tf.test.TestCase):
def test_task(self):
config = electra_task.ElectraPretrainConfig(
model=electra.ElectraPretrainerConfig(
generator_encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
discriminator_encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
num_masked_tokens=20,
sequence_length=128,
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path="dummy",
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
task = electra_task.ElectraPretrainTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
if __name__ == "__main__":
tf.test.main()
| 2,282 | 36.42623 | 74 | py |
models | models-master/official/nlp/tasks/masked_lm.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked language task."""
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class MaskedLMConfig(cfg.TaskConfig):
"""The model config."""
model: bert.PretrainerConfig = dataclasses.field(
default_factory=lambda: bert.PretrainerConfig( # pylint: disable=g-long-lambda
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence',
)
]
)
)
# TODO(b/154564893): Mathematically, scale_loss should be True.
# However, it works better with scale_loss being False.
scale_loss: bool = False
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
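# Construction sketch (illustrative; assumes a registered pretraining data
# config such as official.nlp.data.pretrain_dataloader.BertPretrainDataConfig
# and a placeholder input path):
#   config = MaskedLMConfig(
#       train_data=pretrain_dataloader.BertPretrainDataConfig(
#           input_path='/tmp/pretrain*.tfrecord',
#           max_predictions_per_seq=20,
#           seq_length=128,
#           global_batch_size=512))
#   task = MaskedLMTask(config)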
@task_factory.register_task_cls(MaskedLMConfig)
class MaskedLMTask(base_task.Task):
"""Task object for Mask language modeling."""
def _build_encoder(self, encoder_cfg):
return encoders.build_encoder(encoder_cfg)
def build_model(self, params=None):
config = params or self.task_config.model
encoder_cfg = config.encoder
encoder_network = self._build_encoder(encoder_cfg)
cls_heads = [
layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
] if config.cls_heads else []
return models.BertPretrainerV2(
mlm_activation=tf_utils.get_activation(config.mlm_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=config.mlm_initializer_range),
encoder_network=encoder_network,
classification_heads=cls_heads)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> tf.Tensor:
with tf.name_scope('MaskedLMTask/losses'):
metrics = dict([(metric.name, metric) for metric in metrics])
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['mlm_logits'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses *
lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
model_outputs['next_sentence'], dtype=tf.float32)
sentence_loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
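  # Numeric sketch of the weighted reduction above (illustrative values only):
  # with per-token losses [2.0, 4.0, 6.0] and masked_lm_weights [1., 1., 0.],
  # mlm_loss = (2.0 + 4.0) / (1. + 1.) = 3.0; an all-zero weight vector yields
  # 0.0 via tf.math.divide_no_nan rather than NaN.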
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='lm_example_loss')
]
# TODO(hongkuny): rethink how to manage metrics creation with heads.
if self.task_config.train_data.use_next_sentence_label:
metrics.append(
tf.keras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
with tf.name_scope('MaskedLMTask/process_metrics'):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(
labels['masked_lm_ids'], model_outputs['mlm_logits'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'], model_outputs['next_sentence'])
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
if self.task_config.scale_loss:
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
if self.task_config.scale_loss:
grads = tape.gradient(scaled_loss, tvars)
else:
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = self.inference_step(inputs, model)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
| 7,977 | 36.810427 | 85 | py |
models | models-master/official/nlp/tasks/translation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the translation task."""
import dataclasses
import os
from typing import Optional
from absl import logging
import sacrebleu
import tensorflow as tf
import tensorflow_text as tftxt
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.nlp.data import data_loader_factory
from official.nlp.metrics import bleu
from official.nlp.modeling import models
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def _padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns the cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.cast(vocab_size - 1, tf.float32)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.math.log(confidence) + tf.cast(vocab_size - 1, tf.float32)
* low_confidence * tf.math.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
return xentropy * weights, weights
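# Small numeric sketch of the smoothing above (illustrative): with vocab_size=4
# and smoothing=0.1, the soft target is 0.9 for the true class and
# (1 - 0.9) / 3 = 0.0333... for each other class, e.g.:
#   logits = tf.random.normal([1, 2, 4])    # [batch, length, vocab]
#   labels = tf.constant([[3, 1]])          # [batch, length]; 0 marks padding
#   xent, weights = _padded_cross_entropy_loss(logits, labels, 0.1, 4)
#   loss = tf.reduce_sum(xent) / tf.reduce_sum(weights)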
@dataclasses.dataclass
class EncDecoder(base_config.Config):
"""Configurations for Encoder/Decoder."""
num_layers: int = 6
num_attention_heads: int = 8
intermediate_size: int = 2048
activation: str = "relu"
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
intermediate_dropout: float = 0.1
use_bias: bool = False
norm_first: bool = True
norm_epsilon: float = 1e-6
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base Seq2Seq model configuration."""
encoder: EncDecoder = dataclasses.field(default_factory=EncDecoder)
decoder: EncDecoder = dataclasses.field(default_factory=EncDecoder)
embedding_width: int = 512
dropout_rate: float = 0.1
# Decoding.
padded_decode: bool = False
decode_max_length: Optional[int] = None
beam_size: int = 4
alpha: float = 0.6
# Training.
label_smoothing: float = 0.1
@dataclasses.dataclass
class TranslationConfig(cfg.TaskConfig):
"""The translation task config."""
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
# Tokenization
sentencepiece_model_path: str = ""
# Evaluation.
print_translations: Optional[bool] = None
def write_test_record(params, model_dir):
"""Writes the test input to a tfrecord."""
# Get raw data from tfds.
params = params.replace(transform_and_batch=False)
dataset = data_loader_factory.get_data_loader(params).load()
references = []
total_samples = 0
output_file = os.path.join(model_dir, "eval.tf_record")
writer = tf.io.TFRecordWriter(output_file)
for d in dataset:
references.append(d[params.tgt_lang].numpy().decode())
example = tf.train.Example(
features=tf.train.Features(
feature={
"unique_id": tf.train.Feature(
int64_list=tf.train.Int64List(value=[total_samples])),
params.src_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[d[params.src_lang].numpy()])),
params.tgt_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[d[params.tgt_lang].numpy()])),
}))
writer.write(example.SerializeToString())
total_samples += 1
batch_size = params.global_batch_size
num_dummy_example = batch_size - total_samples % batch_size
for i in range(num_dummy_example):
example = tf.train.Example(
features=tf.train.Features(
feature={
"unique_id": tf.train.Feature(
int64_list=tf.train.Int64List(value=[total_samples + i])),
params.src_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b""])),
params.tgt_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b""])),
}))
writer.write(example.SerializeToString())
writer.close()
return references, output_file
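# Padding sketch for the writer above (illustrative numbers): with
# global_batch_size=8 and 13 real examples, num_dummy_example = 8 - 13 % 8 = 3,
# so the record file holds 16 entries and every evaluation batch is full.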
@task_factory.register_task_cls(TranslationConfig)
class TranslationTask(base_task.Task):
"""A single-replica view of training procedure.
  Tasks provide artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss
and customized metrics with reduction.
"""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
self._sentencepiece_model_path = params.sentencepiece_model_path
if params.sentencepiece_model_path:
self._sp_tokenizer = tftxt.SentencepieceTokenizer(
model=tf.io.gfile.GFile(params.sentencepiece_model_path, "rb").read(),
add_eos=True)
try:
empty_str_tokenized = self._sp_tokenizer.tokenize("").numpy()
except tf.errors.InternalError:
raise ValueError(
"EOS token not in tokenizer vocab."
"Please make sure the tokenizer generates a single token for an "
"empty string.")
self._eos_id = empty_str_tokenized.item()
self._vocab_size = self._sp_tokenizer.vocab_size().numpy()
else:
raise ValueError("Setencepiece model path not provided.")
if (params.validation_data.input_path or
params.validation_data.tfds_name) and self._logging_dir:
self._references, self._tf_record_input_path = write_test_record(
params.validation_data, self.logging_dir)
def build_model(self) -> tf.keras.Model:
"""Creates model architecture.
Returns:
A model instance.
"""
model_cfg = self.task_config.model
encoder_kwargs = model_cfg.encoder.as_dict()
encoder_layer = models.TransformerEncoder(**encoder_kwargs)
decoder_kwargs = model_cfg.decoder.as_dict()
decoder_layer = models.TransformerDecoder(**decoder_kwargs)
return models.Seq2SeqTransformer(
vocab_size=self._vocab_size,
embedding_width=model_cfg.embedding_width,
dropout_rate=model_cfg.dropout_rate,
padded_decode=model_cfg.padded_decode,
decode_max_length=model_cfg.decode_max_length,
beam_size=model_cfg.beam_size,
alpha=model_cfg.alpha,
encoder_layer=encoder_layer,
decoder_layer=decoder_layer,
eos_id=self._eos_id)
def build_inputs(self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a dataset."""
if params.is_training:
dataloader_params = params
else:
input_path = self._tf_record_input_path
# Read from padded tf records instead.
dataloader_params = params.replace(
input_path=input_path,
tfds_name="",
tfds_split="",
has_unique_id=True)
dataloader_params = dataloader_params.replace(
sentencepiece_model_path=self._sentencepiece_model_path)
return data_loader_factory.get_data_loader(dataloader_params).load(
input_context)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
"""Standard interface to compute losses.
Args:
labels: optional label tensors.
model_outputs: a nested structure of output tensors.
aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
del aux_losses
smoothing = self.task_config.model.label_smoothing
xentropy, weights = _padded_cross_entropy_loss(model_outputs, labels,
smoothing, self._vocab_size)
return tf.reduce_sum(xentropy) / tf.reduce_sum(weights)
def train_step(self,
inputs,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics=None):
"""Does forward and backward.
With distribution strategies, this method runs on devices.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(labels=inputs["targets"], model_outputs=outputs)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
# For mixed precision, when a LossScaleOptimizer is used, the loss is
# scaled to avoid numeric underflow.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, inputs["targets"], outputs)
return logs
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
unique_ids = inputs.pop("unique_id")
# Validation loss
outputs = model(inputs, training=False)
# Computes per-replica loss to help understand if we are overfitting.
loss = self.build_losses(labels=inputs["targets"], model_outputs=outputs)
inputs.pop("targets")
# Beam search to calculate metrics.
model_outputs = model(inputs, training=False)
outputs = model_outputs
logs = {
self.loss: loss,
"inputs": inputs["inputs"],
"unique_ids": unique_ids,
}
logs.update(outputs)
return logs
def aggregate_logs(self, state=None, step_outputs=None):
"""Aggregates over logs returned from a validation step."""
if state is None:
state = {}
for in_token_ids, out_token_ids, unique_ids in zip(
step_outputs["inputs"],
step_outputs["outputs"],
step_outputs["unique_ids"]):
for in_ids, out_ids, u_id in zip(
in_token_ids.numpy(), out_token_ids.numpy(), unique_ids.numpy()):
state[u_id] = (in_ids, out_ids)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
def _decode(ids):
return self._sp_tokenizer.detokenize(ids).numpy().decode()
def _trim_and_decode(ids):
"""Trim EOS and PAD tokens from ids, and decode to return a string."""
try:
index = list(ids).index(self._eos_id)
return _decode(ids[:index])
except ValueError: # No EOS found in sequence
return _decode(ids)
translations = []
for u_id in sorted(aggregated_logs):
if u_id >= len(self._references):
continue
src = _trim_and_decode(aggregated_logs[u_id][0])
translation = _trim_and_decode(aggregated_logs[u_id][1])
translations.append(translation)
if self.task_config.print_translations:
        # Decoding the in_ids to reflect what the model sees.
logging.info("Translating:\n\tInput: %s\n\tOutput: %s\n\tReference: %s",
src, translation, self._references[u_id])
sacrebleu_score = sacrebleu.corpus_bleu(
translations, [self._references]).score
bleu_score = bleu.bleu_on_list(self._references, translations)
return {"sacrebleu_score": sacrebleu_score,
"bleu_score": bleu_score}
| 13,556 | 35.640541 | 80 | py |
models | models-master/official/nlp/tasks/electra_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ELECTRA pretraining task (Joint Masked LM and Replaced Token Detection)."""
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.configs import bert
from official.nlp.configs import electra
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class ElectraPretrainConfig(cfg.TaskConfig):
"""The model config."""
model: electra.ElectraPretrainerConfig = dataclasses.field(
default_factory=lambda: electra.ElectraPretrainerConfig( # pylint: disable=g-long-lambda
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence',
)
]
)
)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
def _build_pretrainer(
config: electra.ElectraPretrainerConfig) -> models.ElectraPretrainer:
"""Instantiates ElectraPretrainer from the config."""
generator_encoder_cfg = config.generator_encoder
discriminator_encoder_cfg = config.discriminator_encoder
# Copy discriminator's embeddings to generator for easier model serialization.
discriminator_network = encoders.build_encoder(discriminator_encoder_cfg)
if config.tie_embeddings:
embedding_layer = discriminator_network.get_embedding_layer()
generator_network = encoders.build_encoder(
generator_encoder_cfg, embedding_layer=embedding_layer)
else:
generator_network = encoders.build_encoder(generator_encoder_cfg)
generator_encoder_cfg = generator_encoder_cfg.get()
return models.ElectraPretrainer(
generator_network=generator_network,
discriminator_network=discriminator_network,
vocab_size=generator_encoder_cfg.vocab_size,
num_classes=config.num_classes,
sequence_length=config.sequence_length,
num_token_predictions=config.num_masked_tokens,
mlm_activation=tf_utils.get_activation(
generator_encoder_cfg.hidden_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=generator_encoder_cfg.initializer_range),
classification_heads=[
layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
],
disallow_correct=config.disallow_correct)
@task_factory.register_task_cls(ElectraPretrainConfig)
class ElectraPretrainTask(base_task.Task):
"""ELECTRA Pretrain Task (Masked LM + Replaced Token Detection)."""
def build_model(self):
return _build_pretrainer(self.task_config.model)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> tf.Tensor:
metrics = dict([(metric.name, metric) for metric in metrics])
# generator lm and (optional) nsp loss.
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['lm_outputs'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
model_outputs['sentence_outputs'], dtype=tf.float32)
      sentence_loss = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(
              sentence_labels, sentence_outputs, from_logits=True))
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
# discriminator replaced token detection (rtd) loss.
rtd_logits = model_outputs['disc_logits']
rtd_labels = tf.cast(model_outputs['disc_label'], tf.float32)
input_mask = tf.cast(labels['input_mask'], tf.float32)
rtd_ind_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=rtd_logits, labels=rtd_labels)
rtd_numerator = tf.reduce_sum(input_mask * rtd_ind_loss)
rtd_denominator = tf.reduce_sum(input_mask)
rtd_loss = tf.math.divide_no_nan(rtd_numerator, rtd_denominator)
metrics['discriminator_loss'].update_state(rtd_loss)
total_loss = total_loss + \
self.task_config.model.discriminator_loss_weight * rtd_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
metrics['total_loss'].update_state(total_loss)
return total_loss
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return pretrain_dataloader.BertPretrainDataLoader(params).load(
input_context)
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='lm_example_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='discriminator_accuracy'),
]
if self.task_config.train_data.use_next_sentence_label:
metrics.append(
tf.keras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))
metrics.append(tf.keras.metrics.Mean(name='discriminator_loss'))
metrics.append(tf.keras.metrics.Mean(name='total_loss'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(labels['masked_lm_ids'],
model_outputs['lm_outputs'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'], model_outputs['sentence_outputs'])
if 'discriminator_accuracy' in metrics:
disc_logits_expanded = tf.expand_dims(model_outputs['disc_logits'], -1)
discrim_full_logits = tf.concat(
[-1.0 * disc_logits_expanded, disc_logits_expanded], -1)
metrics['discriminator_accuracy'].update_state(
model_outputs['disc_label'], discrim_full_logits,
labels['input_mask'])
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = model(inputs, training=False)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
| 9,850 | 38.562249 | 95 | py |
models | models-master/official/nlp/tasks/tagging.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tagging (e.g., NER/POS) task."""
from typing import List, Optional, Tuple
import dataclasses
import orbit
from seqeval import metrics as seqeval_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base span labeler configuration."""
encoder: encoders.EncoderConfig = dataclasses.field(default_factory=encoders.EncoderConfig)
head_dropout: float = 0.1
head_initializer_range: float = 0.02
@dataclasses.dataclass
class TaggingConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
# The real class names, the order of which should match real label id.
  # Note that a word may be tokenized into multiple word-piece tokens, and
  # we assume the real label id (non-negative) is assigned to the first token
# of the word, and a negative label id is assigned to the remaining tokens.
# The negative label id will not contribute to loss and metrics.
class_names: Optional[List[str]] = None
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
def _masked_labels_and_weights(y_true):
"""Masks negative values from token level labels.
Args:
y_true: Token labels, typically shape (batch_size, seq_len), where tokens
with negative labels should be ignored during loss/accuracy calculation.
Returns:
(masked_y_true, masked_weights) where `masked_y_true` is the input
with each negative label replaced with zero and `masked_weights` is 0.0
where negative labels were replaced and 1.0 for original labels.
"""
# Ignore the classes of tokens with negative values.
mask = tf.greater_equal(y_true, 0)
# Replace negative labels, which are out of bounds for some loss functions,
# with zero.
masked_y_true = tf.where(mask, y_true, 0)
return masked_y_true, tf.cast(mask, tf.float32)
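# Worked sketch of the masking above (values are illustrative only):
#   y_true         = [[ 2, -1,  0]]
#   masked_y_true  = [[ 2,  0,  0]]   # negative label replaced with zero
#   masked_weights = [[1., 0., 1.]]   # zero weight where the label was negative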
@task_factory.register_task_cls(TaggingConfig)
class TaggingTask(base_task.Task):
"""Task object for tagging (e.g., NER or POS)."""
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
return models.BertTokenClassifier(
network=encoder_network,
num_classes=len(self.task_config.class_names),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self.task_config.model.head_initializer_range),
dropout_rate=self.task_config.model.head_dropout,
output='logits',
output_encoder_outputs=True)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
logits = tf.cast(model_outputs['logits'], tf.float32)
masked_labels, masked_weights = _masked_labels_and_weights(labels)
loss = tf.keras.losses.sparse_categorical_crossentropy(
masked_labels, logits, from_logits=True)
numerator_loss = tf.reduce_sum(loss * masked_weights)
denominator_loss = tf.reduce_sum(masked_weights)
loss = tf.math.divide_no_nan(numerator_loss, denominator_loss)
return loss
def build_inputs(self, params: cfg.DataConfig, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
# Include some label_id as -1, which will be ignored in loss/metrics.
y = tf.random.uniform(
shape=(1, params.seq_length),
minval=-1,
maxval=len(self.task_config.class_names),
dtype=tf.dtypes.int32)
return (x, y)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def inference_step(self, inputs, model: tf.keras.Model):
"""Performs the forward step."""
logits = model(inputs, training=False)['logits']
return {'logits': logits,
'predict_ids': tf.argmax(logits, axis=-1, output_type=tf.int32)}
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(labels=labels, model_outputs=outputs)
# Negative label ids are padding labels which should be ignored.
real_label_index = tf.where(tf.greater_equal(labels, 0))
predict_ids = tf.gather_nd(outputs['predict_ids'], real_label_index)
label_ids = tf.gather_nd(labels, real_label_index)
return {
self.loss: loss,
'predict_ids': predict_ids,
'label_ids': label_ids,
}
def aggregate_logs(self, state=None, step_outputs=None):
"""Aggregates over logs returned from a validation step."""
if state is None:
state = {'predict_class': [], 'label_class': []}
def id_to_class_name(batched_ids):
class_names = []
for per_example_ids in batched_ids:
class_names.append([])
for per_token_id in per_example_ids.numpy().tolist():
class_names[-1].append(self.task_config.class_names[per_token_id])
return class_names
# Convert id to class names, because `seqeval_metrics` relies on the class
# name to decide IOB tags.
state['predict_class'].extend(id_to_class_name(step_outputs['predict_ids']))
state['label_class'].extend(id_to_class_name(step_outputs['label_ids']))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
"""Reduces aggregated logs over validation steps."""
label_class = aggregated_logs['label_class']
predict_class = aggregated_logs['predict_class']
return {
'f1':
seqeval_metrics.f1_score(label_class, predict_class),
'precision':
seqeval_metrics.precision_score(label_class, predict_class),
'recall':
seqeval_metrics.recall_score(label_class, predict_class),
'accuracy':
seqeval_metrics.accuracy_score(label_class, predict_class),
}
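  # Input format sketch for the seqeval metrics above (tags are illustrative):
  #   label_class   = [['B-PER', 'I-PER', 'O'], ['B-LOC']]
  #   predict_class = [['B-PER', 'O', 'O'], ['B-LOC']]
  # i.e. one list of IOB-style class-name strings per sentence.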
def predict(task: TaggingTask,
params: cfg.DataConfig,
model: tf.keras.Model) -> List[Tuple[int, int, List[int]]]:
"""Predicts on the input data.
Args:
task: A `TaggingTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
Returns:
    A list of tuples. Each tuple contains `sentence_id`, `sub_sentence_id` and
a list of predicted ids.
"""
def predict_step(inputs):
"""Replicated prediction calculation."""
x, y = inputs
sentence_ids = x.pop('sentence_id')
sub_sentence_ids = x.pop('sub_sentence_id')
outputs = task.inference_step(x, model)
predict_ids = outputs['predict_ids']
label_mask = tf.greater_equal(y, 0)
return dict(
predict_ids=predict_ids,
label_mask=label_mask,
sentence_ids=sentence_ids,
sub_sentence_ids=sub_sentence_ids)
def aggregate_fn(state, outputs):
"""Concatenates model's outputs."""
if state is None:
state = []
for (batch_predict_ids, batch_label_mask, batch_sentence_ids,
batch_sub_sentence_ids) in zip(outputs['predict_ids'],
outputs['label_mask'],
outputs['sentence_ids'],
outputs['sub_sentence_ids']):
for (tmp_predict_ids, tmp_label_mask, tmp_sentence_id,
tmp_sub_sentence_id) in zip(batch_predict_ids.numpy(),
batch_label_mask.numpy(),
batch_sentence_ids.numpy(),
batch_sub_sentence_ids.numpy()):
real_predict_ids = []
assert len(tmp_predict_ids) == len(tmp_label_mask)
for i in range(len(tmp_predict_ids)):
# Skip the padding label.
if tmp_label_mask[i]:
real_predict_ids.append(tmp_predict_ids[i])
state.append((tmp_sentence_id, tmp_sub_sentence_id, real_predict_ids))
return state
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs, params)
outputs = utils.predict(predict_step, aggregate_fn, dataset)
return sorted(outputs, key=lambda x: (x[0], x[1]))
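# Hypothetical calling sketch (task, params and model are assumed to be built
# elsewhere, e.g. from a TaggingConfig):
#   results = predict(task, task.task_config.validation_data, model)
#   for sentence_id, sub_sentence_id, predict_ids in results:
#     print(sentence_id, sub_sentence_id, predict_ids)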
| 10,220 | 37.424812 | 93 | py |
models | models-master/official/nlp/tasks/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utils for tasks."""
from typing import Any, Callable
import orbit
import tensorflow as tf
import tensorflow_hub as hub
def get_encoder_from_hub(hub_model_path: str) -> tf.keras.Model:
"""Gets an encoder from hub.
Args:
hub_model_path: The path to the tfhub model.
Returns:
A tf.keras.Model.
"""
input_word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
input_mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
input_type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
hub_layer = hub.KerasLayer(hub_model_path, trainable=True)
output_dict = {}
dict_input = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
output_dict = hub_layer(dict_input)
return tf.keras.Model(inputs=dict_input, outputs=output_dict)
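# Usage sketch (the hub handle below is a placeholder, not a verified URL):
#   encoder = get_encoder_from_hub('/path/to/exported_bert_encoder')
#   outputs = encoder(dict(input_word_ids=ids, input_mask=mask,
#                          input_type_ids=type_ids))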
def predict(predict_step_fn: Callable[[Any], Any],
aggregate_fn: Callable[[Any, Any], Any], dataset: tf.data.Dataset):
"""Runs prediction.
Args:
predict_step_fn: A callable such as `def predict_step(inputs)`, where
`inputs` are input tensors.
aggregate_fn: A callable such as `def aggregate_fn(state, value)`, where
`value` is the outputs from `predict_step_fn`.
dataset: A `tf.data.Dataset` object.
Returns:
The aggregated predictions.
"""
@tf.function
def predict_step(iterator):
"""Predicts on distributed devices."""
outputs = tf.distribute.get_strategy().run(
predict_step_fn, args=(next(iterator),))
return tf.nest.map_structure(
tf.distribute.get_strategy().experimental_local_results, outputs)
loop_fn = orbit.utils.create_loop_fn(predict_step)
# Set `num_steps` to -1 to exhaust the dataset.
outputs = loop_fn(
iter(dataset), num_steps=-1, state=None, reduce_fn=aggregate_fn) # pytype: disable=wrong-arg-types
return outputs
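# Minimal sketch of the callables expected above (names are illustrative and
# `model`/`dataset` are assumed to exist):
#   def predict_step_fn(inputs):
#     return model(inputs, training=False)
#   def aggregate_fn(state, value):
#     state = state or []
#     state.append(value)
#     return state
#   outputs = predict(predict_step_fn, aggregate_fn, dataset)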
| 2,575 | 32.454545 | 105 | py |
models | models-master/official/nlp/tasks/question_answering.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Question answering task."""
import dataclasses
import functools
import json
import os
from typing import List, Optional
from absl import logging
import orbit
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.data import squad_lib as squad_lib_wp
from official.nlp.data import squad_lib_sp
from official.nlp.modeling import models
from official.nlp.tasks import utils
from official.nlp.tools import squad_evaluate_v1_1
from official.nlp.tools import squad_evaluate_v2_0
from official.nlp.tools import tokenization
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base span labeler configuration."""
encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
@dataclasses.dataclass
class QuestionAnsweringConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
n_best_size: int = 20
max_answer_length: int = 30
null_score_diff_threshold: float = 0.0
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
@dataclasses.dataclass
class RawAggregatedResult:
"""Raw representation for SQuAD predictions."""
unique_id: int
start_logits: List[float]
end_logits: List[float]
start_indexes: Optional[List[int]] = None
end_indexes: Optional[List[int]] = None
class_logits: Optional[float] = None
@task_factory.register_task_cls(QuestionAnsweringConfig)
class QuestionAnsweringTask(base_task.Task):
"""Task object for question answering."""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
if params.validation_data is None:
return
if params.validation_data.tokenization == 'WordPiece':
self.squad_lib = squad_lib_wp
elif params.validation_data.tokenization == 'SentencePiece':
self.squad_lib = squad_lib_sp
else:
raise ValueError('Unsupported tokenization method: {}'.format(
params.validation_data.tokenization))
if params.validation_data.input_path:
self._tf_record_input_path, self._eval_examples, self._eval_features = (
self._preprocess_eval_data(params.validation_data))
def set_preprocessed_eval_input_path(self, eval_input_path):
"""Sets the path to the preprocessed eval data."""
self._tf_record_input_path = eval_input_path
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
return models.BertSpanLabeler(
network=encoder_network,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range))
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
start_positions = labels['start_positions']
end_positions = labels['end_positions']
start_logits, end_logits = model_outputs
start_loss = tf.keras.losses.sparse_categorical_crossentropy(
start_positions,
tf.cast(start_logits, dtype=tf.float32),
from_logits=True)
end_loss = tf.keras.losses.sparse_categorical_crossentropy(
end_positions, tf.cast(end_logits, dtype=tf.float32), from_logits=True)
loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
return loss
def _preprocess_eval_data(self, params):
eval_examples = self.squad_lib.read_squad_examples(
input_file=params.input_path,
is_training=False,
version_2_with_negative=params.version_2_with_negative)
temp_file_path = params.input_preprocessed_data_path or self.logging_dir
if not temp_file_path:
raise ValueError('You must specify a temporary directory, either in '
'params.input_preprocessed_data_path or logging_dir to '
'store intermediate evaluation TFRecord data.')
eval_writer = self.squad_lib.FeatureWriter(
filename=os.path.join(temp_file_path, 'eval.tf_record'),
is_training=False)
eval_features = []
def _append_feature(feature, is_padding):
if not is_padding:
eval_features.append(feature)
eval_writer.process_feature(feature)
# XLNet preprocesses SQuAD examples in a P, Q, class order whereas
# BERT preprocesses in a class, Q, P order.
xlnet_ordering = self.task_config.model.encoder.type == 'xlnet'
kwargs = dict(
examples=eval_examples,
max_seq_length=params.seq_length,
doc_stride=params.doc_stride,
max_query_length=params.query_length,
is_training=False,
output_fn=_append_feature,
batch_size=params.global_batch_size,
xlnet_format=xlnet_ordering)
if params.tokenization == 'SentencePiece':
# squad_lib_sp requires one more argument 'do_lower_case'.
kwargs['do_lower_case'] = params.do_lower_case
kwargs['tokenizer'] = tokenization.FullSentencePieceTokenizer(
sp_model_file=params.vocab_file)
elif params.tokenization == 'WordPiece':
kwargs['tokenizer'] = tokenization.FullTokenizer(
vocab_file=params.vocab_file, do_lower_case=params.do_lower_case)
else:
raise ValueError('Unexpected tokenization: %s' % params.tokenization)
eval_dataset_size = self.squad_lib.convert_examples_to_features(**kwargs)
eval_writer.close()
logging.info('***** Evaluation input stats *****')
logging.info(' Num orig examples = %d', len(eval_examples))
logging.info(' Num split examples = %d', len(eval_features))
logging.info(' Batch size = %d', params.global_batch_size)
logging.info(' Dataset size = %d', eval_dataset_size)
return eval_writer.filename, eval_examples, eval_features
def _dummy_data(self, params, _):
"""Returns dummy data."""
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
y = dict(
start_positions=tf.constant(0, dtype=tf.int32),
end_positions=tf.constant(1, dtype=tf.int32),
is_impossible=tf.constant(0, dtype=tf.int32))
return x, y
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dummy_data = functools.partial(self._dummy_data, params)
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
if params.is_training:
dataloader_params = params
else:
input_path = self._tf_record_input_path
dataloader_params = params.replace(input_path=input_path)
return data_loader_factory.get_data_loader(dataloader_params).load(
input_context)
def build_metrics(self, training=None):
if not training:
# We cannot compute start/end_position_accuracy because start/end_position
# labels are not available in the validation dataset (b/173794928).
return []
# TODO(lehou): a list of metrics doesn't work the same as in compile/fit.
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(
name='start_position_accuracy'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='end_position_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
metrics = dict([(metric.name, metric) for metric in metrics])
start_logits, end_logits = model_outputs
metrics['start_position_accuracy'].update_state(labels['start_positions'],
start_logits)
metrics['end_position_accuracy'].update_state(labels['end_positions'],
end_logits)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
start_logits, end_logits = model_outputs
compiled_metrics.update_state(
y_true=labels, # labels has keys 'start_positions' and 'end_positions'.
y_pred={
'start_positions': start_logits,
'end_positions': end_logits
})
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, _ = inputs
unique_ids = features.pop('unique_ids')
model_outputs = self.inference_step(features, model)
start_logits, end_logits = model_outputs
# We cannot compute validation_loss here, because start/end_position
# labels are not available in the validation dataset (b/173794928).
logs = {
'unique_ids': unique_ids,
'start_logits': start_logits,
'end_logits': end_logits,
}
return logs
def aggregate_logs(self, state=None, step_outputs=None):
assert step_outputs is not None, 'Got no logs from self.validation_step.'
if state is None:
state = []
for outputs in zip(step_outputs['unique_ids'],
step_outputs['start_logits'],
step_outputs['end_logits']):
numpy_values = [
output.numpy() for output in outputs if output is not None]
for values in zip(*numpy_values):
state.append(RawAggregatedResult(
unique_id=values[0],
start_logits=values[1],
end_logits=values[2]))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
all_predictions, _, scores_diff = (
self.squad_lib.postprocess_output(
self._eval_examples,
self._eval_features,
aggregated_logs,
self.task_config.n_best_size,
self.task_config.max_answer_length,
self.task_config.validation_data.do_lower_case,
version_2_with_negative=(
self.task_config.validation_data.version_2_with_negative),
null_score_diff_threshold=(
self.task_config.null_score_diff_threshold),
xlnet_format=self.task_config.validation_data.xlnet_format,
verbose=False))
with tf.io.gfile.GFile(self.task_config.validation_data.input_path,
'r') as reader:
dataset_json = json.load(reader)
pred_dataset = dataset_json['data']
if self.task_config.validation_data.version_2_with_negative:
eval_metrics = squad_evaluate_v2_0.evaluate(pred_dataset, all_predictions,
scores_diff)
eval_metrics = {
'exact_match': eval_metrics['final_exact'],
'exact_match_threshold': eval_metrics['final_exact_thresh'],
'final_f1': eval_metrics['final_f1'] / 100.0, # scale back to [0, 1].
'f1_threshold': eval_metrics['final_f1_thresh'],
'has_answer_exact_match': eval_metrics['HasAns_exact'],
'has_answer_f1': eval_metrics['HasAns_f1']
}
else:
eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions)
eval_metrics = {
'exact_match': eval_metrics['exact_match'],
'final_f1': eval_metrics['final_f1']
}
return eval_metrics
@dataclasses.dataclass
class XLNetQuestionAnsweringConfig(QuestionAnsweringConfig):
"""The config for the XLNet variation of QuestionAnswering."""
pass
@task_factory.register_task_cls(XLNetQuestionAnsweringConfig)
class XLNetQuestionAnsweringTask(QuestionAnsweringTask):
"""XLNet variant of the Question Answering Task.
The main differences include:
- The encoder is an `XLNetBase` class.
- The `SpanLabeling` head is an instance of `XLNetSpanLabeling` which
predicts start/end positions and impossibility score. During inference,
it predicts the top N scores and indexes.
"""
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
return models.XLNetSpanLabeler(
network=encoder_network,
start_n_top=self.task_config.n_best_size,
end_n_top=self.task_config.n_best_size,
initializer=tf.keras.initializers.RandomNormal(
stddev=encoder_cfg.initializer_range))
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
start_positions = labels['start_positions']
end_positions = labels['end_positions']
is_impossible = labels['is_impossible']
is_impossible = tf.cast(tf.reshape(is_impossible, [-1]), tf.float32)
start_logits = model_outputs['start_logits']
end_logits = model_outputs['end_logits']
class_logits = model_outputs['class_logits']
start_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
start_positions, start_logits)
end_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
end_positions, end_logits)
is_impossible_loss = tf.keras.losses.binary_crossentropy(
is_impossible, class_logits, from_logits=True)
loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
loss += tf.reduce_mean(is_impossible_loss) / 2
return loss
def process_metrics(self, metrics, labels, model_outputs):
metrics = dict([(metric.name, metric) for metric in metrics])
start_logits = model_outputs['start_logits']
end_logits = model_outputs['end_logits']
metrics['start_position_accuracy'].update_state(labels['start_positions'],
start_logits)
metrics['end_position_accuracy'].update_state(labels['end_positions'],
end_logits)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
start_logits = model_outputs['start_logits']
end_logits = model_outputs['end_logits']
compiled_metrics.update_state(
y_true=labels, # labels has keys 'start_positions' and 'end_positions'.
y_pred={
'start_positions': start_logits,
'end_positions': end_logits,
})
def _dummy_data(self, params, _):
"""Returns dummy data."""
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
zero = tf.constant(0, dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
class_index=zero,
is_impossible=zero,
paragraph_mask=dummy_ids,
start_positions=tf.zeros((1), dtype=tf.int32))
y = dict(
start_positions=tf.zeros((1), dtype=tf.int32),
end_positions=tf.ones((1), dtype=tf.int32),
is_impossible=zero)
return x, y
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, _ = inputs
unique_ids = features.pop('unique_ids')
model_outputs = self.inference_step(features, model)
start_top_predictions = model_outputs['start_top_predictions']
end_top_predictions = model_outputs['end_top_predictions']
start_indexes = model_outputs['start_top_index']
end_indexes = model_outputs['end_top_index']
class_logits = model_outputs['class_logits']
logs = {
'unique_ids': unique_ids,
'start_top_predictions': start_top_predictions,
'end_top_predictions': end_top_predictions,
'start_indexes': start_indexes,
'end_indexes': end_indexes,
'class_logits': class_logits,
}
return logs
def aggregate_logs(self, state=None, step_outputs=None):
assert step_outputs is not None, 'Got no logs from self.validation_step.'
if state is None:
state = []
for outputs in zip(step_outputs['unique_ids'],
step_outputs['start_top_predictions'],
step_outputs['end_top_predictions'],
step_outputs['start_indexes'],
step_outputs['end_indexes'],
step_outputs['class_logits']):
numpy_values = [
output.numpy() for output in outputs]
for (unique_id, start_top_predictions, end_top_predictions, start_indexes,
end_indexes, class_logits) in zip(*numpy_values):
state.append(RawAggregatedResult(
unique_id=unique_id,
start_logits=start_top_predictions.tolist(),
end_logits=end_top_predictions.tolist(),
start_indexes=start_indexes.tolist(),
end_indexes=end_indexes.tolist(),
class_logits=class_logits))
return state
def predict(task: QuestionAnsweringTask, params: cfg.DataConfig,
model: tf.keras.Model):
"""Predicts on the input data.
Args:
task: A `QuestionAnsweringTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
Returns:
A tuple of `all_predictions`, `all_nbest` and `scores_diff`, which
are dict and can be written to json files including prediction json file,
nbest json file and null_odds json file.
"""
tf_record_input_path, eval_examples, eval_features = (
task._preprocess_eval_data(params)) # pylint: disable=protected-access
  # `tf_record_input_path` will overwrite `params.input_path`
  # when `task.build_inputs()` is called.
task.set_preprocessed_eval_input_path(tf_record_input_path)
def predict_step(inputs):
"""Replicated prediction calculation."""
return task.validation_step(inputs, model)
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs, params)
aggregated_outputs = utils.predict(predict_step, task.aggregate_logs, dataset)
all_predictions, all_nbest, scores_diff = (
task.squad_lib.postprocess_output(
eval_examples,
eval_features,
aggregated_outputs,
task.task_config.n_best_size,
task.task_config.max_answer_length,
task.task_config.validation_data.do_lower_case,
version_2_with_negative=(params.version_2_with_negative),
null_score_diff_threshold=task.task_config.null_score_diff_threshold,
xlnet_format=task.task_config.validation_data.xlnet_format,
verbose=False))
return all_predictions, all_nbest, scores_diff
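# A minimal usage sketch for `predict` (illustrative only; assumes a
# fine-tuned `model`, a constructed `QuestionAnsweringTask` named `task`, and
# a hypothetical output path):
#
#   all_predictions, all_nbest, scores_diff = predict(
#       task, task.task_config.validation_data, model)
#   with tf.io.gfile.GFile('/tmp/squad_predictions.json', 'w') as writer:
#     writer.write(json.dumps(all_predictions, indent=4) + '\n')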
| 19,969 | 38.701789 | 80 | py |
models | models-master/official/nlp/tasks/dual_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual encoder (retrieval) task."""
from typing import Mapping, Tuple
# Import libraries
from absl import logging
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A dual encoder (retrieval) configuration."""
# Normalize input embeddings if set to True.
normalize: bool = True
# Maximum input sequence length.
max_sequence_length: int = 64
# Parameters for training a dual encoder model with additive margin, see
# https://www.ijcai.org/Proceedings/2019/0746.pdf for more details.
logit_scale: float = 1
logit_margin: float = 0
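  # For example (a sketch of the intent, not an exact formula from this file):
  # with `logit_scale=10.0, logit_margin=0.3`, the similarity of each matching
  # pair is reduced by the margin and all similarities are scaled before the
  # in-batch softmax ranking loss; the defaults above keep plain softmax
  # ranking.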
bidirectional: bool = False
# Defining k for calculating metrics recall@k.
eval_top_k: Tuple[int, ...] = (1, 3, 10)
encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
@dataclasses.dataclass
class DualEncoderConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can
# be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
# Defines the concrete model config at instantiation time.
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
@task_factory.register_task_cls(DualEncoderConfig)
class DualEncoderTask(base_task.Task):
"""Task object for dual encoder."""
def build_model(self):
"""Interface to build model. Refer to base_task.Task.build_model."""
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
    # Currently, we only support a BERT-style dual encoder.
return models.DualEncoder(
network=encoder_network,
max_seq_length=self.task_config.model.max_sequence_length,
normalize=self.task_config.model.normalize,
logit_scale=self.task_config.model.logit_scale,
logit_margin=self.task_config.model.logit_margin,
output='logits')
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
"""Interface to compute losses. Refer to base_task.Task.build_losses."""
del labels
left_logits = model_outputs['left_logits']
right_logits = model_outputs['right_logits']
batch_size = tf_utils.get_shape_list(left_logits, name='batch_size')[0]
ranking_labels = tf.range(batch_size)
loss = tf_utils.safe_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=ranking_labels,
logits=left_logits))
if self.task_config.model.bidirectional:
right_rank_loss = tf_utils.safe_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=ranking_labels,
logits=right_logits))
loss += right_rank_loss
return tf.reduce_mean(loss)
def build_inputs(self, params, input_context=None) -> tf.data.Dataset:
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path != 'dummy':
return data_loader_factory.get_data_loader(params).load(input_context)
def dummy_data(_):
dummy_ids = tf.zeros((10, params.seq_length), dtype=tf.int32)
x = dict(
left_word_ids=dummy_ids,
left_mask=dummy_ids,
left_type_ids=dummy_ids,
right_word_ids=dummy_ids,
right_mask=dummy_ids,
right_type_ids=dummy_ids)
return x
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def build_metrics(self, training=None):
del training
metrics = [tf.keras.metrics.Mean(name='batch_size_per_core')]
for k in self.task_config.model.eval_top_k:
metrics.append(tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name=f'left_recall_at_{k}'))
if self.task_config.model.bidirectional:
metrics.append(tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name=f'right_recall_at_{k}'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
del labels
metrics = dict([(metric.name, metric) for metric in metrics])
left_logits = model_outputs['left_logits']
right_logits = model_outputs['right_logits']
batch_size = tf_utils.get_shape_list(
left_logits, name='sequence_output_tensor')[0]
ranking_labels = tf.range(batch_size)
for k in self.task_config.model.eval_top_k:
metrics[f'left_recall_at_{k}'].update_state(ranking_labels, left_logits)
if self.task_config.model.bidirectional:
metrics[f'right_recall_at_{k}'].update_state(ranking_labels,
right_logits)
metrics['batch_size_per_core'].update_state(batch_size)
def validation_step(self,
inputs,
model: tf.keras.Model,
metrics=None) -> Mapping[str, tf.Tensor]:
outputs = model(inputs)
loss = self.build_losses(
labels=None, model_outputs=outputs, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, None, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, None, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def initialize(self, model):
"""Load a pretrained checkpoint (if exists) and then train from iter 0."""
ckpt_dir_or_file = self.task_config.init_checkpoint
logging.info('Trying to load pretrained checkpoint from %s',
ckpt_dir_or_file)
if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
logging.info('No checkpoint file found from %s. Will not load.',
ckpt_dir_or_file)
return
pretrain2finetune_mapping = {
'encoder': model.checkpoint_items['encoder'],
}
ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
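# A minimal smoke-test sketch (illustrative only; it mirrors the dummy-data
# path in `build_inputs` above and the accompanying unit test, not a real
# training setup):
#
#   from official.nlp.data import dual_encoder_dataloader
#   config = DualEncoderConfig(
#       model=ModelConfig(encoder=encoders.EncoderConfig()),
#       train_data=dual_encoder_dataloader.DualEncoderDataConfig(
#           input_path='dummy', seq_length=64))
#   task = DualEncoderTask(config)
#   model = task.build_model()
#   dataset = task.build_inputs(config.train_data)
#   logs = task.validation_step(next(iter(dataset)), model)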
| 7,777 | 35.862559 | 80 | py |
models | models-master/official/nlp/tasks/masked_lm_determinism_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that masked LM models are deterministic when determinism is enabled."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.tasks import masked_lm
class MLMTaskTest(tf.test.TestCase):
def _build_dataset(self, params, vocab_size):
def dummy_data(_):
dummy_ids = tf.random.uniform((1, params.seq_length), maxval=vocab_size,
dtype=tf.int32)
dummy_mask = tf.ones((1, params.seq_length), dtype=tf.int32)
dummy_type_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_mask,
input_type_ids=dummy_type_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _build_and_run_model(self, config, num_steps=5):
task = masked_lm.MaskedLMTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = self._build_dataset(config.train_data,
config.model.encoder.get().vocab_size)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
# Run training
for _ in range(num_steps):
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
for metric in metrics:
logs[metric.name] = metric.result()
# Run validation
validation_logs = task.validation_step(next(iterator), model,
metrics=metrics)
for metric in metrics:
validation_logs[metric.name] = metric.result()
return logs, validation_logs, model.weights
def test_task_determinism(self):
config = masked_lm.MaskedLMConfig(
init_checkpoint=self.get_temp_dir(),
scale_loss=True,
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
tf.keras.utils.set_random_seed(1)
logs1, validation_logs1, weights1 = self._build_and_run_model(config)
tf.keras.utils.set_random_seed(1)
logs2, validation_logs2, weights2 = self._build_and_run_model(config)
self.assertEqual(logs1["loss"], logs2["loss"])
self.assertEqual(validation_logs1["loss"], validation_logs2["loss"])
for weight1, weight2 in zip(weights1, weights2):
self.assertAllEqual(weight1, weight2)
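# Note: the reproducibility check above relies on
# `tf.keras.utils.set_random_seed` (which seeds the Python, NumPy and
# TensorFlow generators) together with
# `tf.config.experimental.enable_op_determinism()`, enabled in the `__main__`
# block below before the tests run.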
if __name__ == "__main__":
tf.config.experimental.enable_op_determinism()
tf.test.main()
| 3,922 | 36.721154 | 80 | py |
models | models-master/official/nlp/tasks/sentence_prediction_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.sentence_prediction."""
import functools
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import masked_lm
from official.nlp.tasks import sentence_prediction
def _create_fake_dataset(output_path, seq_length, num_classes, num_examples):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(values)))
def create_float_feature(values):
return tf.train.Feature(
float_list=tf.train.FloatList(value=np.ravel(values)))
for i in range(num_examples):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["example_id"] = create_int_feature([i])
if num_classes == 1:
features["label_ids"] = create_float_feature([np.random.random()])
else:
features["label_ids"] = create_int_feature(
          [np.random.randint(0, num_classes, size=())])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(SentencePredictionTaskTest, self).setUp()
self._train_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path="dummy", seq_length=128, global_batch_size=1))
def get_model_config(self, num_classes):
return sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)),
num_classes=num_classes)
def _run_task(self, config):
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = strategy.distribute_datasets_from_function(
functools.partial(task.build_inputs, config.train_data))
iterator = iter(dataset)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
return task.validation_step(next(iterator), model, metrics=metrics)
@parameterized.named_parameters(
("init_cls_pooler", True),
("init_encoder", False),
)
def test_task(self, init_cls_pooler):
# Saves a checkpoint.
pretrain_cfg = bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768, num_classes=2, name="next_sentence")
])
pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg)
# The model variables will be created after the forward call.
_ = pretrain_model(pretrain_model.inputs)
ckpt = tf.train.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
init_path = ckpt.save(self.get_temp_dir())
# Creates the task.
config = sentence_prediction.SentencePredictionConfig(
init_checkpoint=init_path,
model=self.get_model_config(num_classes=2),
train_data=self._train_data_config,
init_cls_pooler=init_cls_pooler)
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.initialize(model)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
@parameterized.named_parameters(
{
"testcase_name": "regression",
"num_classes": 1,
},
{
"testcase_name": "classification",
"num_classes": 2,
},
)
def test_metrics_and_losses(self, num_classes):
config = sentence_prediction.SentencePredictionConfig(
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(num_classes),
train_data=self._train_data_config)
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
if num_classes == 1:
self.assertIsInstance(metrics[0], tf.keras.metrics.MeanSquaredError)
else:
self.assertIsInstance(metrics[0],
tf.keras.metrics.SparseCategoricalAccuracy)
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
logs = task.validation_step(next(iterator), model, metrics=metrics)
loss = logs["loss"].numpy()
if num_classes == 1:
self.assertGreater(loss, 1.0)
else:
self.assertLess(loss, 1.0)
@parameterized.parameters(("matthews_corrcoef", 2),
("pearson_spearman_corr", 1),
("f1", 2))
def test_np_metrics(self, metric_type, num_classes):
config = sentence_prediction.SentencePredictionConfig(
metric_type=metric_type,
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(num_classes),
train_data=self._train_data_config)
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
strategy = tf.distribute.get_strategy()
distributed_outputs = strategy.run(
functools.partial(task.validation_step, model=model),
args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
aggregated = task.aggregate_logs(step_outputs=outputs)
aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
self.assertIn(metric_type, task.reduce_aggregated_logs(aggregated))
def test_np_metrics_cola_partial_batch(self):
train_data_path = os.path.join(self.get_temp_dir(), "train.tf_record")
num_examples = 5
global_batch_size = 8
seq_length = 16
_create_fake_dataset(
train_data_path,
seq_length=seq_length,
num_classes=2,
num_examples=num_examples)
train_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=train_data_path,
seq_length=seq_length,
is_training=True,
label_type="int",
global_batch_size=global_batch_size,
drop_remainder=False,
include_example_id=True))
config = sentence_prediction.SentencePredictionConfig(
metric_type="matthews_corrcoef",
model=self.get_model_config(2),
train_data=train_data_config)
outputs = self._run_task(config)
self.assertEqual(outputs["sentence_prediction"].shape.as_list(), [8, 1])
def _export_bert_tfhub(self):
encoder = encoders.build_encoder(
encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
hub_destination = os.path.join(self.get_temp_dir(), "hub")
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
return hub_destination
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = sentence_prediction.SentencePredictionConfig(
hub_module_url=hub_module_url,
model=self.get_model_config(2),
train_data=self._train_data_config)
self._run_task(config)
@parameterized.named_parameters(("classification", 5), ("regression", 1))
def test_prediction(self, num_classes):
task_config = sentence_prediction.SentencePredictionConfig(
model=self.get_model_config(num_classes=num_classes),
train_data=self._train_data_config)
task = sentence_prediction.SentencePredictionTask(task_config)
model = task.build_model()
test_data_path = os.path.join(self.get_temp_dir(), "test.tf_record")
seq_length = 16
num_examples = 100
_create_fake_dataset(
test_data_path,
seq_length=seq_length,
num_classes=num_classes,
num_examples=num_examples)
test_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=test_data_path,
seq_length=seq_length,
is_training=False,
label_type="int" if num_classes > 1 else "float",
global_batch_size=16,
drop_remainder=False,
include_example_id=True))
predictions = sentence_prediction.predict(task, test_data_config, model)
self.assertLen(predictions, num_examples)
for prediction in predictions:
self.assertEqual(prediction.dtype,
tf.int64 if num_classes > 1 else tf.float32)
if __name__ == "__main__":
tf.test.main()
| 10,430 | 37.349265 | 79 | py |
models | models-master/official/nlp/tasks/tagging_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.tagging."""
import functools
import os
import numpy as np
import tensorflow as tf
from official.nlp.configs import encoders
from official.nlp.data import tagging_dataloader
from official.nlp.tasks import tagging
def _create_fake_dataset(output_path, seq_length, num_labels, num_examples):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
for i in range(num_examples):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["label_ids"] = create_int_feature(
        np.random.randint(-1, num_labels, size=(seq_length)))
features["sentence_id"] = create_int_feature([i])
features["sub_sentence_id"] = create_int_feature([0])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class TaggingTest(tf.test.TestCase):
def setUp(self):
super(TaggingTest, self).setUp()
self._encoder_config = encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1))
self._train_data_config = tagging_dataloader.TaggingDataConfig(
input_path="dummy", seq_length=128, global_batch_size=1)
def _run_task(self, config):
task = tagging.TaggingTask(config)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = strategy.distribute_datasets_from_function(
functools.partial(task.build_inputs, config.train_data))
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
def test_task(self):
# Saves a checkpoint.
encoder = encoders.build_encoder(self._encoder_config)
ckpt = tf.train.Checkpoint(encoder=encoder)
saved_path = ckpt.save(self.get_temp_dir())
config = tagging.TaggingConfig(
init_checkpoint=saved_path,
model=tagging.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
class_names=["O", "B-PER", "I-PER"])
task = tagging.TaggingTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
task.initialize(model)
def _export_bert_tfhub(self):
encoder = encoders.build_encoder(
encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
hub_destination = os.path.join(self.get_temp_dir(), "hub")
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
return hub_destination
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = tagging.TaggingConfig(
hub_module_url=hub_module_url,
class_names=["O", "B-PER", "I-PER"],
train_data=self._train_data_config)
self._run_task(config)
def test_seqeval_metrics(self):
config = tagging.TaggingConfig(
model=tagging.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
class_names=["O", "B-PER", "I-PER"])
task = tagging.TaggingTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
strategy = tf.distribute.get_strategy()
distributed_outputs = strategy.run(
functools.partial(task.validation_step, model=model),
args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
aggregated = task.aggregate_logs(step_outputs=outputs)
aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
self.assertCountEqual({"f1", "precision", "recall", "accuracy"},
task.reduce_aggregated_logs(aggregated).keys())
def test_predict(self):
task_config = tagging.TaggingConfig(
model=tagging.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
class_names=["O", "B-PER", "I-PER"])
task = tagging.TaggingTask(task_config)
model = task.build_model()
test_data_path = os.path.join(self.get_temp_dir(), "test.tf_record")
seq_length = 16
num_examples = 100
_create_fake_dataset(
test_data_path,
seq_length=seq_length,
num_labels=len(task_config.class_names),
num_examples=num_examples)
test_data_config = tagging_dataloader.TaggingDataConfig(
input_path=test_data_path,
seq_length=seq_length,
is_training=False,
global_batch_size=16,
drop_remainder=False,
include_sentence_id=True)
results = tagging.predict(task, test_data_config, model)
self.assertLen(results, num_examples)
self.assertLen(results[0], 3)
if __name__ == "__main__":
tf.test.main()
| 6,407 | 36.91716 | 79 | py |
models | models-master/official/nlp/tasks/dual_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.sentence_prediction."""
import functools
import os
from absl.testing import parameterized
import tensorflow as tf
from official.legacy.bert import configs
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import dual_encoder_dataloader
from official.nlp.tasks import dual_encoder
from official.nlp.tasks import masked_lm
from official.nlp.tools import export_tfhub_lib
class DualEncoderTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(DualEncoderTaskTest, self).setUp()
self._train_data_config = (
dual_encoder_dataloader.DualEncoderDataConfig(
input_path="dummy", seq_length=32))
def get_model_config(self):
return dual_encoder.ModelConfig(
max_sequence_length=32,
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
def _run_task(self, config):
task = dual_encoder.DualEncoderTask(config)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = strategy.distribute_datasets_from_function(
functools.partial(task.build_inputs, config.train_data))
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
def test_task(self):
config = dual_encoder.DualEncoderConfig(
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(),
train_data=self._train_data_config)
task = dual_encoder.DualEncoderTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
# Saves a checkpoint.
pretrain_cfg = bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg)
ckpt = tf.train.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
ckpt.save(config.init_checkpoint)
task.initialize(model)
def _export_bert_tfhub(self):
bert_config = configs.BertConfig(
vocab_size=30522,
hidden_size=16,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_hidden_layers=4)
encoder = export_tfhub_lib.get_bert_encoder(bert_config)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = tf.train.Checkpoint(encoder=encoder)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt")
with tf.io.gfile.GFile(vocab_file, "w") as f:
f.write("dummy content")
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path,
bert_config=bert_config,
encoder_config=None,
model_checkpoint_path=model_checkpoint_path,
vocab_file=vocab_file,
do_lower_case=True,
with_mlm=False)
return export_path
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = dual_encoder.DualEncoderConfig(
hub_module_url=hub_module_url,
model=self.get_model_config(),
train_data=self._train_data_config)
self._run_task(config)
if __name__ == "__main__":
tf.test.main()
| 4,561 | 34.92126 | 77 | py |
models | models-master/official/nlp/tasks/sentence_prediction.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sentence prediction (classification) task."""
import dataclasses
from typing import List, Union, Optional
from absl import logging
import numpy as np
import orbit
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils
METRIC_TYPES = frozenset(
['accuracy', 'f1', 'matthews_corrcoef', 'pearson_spearman_corr'])
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A classifier/regressor configuration."""
num_classes: int = 0
use_encoder_pooler: bool = False
  encoder: encoders.EncoderConfig = dataclasses.field(
      default_factory=encoders.EncoderConfig
  )
@dataclasses.dataclass
class SentencePredictionConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can
# be specified.
init_checkpoint: str = ''
init_cls_pooler: bool = False
hub_module_url: str = ''
metric_type: str = 'accuracy'
# Defines the concrete model config at instantiation time.
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
  validation_data: cfg.DataConfig = dataclasses.field(
      default_factory=cfg.DataConfig
  )
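# An illustrative configuration sketch (values are assumptions that mirror the
# unit tests, not defaults from any released experiment):
#
#   from official.nlp.data import sentence_prediction_dataloader
#   data_cfg = sentence_prediction_dataloader.SentencePredictionDataConfig(
#       input_path='/path/to/train.tf_record',  # hypothetical path
#       seq_length=128, global_batch_size=32)
#   config = SentencePredictionConfig(
#       metric_type='accuracy',
#       model=ModelConfig(
#           encoder=encoders.EncoderConfig(
#               bert=encoders.BertEncoderConfig(num_layers=1)),
#           num_classes=2),
#       train_data=data_cfg)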
@task_factory.register_task_cls(SentencePredictionConfig)
class SentencePredictionTask(base_task.Task):
"""Task object for sentence_prediction."""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
if params.metric_type not in METRIC_TYPES:
raise ValueError('Invalid metric_type: {}'.format(params.metric_type))
self.metric_type = params.metric_type
if hasattr(params.train_data, 'label_field'):
self.label_field = params.train_data.label_field
else:
self.label_field = 'label_ids'
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
if self.task_config.model.encoder.type == 'xlnet':
return models.XLNetClassifier(
network=encoder_network,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.RandomNormal(
stddev=encoder_cfg.initializer_range))
else:
return models.BertClassifier(
network=encoder_network,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
use_encoder_pooler=self.task_config.model.use_encoder_pooler)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
label_ids = labels[self.label_field]
if self.task_config.model.num_classes == 1:
loss = tf.keras.losses.mean_squared_error(label_ids, model_outputs)
else:
loss = tf.keras.losses.sparse_categorical_crossentropy(
label_ids, tf.cast(model_outputs, tf.float32), from_logits=True)
if aux_losses:
loss += tf.add_n(aux_losses)
return tf_utils.safe_mean(loss)
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
if self.task_config.model.num_classes == 1:
y = tf.zeros((1,), dtype=tf.float32)
else:
y = tf.zeros((1, 1), dtype=tf.int32)
x[self.label_field] = y
return x
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
if self.task_config.model.num_classes == 1:
metrics = [tf.keras.metrics.MeanSquaredError()]
elif self.task_config.model.num_classes == 2:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
tf.keras.metrics.AUC(name='auc', curve='PR'),
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
for metric in metrics:
if metric.name == 'auc':
        # Convert the logits to probabilities and extract the probability of
        # the positive class.
metric.update_state(
labels[self.label_field],
tf.expand_dims(tf.nn.softmax(model_outputs)[:, 1], axis=1))
if metric.name == 'cls_accuracy':
metric.update_state(labels[self.label_field], model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
compiled_metrics.update_state(labels[self.label_field], model_outputs)
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
if self.metric_type == 'matthews_corrcoef':
logs.update({
'sentence_prediction': # Ensure one prediction along batch dimension.
tf.expand_dims(tf.math.argmax(outputs, axis=1), axis=1),
'labels':
labels[self.label_field],
})
else:
logs.update({
'sentence_prediction': outputs,
'labels': labels[self.label_field],
})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self.metric_type == 'accuracy':
return None
if state is None:
state = {'sentence_prediction': [], 'labels': []}
state['sentence_prediction'].append(
np.concatenate([v.numpy() for v in step_outputs['sentence_prediction']],
axis=0))
state['labels'].append(
np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.metric_type == 'accuracy':
return None
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
if self.metric_type == 'f1':
preds = np.argmax(preds, axis=1)
return {self.metric_type: sklearn_metrics.f1_score(labels, preds)}
elif self.metric_type == 'matthews_corrcoef':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
return {
self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
}
elif self.metric_type == 'pearson_spearman_corr':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
corr_metric = (pearson_corr + spearman_corr) / 2
return {self.metric_type: corr_metric}
def initialize(self, model):
"""Load a pretrained checkpoint (if exists) and then train from iter 0."""
ckpt_dir_or_file = self.task_config.init_checkpoint
logging.info('Trying to load pretrained checkpoint from %s',
ckpt_dir_or_file)
if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
logging.info('No checkpoint file found from %s. Will not load.',
ckpt_dir_or_file)
return
pretrain2finetune_mapping = {
'encoder': model.checkpoint_items['encoder'],
}
if self.task_config.init_cls_pooler:
# This option is valid when use_encoder_pooler is false.
pretrain2finetune_mapping[
'next_sentence.pooler_dense'] = model.checkpoint_items[
'sentence_prediction.pooler_dense']
ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def predict(task: SentencePredictionTask,
params: cfg.DataConfig,
model: tf.keras.Model,
params_aug: Optional[cfg.DataConfig] = None,
test_time_aug_wgt: float = 0.3) -> List[Union[int, float]]:
"""Predicts on the input data.
Args:
task: A `SentencePredictionTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
params_aug: A `cfg.DataConfig` object for augmented data.
test_time_aug_wgt: Test time augmentation weight. The prediction score will
use (1. - test_time_aug_wgt) original prediction plus test_time_aug_wgt
augmented prediction.
Returns:
A list of predictions with length of `num_examples`. For regression task,
each element in the list is the predicted score; for classification task,
each element is the predicted class id.
"""
def predict_step(inputs):
"""Replicated prediction calculation."""
x = inputs
example_id = x.pop('example_id')
outputs = task.inference_step(x, model)
return dict(example_id=example_id, predictions=outputs)
def aggregate_fn(state, outputs):
"""Concatenates model's outputs."""
if state is None:
state = []
for per_replica_example_id, per_replica_batch_predictions in zip(
outputs['example_id'], outputs['predictions']):
state.extend(zip(per_replica_example_id, per_replica_batch_predictions))
return state
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs, params)
outputs = utils.predict(predict_step, aggregate_fn, dataset)
# When running on TPU POD, the order of output cannot be maintained,
# so we need to sort by example_id.
outputs = sorted(outputs, key=lambda x: x[0])
is_regression = task.task_config.model.num_classes == 1
if params_aug is not None:
dataset_aug = orbit.utils.make_distributed_dataset(
tf.distribute.get_strategy(), task.build_inputs, params_aug)
outputs_aug = utils.predict(predict_step, aggregate_fn, dataset_aug)
outputs_aug = sorted(outputs_aug, key=lambda x: x[0])
if is_regression:
return [(1. - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1]
for x, y in zip(outputs, outputs_aug)]
else:
return [
tf.argmax(
(1. - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1],
axis=-1) for x, y in zip(outputs, outputs_aug)
]
if is_regression:
return [x[1] for x in outputs]
else:
return [tf.argmax(x[1], axis=-1) for x in outputs]
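# Usage sketch (illustrative only; assumes a fine-tuned classifier and an eval
# data config built with `include_example_id=True`, as in the unit tests, so
# `example_id` is available for re-ordering the predictions):
#
#   predictions = predict(task, eval_data_config, model)
#   # Classification: each element is a predicted class id; regression
#   # (num_classes == 1): each element is the predicted score.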
| 12,638 | 38.496875 | 93 | py |
models | models-master/official/nlp/tasks/masked_lm_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.masked_lm."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.tasks import masked_lm
class MLMTaskTest(tf.test.TestCase):
def test_task(self):
config = masked_lm.MaskedLMConfig(
init_checkpoint=self.get_temp_dir(),
scale_loss=True,
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path="dummy",
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
task = masked_lm.MaskedLMTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
# Saves a checkpoint.
ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
ckpt.save(config.init_checkpoint)
task.initialize(model)
if __name__ == "__main__":
tf.test.main()
| 2,160 | 33.854839 | 74 | py |
models | models-master/official/nlp/tasks/translation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.translation."""
import functools
import os
import orbit
import tensorflow as tf
from sentencepiece import SentencePieceTrainer
from official.nlp.data import wmt_dataloader
from official.nlp.tasks import translation
def _generate_line_file(filepath, lines):
with tf.io.gfile.GFile(filepath, "w") as f:
for l in lines:
f.write("{}\n".format(l))
def _generate_record_file(filepath, src_lines, tgt_lines):
writer = tf.io.TFRecordWriter(filepath)
for src, tgt in zip(src_lines, tgt_lines):
example = tf.train.Example(
features=tf.train.Features(
feature={
"en": tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[src.encode()])),
"reverse_en": tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tgt.encode()])),
}))
writer.write(example.SerializeToString())
writer.close()
def _train_sentencepiece(input_path, vocab_size, model_path, eos_id=1):
argstr = " ".join([
f"--input={input_path}", f"--vocab_size={vocab_size}",
"--character_coverage=0.995",
f"--model_prefix={model_path}", "--model_type=bpe",
"--bos_id=-1", "--pad_id=0", f"--eos_id={eos_id}", "--unk_id=2"
])
SentencePieceTrainer.Train(argstr)
class TranslationTaskTest(tf.test.TestCase):
def setUp(self):
super(TranslationTaskTest, self).setUp()
self._temp_dir = self.get_temp_dir()
src_lines = [
"abc ede fg",
"bbcd ef a g",
"de f a a g"
]
tgt_lines = [
"dd cc a ef g",
"bcd ef a g",
"gef cd ba"
]
self._record_input_path = os.path.join(self._temp_dir, "inputs.record")
_generate_record_file(self._record_input_path, src_lines, tgt_lines)
self._sentencepeice_input_path = os.path.join(self._temp_dir, "inputs.txt")
_generate_line_file(self._sentencepeice_input_path, src_lines + tgt_lines)
sentencepeice_model_prefix = os.path.join(self._temp_dir, "sp")
_train_sentencepiece(self._sentencepeice_input_path, 11,
sentencepeice_model_prefix)
self._sentencepeice_model_path = "{}.model".format(
sentencepeice_model_prefix)
def test_task(self):
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1)),
train_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path,
src_lang="en", tgt_lang="reverse_en",
is_training=True, static_batch=True, global_batch_size=24,
max_seq_length=12),
sentencepiece_model_path=self._sentencepeice_model_path)
task = translation.TranslationTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer)
def test_no_sentencepiece_path(self):
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1)),
train_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path,
src_lang="en", tgt_lang="reverse_en",
is_training=True, static_batch=True, global_batch_size=4,
max_seq_length=4),
sentencepiece_model_path=None)
with self.assertRaisesRegex(
ValueError,
"Setencepiece model path not provided."):
translation.TranslationTask(config)
def test_sentencepiece_no_eos(self):
sentencepeice_model_prefix = os.path.join(self._temp_dir, "sp_no_eos")
_train_sentencepiece(self._sentencepeice_input_path, 20,
sentencepeice_model_prefix, eos_id=-1)
sentencepeice_model_path = "{}.model".format(
sentencepeice_model_prefix)
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1)),
train_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path,
src_lang="en", tgt_lang="reverse_en",
is_training=True, static_batch=True, global_batch_size=4,
max_seq_length=4),
sentencepiece_model_path=sentencepeice_model_path)
with self.assertRaisesRegex(
ValueError,
"EOS token not in tokenizer vocab.*"):
translation.TranslationTask(config)
def test_evaluation(self):
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1),
padded_decode=False,
decode_max_length=64),
validation_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path, src_lang="en",
tgt_lang="reverse_en", static_batch=True, global_batch_size=4),
sentencepiece_model_path=self._sentencepeice_model_path)
logging_dir = self.get_temp_dir()
task = translation.TranslationTask(config, logging_dir=logging_dir)
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs,
config.validation_data)
model = task.build_model()
strategy = tf.distribute.get_strategy()
aggregated = None
for data in dataset:
distributed_outputs = strategy.run(
functools.partial(task.validation_step, model=model),
args=(data,))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
metrics = task.reduce_aggregated_logs(aggregated)
self.assertIn("sacrebleu_score", metrics)
self.assertIn("bleu_score", metrics)
if __name__ == "__main__":
tf.test.main()
| 6,871 | 38.953488 | 80 | py |
models | models-master/official/nlp/tasks/question_answering_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.question_answering."""
import itertools
import json
import os
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import question_answering_dataloader
from official.nlp.tasks import masked_lm
from official.nlp.tasks import question_answering
class QuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(QuestionAnsweringTaskTest, self).setUp()
self._encoder_config = encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1))
self._train_data_config = question_answering_dataloader.QADataConfig(
input_path="dummy", seq_length=128, global_batch_size=1)
val_data = {
"version":
"1.1",
"data": [{
"paragraphs": [{
"context":
"Sky is blue.",
"qas": [{
"question":
"What is blue?",
"id":
"1234",
"answers": [{
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}]
}]
}]
}]
}
self._val_input_path = os.path.join(self.get_temp_dir(), "val_data.json")
with tf.io.gfile.GFile(self._val_input_path, "w") as writer:
writer.write(json.dumps(val_data, indent=4) + "\n")
self._test_vocab = os.path.join(self.get_temp_dir(), "vocab.txt")
with tf.io.gfile.GFile(self._test_vocab, "w") as writer:
writer.write("[PAD]\n[UNK]\n[CLS]\n[SEP]\n[MASK]\nsky\nis\nblue\n")
def _get_validation_data_config(self, version_2_with_negative=False):
return question_answering_dataloader.QADataConfig(
is_training=False,
input_path=self._val_input_path,
input_preprocessed_data_path=self.get_temp_dir(),
seq_length=128,
global_batch_size=1,
version_2_with_negative=version_2_with_negative,
vocab_file=self._test_vocab,
tokenization="WordPiece",
do_lower_case=True)
def _run_task(self, config):
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
metrics = task.build_metrics()
task.initialize(model)
train_dataset = task.build_inputs(config.train_data)
train_iterator = iter(train_dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(train_iterator), model, optimizer, metrics=metrics)
val_dataset = task.build_inputs(config.validation_data)
val_iterator = iter(val_dataset)
logs = task.validation_step(next(val_iterator), model, metrics=metrics)
# Mock that `logs` is from one replica.
logs = {x: (logs[x],) for x in logs}
logs = task.aggregate_logs(step_outputs=logs)
metrics = task.reduce_aggregated_logs(logs)
self.assertIn("final_f1", metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
@parameterized.parameters(
itertools.product(
(False, True),
("WordPiece", "SentencePiece"),
))
def test_task(self, version_2_with_negative, tokenization):
# Saves a checkpoint.
pretrain_cfg = bert.PretrainerConfig(
encoder=self._encoder_config,
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=3, name="next_sentence")
])
pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg)
ckpt = tf.train.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
saved_path = ckpt.save(self.get_temp_dir())
config = question_answering.QuestionAnsweringConfig(
init_checkpoint=saved_path,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config(
version_2_with_negative))
self._run_task(config)
def _export_bert_tfhub(self):
encoder = encoders.build_encoder(
encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
hub_destination = os.path.join(self.get_temp_dir(), "hub")
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
return hub_destination
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = question_answering.QuestionAnsweringConfig(
hub_module_url=hub_module_url,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config())
self._run_task(config)
@parameterized.named_parameters(("squad1", False), ("squad2", True))
def test_predict(self, version_2_with_negative):
validation_data = self._get_validation_data_config(
version_2_with_negative=version_2_with_negative)
config = question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=validation_data)
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
all_predictions, all_nbest, scores_diff = question_answering.predict(
task, validation_data, model)
self.assertLen(all_predictions, 1)
self.assertLen(all_nbest, 1)
if version_2_with_negative:
self.assertLen(scores_diff, 1)
else:
self.assertEmpty(scores_diff)
class XLNetQuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(XLNetQuestionAnsweringTaskTest, self).setUp()
self._encoder_config = encoders.EncoderConfig(
type="xlnet",
xlnet=encoders.XLNetEncoderConfig(vocab_size=30522, num_layers=1))
self._train_data_config = question_answering_dataloader.QADataConfig(
input_path="dummy", seq_length=128,
global_batch_size=2, xlnet_format=True)
val_data = {
"version":
"2.0",
"data": [{
"paragraphs": [{
"context":
"Sky is blue.",
"qas": [{
"question":
"What is blue?",
"id":
"1234",
"answers": [{
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}]
}]
}]
}]
}
self._val_input_path = os.path.join(self.get_temp_dir(), "val_data.json")
with tf.io.gfile.GFile(self._val_input_path, "w") as writer:
writer.write(json.dumps(val_data, indent=4) + "\n")
self._test_vocab = os.path.join(self.get_temp_dir(), "vocab.txt")
with tf.io.gfile.GFile(self._test_vocab, "w") as writer:
writer.write("[PAD]\n[UNK]\n[CLS]\n[SEP]\n[MASK]\nsky\nis\nblue\n")
def _get_validation_data_config(self):
return question_answering_dataloader.QADataConfig(
is_training=False,
input_path=self._val_input_path,
input_preprocessed_data_path=self.get_temp_dir(),
seq_length=128,
global_batch_size=2,
version_2_with_negative=True,
vocab_file=self._test_vocab,
tokenization="WordPiece",
do_lower_case=True,
xlnet_format=True)
def _run_task(self, config):
task = question_answering.XLNetQuestionAnsweringTask(config)
model = task.build_model()
metrics = task.build_metrics()
task.initialize(model)
train_dataset = task.build_inputs(config.train_data)
train_iterator = iter(train_dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(train_iterator), model, optimizer, metrics=metrics)
val_dataset = task.build_inputs(config.validation_data)
val_iterator = iter(val_dataset)
logs = task.validation_step(next(val_iterator), model, metrics=metrics)
# Mock that `logs` is from one replica.
logs = {x: (logs[x],) for x in logs}
logs = task.aggregate_logs(step_outputs=logs)
metrics = task.reduce_aggregated_logs(logs)
self.assertIn("final_f1", metrics)
self.assertNotIn("loss", metrics)
def test_task(self):
config = question_answering.XLNetQuestionAnsweringConfig(
init_checkpoint="",
n_best_size=5,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config())
self._run_task(config)
if __name__ == "__main__":
tf.test.main()
| 9,939 | 36.368421 | 79 | py |
models | models-master/official/projects/bigbird/recomputing_dropout.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras dropout layer that is aware of `RecomputeContext`."""
import numpy as np
import tensorflow as tf
from official.projects.bigbird import recompute_grad as recompute_grad_lib
from official.projects.bigbird import stateless_dropout as stateless_dropout_lib
# Reimplements internal function
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/smart_cond.py.
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError('`true_fn` must be callable.')
if not callable(false_fn):
raise TypeError('`false_fn` must be callable.')
pred_value = tf.get_static_value(pred)
if isinstance(pred, tf.Variable) or pred_value is None:
return tf.cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
if pred_value:
return true_fn()
else:
return false_fn()
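# Minimal usage sketch for `smart_cond` (illustrative; `_training_value_example`
# is not part of this module): a Python bool or constant tensor predicate picks
# the branch immediately, while a dynamic tensor predicate falls back to
# `tf.cond`.
def _training_value_example(training):
  """Returns 1.0 in training mode and 0.0 otherwise."""
  return smart_cond(training,
                    true_fn=lambda: tf.constant(1.0),
                    false_fn=lambda: tf.constant(0.0))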
# See https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout.
class RecomputingDropout(tf.keras.layers.Layer):
"""`tf.keras.layers.Dropout` that supports `recompute_grad`."""
def __init__(self,
rate,
noise_shape=None,
seed=None,
force_recomputation=False,
**kwargs):
"""Initializes `RecomputingDropout`.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the binary
dropout mask that will be multiplied with the input. For instance, if
inputs have shape `(batch_size, timesteps, features)` and you want the
dropout mask to be the same for all timesteps, you can use
`noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
force_recomputation: If `True`, then raises an error if called outside a
recompute context.
**kwargs: Keyword arguments for `tf.keras.layers.Layer`.
"""
super(RecomputingDropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.force_recomputation = force_recomputation
self.supports_masking = True
# Create a layer-specific seed to combine with the global recompute seed.
self._recompute_seed = (
np.random.randint(-2**31, 2**31, dtype=np.int32)
if seed is None else seed)
def _get_noise_shape(self, inputs):
# Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
# which will override `self.noise_shape`, and allows for custom noise
# shapes with dynamically sized inputs.
if self.noise_shape is None:
return None
concrete_inputs_shape = tf.shape(inputs)
noise_shape = []
for i, value in enumerate(self.noise_shape):
noise_shape.append(concrete_inputs_shape[i] if value is None else value)
return tf.convert_to_tensor(noise_shape)
def call(self, inputs, training=None):
"""Builds computation graph.
Args:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
`inputs` masked according to layer configuration.
Raises:
ValueError: If `force_recomputation` is `True` and called outside a
a recompute context.
"""
if training is None:
training = tf.keras.backend.learning_phase()
def dropped_inputs():
"""Randomly drops elements of `inputs` when `training=True`."""
recompute_context = recompute_grad_lib.get_recompute_context()
if recompute_context is None:
if self.force_recomputation:
raise ValueError(
'RecomputeContext is required when force_recomputation=True.')
return tf.nn.dropout(
inputs,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed,
rate=self.rate)
seed = tf.stack([recompute_context.seed, self._recompute_seed])
return stateless_dropout_lib.stateless_dropout(
inputs,
rate=self.rate,
seed=seed,
noise_shape=self._get_noise_shape(inputs))
output = smart_cond(training, dropped_inputs, lambda: tf.identity(inputs))
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed,
'force_recomputation': self.force_recomputation,
}
base_config = super(RecomputingDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
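# Illustrative usage sketch (the function below is not part of this module):
# outside a recompute context `RecomputingDropout` behaves like
# `tf.keras.layers.Dropout`; inside `recompute_grad` it derives its mask from
# the context seed so the same mask is reproduced when the forward pass is
# recomputed.
def _recomputing_dropout_example():
  layer = RecomputingDropout(rate=0.1, noise_shape=(None, 1, 8))
  inputs = tf.ones([2, 4, 8])
  # Shares one dropout mask across the middle (e.g. time) dimension.
  return layer(inputs, training=True)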
| 5,942 | 36.14375 | 97 | py |
models | models-master/official/projects/bigbird/encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based text encoder network."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
from official.modeling import activations
from official.modeling import tf_utils
from official.nlp import modeling
from official.nlp.modeling import layers
from official.projects.bigbird import recompute_grad
from official.projects.bigbird import recomputing_dropout
_MAX_SEQ_LEN = 4096
class RecomputeTransformerLayer(layers.TransformerScaffold):
"""Transformer layer that recomputes the forward pass during backpropagation."""
def call(self, inputs, training=None):
emb, mask = inputs
def f(*args):
# recompute_grad can only handle tensor inputs. so we enumerate the
# nested input [emb, mask] as follows:
# args[0]: emb
# args[1]: mask[0] = band_mask
# args[2]: mask[1] = encoder_from_mask
# args[3]: mask[2] = encoder_to_mask
# args[4]: mask[3] = blocked_encoder_mask
x = super(RecomputeTransformerLayer,
self).call([args[0], [args[1], args[2], args[3], args[4]]],
training=training)
return x
f = recompute_grad.recompute_grad(f)
return f(emb, *mask)
@tf.keras.utils.register_keras_serializable(package='Text')
class BigBirdEncoder(tf.keras.Model):
"""Transformer-based encoder network with BigBird attentions.
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_position_embeddings: The maximum length of position embeddings that this
encoder can consume. If None, max_position_embeddings uses the value from
sequence length. This determines the variable shape for positional
embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
intermediate_size: The intermediate size for the transformer layers.
block_size: int. A BigBird Attention parameter: size of block in from/to
sequences.
num_rand_blocks: int. A BigBird Attention parameter: number of random chunks
per row.
activation: The activation to use for the transformer layers.
dropout_rate: The dropout rate to use for the transformer layers.
attention_dropout_rate: The dropout rate to use for the attention layers
within the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
use_gradient_checkpointing: Use gradient checkpointing to trade-off compute
for memory.
"""
def __init__(self,
vocab_size,
hidden_size=768,
num_layers=12,
num_attention_heads=12,
max_position_embeddings=_MAX_SEQ_LEN,
type_vocab_size=16,
intermediate_size=3072,
block_size=64,
num_rand_blocks=3,
activation=activations.gelu,
dropout_rate=0.1,
attention_dropout_rate=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
embedding_width=None,
use_gradient_checkpointing=False,
**kwargs):
activation = tf.keras.activations.get(activation)
initializer = tf.keras.initializers.get(initializer)
if use_gradient_checkpointing:
tf.keras.layers.Dropout = recomputing_dropout.RecomputingDropout
layer_cls = RecomputeTransformerLayer
else:
layer_cls = layers.TransformerScaffold
self._self_setattr_tracking = False
self._config_dict = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_position_embeddings': max_position_embeddings,
'type_vocab_size': type_vocab_size,
'intermediate_size': intermediate_size,
'block_size': block_size,
'num_rand_blocks': num_rand_blocks,
'activation': tf_utils.serialize_activation(
activation, use_legacy_format=True
),
'dropout_rate': dropout_rate,
'attention_dropout_rate': attention_dropout_rate,
'initializer': tf_utils.serialize_initializer(
initializer, use_legacy_format=True
),
'embedding_width': embedding_width,
}
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
if embedding_width is None:
embedding_width = hidden_size
self._embedding_layer = modeling.layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings')
word_embeddings = self._embedding_layer(word_ids)
# Always uses dynamic slicing for simplicity.
self._position_embedding_layer = modeling.layers.PositionEmbedding(
initializer=initializer,
max_length=max_position_embeddings,
name='position_embedding')
position_embeddings = self._position_embedding_layer(word_embeddings)
self._type_embedding_layer = modeling.layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings')
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
embeddings = self._embedding_norm_layer(embeddings)
embeddings = tf.keras.layers.Dropout(rate=dropout_rate)(embeddings)
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection')
embeddings = self._embedding_projection(embeddings)
self._transformer_layers = []
data = embeddings
masks = layers.BigBirdMasks(block_size=block_size)(
data, mask)
encoder_outputs = []
attn_head_dim = hidden_size // num_attention_heads
for i in range(num_layers):
layer = layer_cls(
num_attention_heads,
intermediate_size,
activation,
attention_cls=layers.BigBirdAttention,
attention_cfg=dict(
num_heads=num_attention_heads,
key_dim=attn_head_dim,
kernel_initializer=initializer,
from_block_size=block_size,
to_block_size=block_size,
num_rand_blocks=num_rand_blocks,
max_rand_mask_length=max_position_embeddings,
seed=i),
dropout_rate=dropout_rate,
attention_dropout_rate=dropout_rate,
kernel_initializer=initializer)
self._transformer_layers.append(layer)
data = layer([data, masks])
encoder_outputs.append(data)
outputs = dict(
sequence_output=encoder_outputs[-1], encoder_outputs=encoder_outputs)
super().__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return self._config_dict
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
  def pooler_layer(self):
    """The pooler dense layer after the transformer layers."""
    # Note: `_pooler_layer` is never created in this module, so accessing this
    # property raises `AttributeError` unless a pooler layer is added.
    return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
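# Illustrative construction sketch (values are arbitrary, the helper below is
# not part of this module): setting `embedding_width` smaller than
# `hidden_size` factorizes the word embeddings and adds the
# `embedding_projection` EinsumDense defined above to project them back up to
# `hidden_size`.
def _build_factorized_bigbird_encoder():
  return BigBirdEncoder(
      vocab_size=1024,
      num_layers=1,
      embedding_width=128,
      max_position_embeddings=4096)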
| 9,349 | 37.319672 | 82 | py |
models | models-master/official/projects/bigbird/recompute_grad.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for rematerialization.
Incubates a version of tf.recompute_grad that is XLA compatible.
"""
import collections
import os
import threading
from typing import Deque, List, NamedTuple, Optional, Sequence
from absl import logging
import numpy as np
import tensorflow as tf
class RecomputeContext(
NamedTuple('RecomputeContext', [
('is_recomputing', bool),
('seed', tf.Tensor),
('children', Deque['RecomputeContext']),
])):
"""Context for recomputation.
Attributes:
is_recomputing: Whether we are in a recomputation phase.
seed: Scalar integer tensor that should be used with stateless random ops
for deterministic behavior and correct computation of the gradient.
children: Nested `RecomputeContext` instances. Used internally by
`recompute_grad` to track nested instances of `RecomputeContext`.
"""
def __enter__(self):
return _context_stack.push(self)
def __exit__(self, exc_type, exc_value, traceback):
_context_stack.pop(self)
# Simplified version of `_DefaultStack` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/ops.py.
class _ContextStack(threading.local):
"""A thread-local stack for providing implicit recompute contexts."""
def __init__(self):
super(_ContextStack, self).__init__()
self._stack = []
def top(self) -> Optional[RecomputeContext]:
return self._stack[-1] if self._stack else None
def push(self, context: RecomputeContext):
self._stack.append(context)
return context
def pop(self, context: RecomputeContext):
if self._stack[-1] is not context:
raise AssertionError('Nesting violated for RecomputeContext.')
self._stack.pop()
_context_stack = _ContextStack()
def get_recompute_context() -> Optional[RecomputeContext]:
"""Returns the current recomputing context if it exists."""
return _context_stack.top()
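# Minimal sketch of how the context is consumed (illustrative; mirrors the
# pattern used by `recompute_grad` below and by `RecomputingDropout`): code
# that needs deterministic randomness checks for an active context and, if one
# exists, derives its seed from it.
def _seed_from_active_context(default_seed: tf.Tensor) -> tf.Tensor:
  context = get_recompute_context()
  return default_seed if context is None else context.seed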
# Adapted from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/control_flow_util.py.
def _get_containing_xla_context(graph: tf.Graph) -> Optional[object]:
"""Returns the first ancestor `XLAControlFlowContext` in the `graph`."""
ctxt = graph._get_control_flow_context() # pylint: disable=protected-access
while ctxt:
if ctxt.IsXLAContext():
return ctxt
ctxt = ctxt.outer_context
return None
def _in_xla_context(graph: Optional[tf.Graph] = None) -> bool:
"""Detects whether we are in an XLA context."""
if '--tf_xla_auto_jit=2' in os.environ.get('TF_XLA_FLAGS', ''):
return True
graph = tf.compat.v1.get_default_graph() if graph is None else graph
while True:
if _get_containing_xla_context(graph) is not None:
return True
try:
graph = graph.outer_graph
except AttributeError:
return False
def _force_data_dependency(
first_compute: Sequence[tf.Tensor],
then_compute: Sequence[tf.Tensor]) -> List[tf.Tensor]:
"""Force all of `then_compute` to depend on all of `first_compute`.
Uses a dummy data dependency, which is useful when running on TPUs because
XLA ignores control dependencies. Only supports float arguments.
Args:
first_compute: Sequence of `Tensor`s to be executed before `then_compute`.
    then_compute: Sequence of `Tensor`s to be executed after `first_compute`.
Returns:
Sequence of `Tensor`s with same length of `then_compute`.
Raises:
ValueError: if ranks are unknown or types are not floating.
"""
def _first_element(x):
if x.shape.ndims is None:
raise ValueError('Rank of Tensor %s must be known' % x)
ndims = x.shape.ndims
begin = tf.zeros(ndims, dtype=tf.int32)
size = tf.ones(ndims, dtype=tf.int32)
return tf.reshape(tf.slice(x, begin, size), [])
first_compute_sum = tf.add_n(
[_first_element(x) for x in first_compute if x is not None])
dtype = first_compute_sum.dtype
if not dtype.is_floating:
raise ValueError('_force_data_dependency only supports floating dtypes.')
zero = np.finfo(dtype.as_numpy_dtype).tiny * first_compute_sum
return [
x + tf.cast(zero, x.dtype) if x is not None else None
for x in then_compute
]
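# Illustrative sketch (not part of this module): forcing `b` to be computed
# only after `a` on XLA, where control dependencies are ignored. Both tensors
# are assumed to be float tensors with known rank. The result is numerically
# indistinguishable from `b` for normally scaled values, since the injected
# term is on the order of the dtype's smallest normal value.
def _force_order_example(a: tf.Tensor, b: tf.Tensor) -> tf.Tensor:
  """Returns a tensor equal to `b` that is data-dependent on `a`."""
  (b_after,) = _force_data_dependency([a], [b])
  return b_after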
def _make_seed_if_none(seed: Optional[tf.Tensor]) -> tf.Tensor:
"""Uses the global generator to make a seed if necessary."""
if seed is not None:
return seed
generator = tf.random.experimental.get_global_generator()
# The two seeds for stateless random ops don't have individual semantics and
# are scrambled together, so providing one seed is fine. This makes it easier
# for users to provide a local seed without worrying about integer overflow.
# See `make_seeds` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/stateful_random_ops.py.
try:
return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed')
except (RuntimeError, TypeError, ValueError, tf.errors.NotFoundError) as e:
# For a number of reasons, the above operation can fail like using multiple
# graphs or toggling between eager and graph modes. Reset the generator.
    logging.warning('Resetting the generator. %s: %s', type(e), e)
tf.random.experimental.set_global_generator(None)
generator = tf.random.experimental.get_global_generator()
return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed')
def recompute_grad(f, seed=None):
"""An eager-compatible version of recompute_grad.
  For f(*args, **kwargs), this supports gradients with respect to args, or with
  respect to any variables residing in the kwarg 'variables'.
Note that for keras layer and model objects, this is handled automatically.
Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not
be able to access the member variables of that object, because `g` returns
through the wrapper function `inner`. When recomputing gradients through
objects that inherit from keras, we suggest keeping a reference to the
underlying object around for the purpose of accessing these variables.
Args:
f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.
    seed: Optional seed for random ops. `seed` should be an integer scalar
`Tensor`. When compiling to XLA, `seed` must have dtype `tf.int32`. If
`seed` is not provided one will be generated.
Returns:
A function `g` that wraps `f`, but which recomputes `f` on the backwards
pass of a gradient call.
"""
@tf.custom_gradient
def inner(*args, **kwargs):
"""Inner function closure for calculating gradients."""
# Detect when we're nested and in the backwards pass, so we don't generate
# an additional seed.
parent_context = get_recompute_context()
if parent_context is not None and parent_context.is_recomputing:
# Use the cached context in the recomputation phase.
with parent_context.children.popleft()._replace(
is_recomputing=True) as context:
result = f(*args, **kwargs)
else:
with RecomputeContext(
is_recomputing=False,
seed=_make_seed_if_none(seed),
children=collections.deque()) as context:
result = f(*args, **kwargs)
# In the forward pass, build up a tree of recomputation contexts.
if parent_context is not None and not parent_context.is_recomputing:
parent_context.children.append(context)
def grad(*dresult, **grad_kwargs):
"""Gradient function calculation for inner function."""
variables = grad_kwargs.pop('variables', None)
if grad_kwargs:
raise ValueError('Found unexpected kwargs for `grad`: ',
list(grad_kwargs.keys()))
inputs, seed = list(args), context.seed
if _in_xla_context():
inputs = _force_data_dependency(
tf.nest.flatten(dresult), inputs + [seed])
seed = inputs.pop()
with tf.GradientTape() as tape:
tape.watch(inputs)
if variables is not None:
tape.watch(variables)
with tf.control_dependencies(dresult):
with context._replace(is_recomputing=True, seed=seed):
result = f(*inputs, **kwargs)
kw_vars = []
if variables is not None:
kw_vars = list(variables)
grads = tape.gradient(
result, list(inputs) + kw_vars, output_gradients=dresult)
return grads[:len(inputs)], grads[len(inputs):]
return result, grad
return inner
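# Minimal usage sketch (names here are illustrative, not part of this module):
# wrap a forward function so its activations are recomputed during the backward
# pass instead of being stored, trading compute for memory.
def _recompute_grad_example(x, dense_layer):
  """Returns d(sum(relu(dense_layer(x))))/dx with a rematerialized forward."""
  forward = recompute_grad(lambda t: tf.nn.relu(dense_layer(t)))
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.reduce_sum(forward(x))
  return tape.gradient(y, x)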
| 9,027 | 36.460581 | 102 | py |
models | models-master/official/projects/bigbird/encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.bigbird.encoder."""
import numpy as np
import tensorflow as tf
from official.projects.bigbird import encoder
class BigBirdEncoderTest(tf.test.TestCase):
def test_encoder(self):
sequence_length = 1024
batch_size = 2
vocab_size = 1024
network = encoder.BigBirdEncoder(
num_layers=1, vocab_size=1024, max_position_embeddings=4096)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
outputs = network([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs["sequence_output"].shape,
(batch_size, sequence_length, 768))
def test_save_restore(self):
sequence_length = 1024
batch_size = 2
vocab_size = 1024
network = encoder.BigBirdEncoder(
num_layers=1, vocab_size=1024, max_position_embeddings=4096)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
inputs = dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data)
ref_outputs = network(inputs)
model_path = self.get_temp_dir() + "/model"
network.save(model_path)
loaded = tf.keras.models.load_model(model_path)
outputs = loaded(inputs)
self.assertAllClose(outputs["sequence_output"],
ref_outputs["sequence_output"])
if __name__ == "__main__":
tf.test.main()
| 2,340 | 35.578125 | 75 | py |
models | models-master/official/projects/lra/linformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based LinformerEncoder block layer."""
from typing import Any, Optional
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package="Text")
class LinformerEncoderBlock(tf.keras.layers.Layer):
"""LinformerEncoderBlock layer.
This layer implements the Linformer Encoder from
"Linformer: Self-Attention with Linear Complexity".
(https://arxiv.org/abs/2006.04768)
References:
[Linformer: Self-Attention with Linear Complexity]
(https://arxiv.org/abs/2006.04768)
[Long Range Arena: A Benchmark for Efficient Transformers]
(https://arxiv.org/abs/2011.04006)
"""
def __init__(
self,
num_attention_heads,
inner_dim,
inner_activation,
low_rank_features,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
use_query_residual=True,
key_dim=None,
value_dim=None,
output_last_dim=None,
diff_q_kv_att_layer_norm=False,
return_attention_scores=False,
**kwargs
):
"""Initializes `LinformerEncoder`.
Note: If `output_last_dim` is used and `use_query_residual` is `True`, the
`output_last_dim`'s value must equal the first input's last dimension for
the query residual connection to work. This is because the residual
connection after the multi-head-attention requires their dimensions to
    match. If `use_query_residual` is `False`, the `output_last_dim` dictates
the last dimension of the output of this module and the
multi-head-attention.
E.g. let's say input dims are `[batch_size, seq_dim, input_last_dim]`.
Scenario 1: If `output_last_dim` is not `None`, then the output dims of this
module would be `[batch_size, seq_dim, output_last_dim]`. Note `key_dim` is
    overridden by `output_last_dim`.
Scenario 2: If `output_last_dim` is `None` and `key_dim` is not `None`, then
the output dims of this module would be `[batch_size, seq_dim, key_dim]`.
Scenario 3: If the `output_last_dim` and `key_dim` are both `None`, the
output dims would be `[batch_size, seq_dim, input_last_dim]`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
low_rank_features: The number of dimensions for low-rank projection.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
      bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
use_query_residual: Toggle to execute residual connection after attention.
key_dim: `key_dim` for the `tf.keras.layers.MultiHeadAttention`. If
`None`, we use the first `input_shape`'s last dim.
value_dim: `value_dim` for the `tf.keras.layers.MultiHeadAttention`.
output_last_dim: Final dimension of the output of this module. This also
dictates the value for the final dimension of the multi-head-attention.
When it's `None`, we use, in order of decreasing precedence, `key_dim` *
`num_heads` or the first `input_shape`'s last dim as the output's last
dim.
diff_q_kv_att_layer_norm: If `True`, create a separate attention layer
norm layer for query and key-value if `norm_first` is `True`. Invalid to
set to `True` if `norm_first` is `False`.
return_attention_scores: If `True`, the output of this layer will be a
tuple and additionally contain the attention scores in the shape of
`[batch_size, num_attention_heads, seq_dim, seq_dim]`.
**kwargs: keyword arguments.
"""
tfm.nlp.layers.util.filter_kwargs(kwargs)
super().__init__(**kwargs)
self._num_heads = num_attention_heads
self._low_rank_features = low_rank_features
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout_rate = attention_dropout
self._output_dropout_rate = output_dropout
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._use_query_residual = use_query_residual
self._key_dim = key_dim
self._value_dim = value_dim
self._output_last_dim = output_last_dim
self._diff_q_kv_att_layer_norm = diff_q_kv_att_layer_norm
self._return_attention_scores = return_attention_scores
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer
)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer
)
self._attention_axes = attention_axes
if self._diff_q_kv_att_layer_norm and not self._norm_first:
      raise ValueError(
          "Setting `diff_q_kv_att_layer_norm` to True "
          "when `norm_first` is False is invalid."
      )
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s"
% type(input_shape)
)
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
logging.warning(
(
"The input size (%d) is not a multiple of the number of attention"
" heads (%d)"
),
hidden_size,
self._num_heads,
)
if self._key_dim is None:
self._key_dim = int(hidden_size // self._num_heads)
if self._output_last_dim is None:
last_output_shape = hidden_size
else:
last_output_shape = self._output_last_dim
common_kwargs = dict(
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
)
self._key_projection = tf.keras.layers.Dense(
self._low_rank_features,
activation=None,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="key_low_rank_projection",
**common_kwargs
)
self._value_projection = tf.keras.layers.Dense(
self._low_rank_features,
activation=None,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="value_low_rank_projection",
**common_kwargs
)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._low_rank_features,
value_dim=self._low_rank_features,
dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
attention_axes=self._attention_axes,
output_shape=self._output_last_dim,
name="self_attention",
**common_kwargs
)
self._attention_dropout = tf.keras.layers.Dropout(
rate=self._attention_dropout_rate
)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32,
)
self._attention_layer_norm_kv = self._attention_layer_norm
if self._diff_q_kv_att_layer_norm:
self._attention_layer_norm_kv = tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm_kv",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32,
)
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="intermediate",
**common_kwargs
)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy
)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout
)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, last_output_shape),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs
)
self._output_dropout = tf.keras.layers.Dropout(
rate=self._output_dropout_rate
)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32,
)
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self._num_heads,
"low_rank_features": self._low_rank_features,
"inner_dim": self._inner_dim,
"inner_activation": self._inner_activation,
"output_dropout": self._output_dropout_rate,
"attention_dropout": self._attention_dropout_rate,
"kernel_initializer": tf.keras.initializers.serialize(
self._kernel_initializer
),
"bias_initializer": tf.keras.initializers.serialize(
self._bias_initializer
),
"kernel_regularizer": tf.keras.regularizers.serialize(
self._kernel_regularizer
),
"bias_regularizer": tf.keras.regularizers.serialize(
self._bias_regularizer
),
"activity_regularizer": tf.keras.regularizers.serialize(
self._activity_regularizer
),
"kernel_constraint": tf.keras.constraints.serialize(
self._kernel_constraint
),
"bias_constraint": tf.keras.constraints.serialize(
self._bias_constraint
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"inner_dropout": self._inner_dropout,
"attention_initializer": tf.keras.initializers.serialize(
self._attention_initializer
),
"attention_axes": self._attention_axes,
"use_query_residual": self._use_query_residual,
"key_dim": self._key_dim,
"value_dim": self._value_dim,
"output_last_dim": self._output_last_dim,
"diff_q_kv_att_layer_norm": self._diff_q_kv_att_layer_norm,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: Any, output_range: Optional[tf.Tensor] = None) -> Any:
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced. If you
would like to have no change to the model training, it is better to only
set the `output_range` for serving.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError(
"Unexpected inputs to %s with length at %d"
% (self.__class__, len(inputs))
)
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor[:, 0:output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
## Low Rank Projection Here
key = self._key_projection(key_value)
value = self._value_projection(input_tensor)
## Low Rank Projection Done
if self._return_attention_scores:
attention_output, attention_scores = self._attention_layer(
query=target_tensor,
key=key,
value=value,
attention_mask=attention_mask,
return_attention_scores=True,
)
else:
attention_output = self._attention_layer(
query=target_tensor,
key=key,
value=value,
attention_mask=attention_mask,
)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
# Important to not combine `self._norm_first` and
# `self._use_query_residual` into one if clause because else is only for
# `_norm_first == False`.
if self._use_query_residual:
attention_output = source_tensor + attention_output
else:
if self._use_query_residual:
attention_output = target_tensor + attention_output
attention_output = self._attention_layer_norm(attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
# During mixed precision training, layer norm output is always fp32 for
# now. Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(layer_output + attention_output)
if self._return_attention_scores:
return layer_output, attention_scores
else:
return layer_output
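# Illustrative usage sketch (shapes and the helper below are ours): the block
# takes a `[batch, seq, hidden]` tensor, applies the low-rank key/value
# projections built above before multi-head attention, and returns a tensor
# with the same shape as its input.
def _linformer_block_example():
  block = LinformerEncoderBlock(
      num_attention_heads=4,
      inner_dim=256,
      inner_activation="relu",
      low_rank_features=32)
  inputs = tf.ones([2, 128, 64])
  return block(inputs)  # -> shape [2, 128, 64]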
| 18,587 | 39.496732 | 80 | py |
models | models-master/official/projects/lra/moving_average_gated_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based MegaEncoder block layer."""
from typing import Any
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.lra.exponential_moving_average import MultiHeadEMA
def get_activation_fn(activation):
## Helper Function for Activation
if activation == "silu":
return tf.nn.silu
elif activation == "softmax":
return tf.nn.softmax
else:
raise NotImplementedError
class RelativePositionBias(tf.keras.layers.Layer):
"""Relative position embedding layer with bias."""
def __init__(self, max_positions):
super().__init__()
self.max_positions = max_positions
def build(self, input_shape):
gauss_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
self.rel_pos_bias = tf.Variable(
gauss_init(shape=[2 * self.max_positions - 1], dtype=tf.float32),
trainable=True,
)
def call(self, seq_len):
if seq_len is None:
seq_len = self.max_positions
seq_len = tf.get_static_value(seq_len)
# seq_len * 2 -1
b = self.rel_pos_bias[
(self.max_positions - seq_len) : (self.max_positions + seq_len - 1)
]
# seq_len * 3 - 1
t = tf.pad(b, paddings=tf.constant([[0, seq_len]]))
# (seq_len * 3 - 1) * seq_len
t = tf.tile(t, (seq_len,))
t = t[:-seq_len]
# seq_len x (3 * seq_len - 2)
t = tf.reshape(t, shape=(seq_len, 3 * seq_len - 2))
r = (2 * seq_len - 1) // 2
start = r
end = t.shape[1] - r
t = t[:, start:end]
return t
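# Worked sketch (illustrative): with `max_positions=4` and `seq_len=3`, the
# layer slices 2*seq_len - 1 = 5 learned scalars out of its
# 2*max_positions - 1 bias vector and rearranges them into a
# `[seq_len, seq_len]` matrix whose entry [i, j] holds the bias for relative
# offset j - i.
def _relative_position_bias_example():
  layer = RelativePositionBias(max_positions=4)
  return layer(3)  # -> shape [3, 3]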
class MovingAverageGatedAttention(tf.keras.layers.Layer):
"""MegaEncoderBlock layer.
This layer implements the Mega Encoder from
"Mega: Moving Average Equipped Gated Attention".
(https://arxiv.org/abs/2209.10655)
"""
def __init__(
self,
embed_dim,
zdim,
hdim,
ndim,
intermediate_size,
inner_activation=None,
dropout=0.0,
attention_dropout=0.0,
hidden_dropout=0.0,
activation="silu",
bidirectional=False,
truncation=None,
prenorm=True,
max_positions=1024,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
attention_initializer=None,
attention_axes=None,
return_attention_scores=False,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
):
self.embed_dim = embed_dim
self.hdim = hdim
self.zdim = zdim
self.ndim = ndim
self.inner_dim = intermediate_size
self.activation = get_activation_fn(activation=activation)
self.inner_activation = inner_activation
self.scaling = self.zdim**-0.5
self.dropout = tf.keras.layers.Dropout(rate=dropout)
self.hidden_dropout = tf.keras.layers.Dropout(rate=hidden_dropout)
self.attention_dropout_rate = attention_dropout
self.attention_dropout = tf.keras.layers.Dropout(rate=attention_dropout)
self.ffn_intermediate_dropout = tf.keras.layers.Dropout(rate=hidden_dropout)
self.output_dropout = tf.keras.layers.Dropout(rate=hidden_dropout)
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer
)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer
)
self._attention_axes = attention_axes
self._use_bias = use_bias
self.return_attention_scores = return_attention_scores
self.prenorm = prenorm
self.norm = tf.keras.layers.LayerNormalization(axis=-1)
self.ffn_norm = tf.keras.layers.LayerNormalization(axis=-1)
self.move = MultiHeadEMA(
embed_dim, ndim=ndim, bidirectional=bidirectional, truncation=truncation
)
self.max_positions = max_positions
super().__init__()
def build(self, input_shape):
gauss_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
zero_init = tf.keras.initializers.Zeros()
self.v_proj = tf.keras.layers.Dense(
self.hdim,
activation=None,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(gauss_init),
bias_initializer=tf_utils.clone_initializer(zero_init),
name="v_proj",
)
self.mx_proj = tf.keras.layers.Dense(
self.zdim + self.hdim + 2 * self.embed_dim,
activation=None,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(gauss_init),
bias_initializer=tf_utils.clone_initializer(zero_init),
name="mx_proj",
)
self.h_proj = tf.keras.layers.Dense(
self.embed_dim,
activation=None,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(gauss_init),
bias_initializer=tf_utils.clone_initializer(zero_init),
name="h_proj",
)
self._intermediate_dense = tf.keras.layers.Dense(
self.inner_dim, use_bias=True
)
self._output_dense = tf.keras.layers.Dense(self.embed_dim, use_bias=True)
policy = tf.keras.mixed_precision.global_policy()
self._intermediate_activation_layer = tf.keras.layers.Activation(
self.inner_activation, dtype=policy
)
self.gamma = tf.Variable(
gauss_init(shape=[2, self.zdim], dtype=tf.float32), trainable=True
)
self.beta = tf.Variable(
zero_init(shape=[2, self.zdim], dtype=tf.float32), trainable=True
)
self.rel_pos_bias = RelativePositionBias(max_positions=self.max_positions)
super().build(input_shape)
def get_config(self):
base_config = super().get_config()
    base_config.update({
        "embed_dim": self.embed_dim,
        "zdim": self.zdim,
        "hdim": self.hdim,
        # Serialize the configured rate rather than the Dropout layer object.
        "dropout": self.dropout.rate,
"attention_dropout": self.attention_dropout_rate,
"kernel_initializer": tf.keras.initializers.serialize(
self._kernel_initializer
),
"bias_initializer": tf.keras.initializers.serialize(
self._bias_initializer
),
"use_bias": self._use_bias,
"prenorm": self.prenorm,
"max_positions": self.max_positions,
"attention_initializer": tf.keras.initializers.serialize(
self._attention_initializer
),
"attention_axes": self._attention_axes,
"return_attention_scores": self.return_attention_scores,
})
return base_config
def _softmax_attention(self, q, k):
slen = k.shape[1]
# C x C
if slen is None:
slen = 2
bias = self.rel_pos_bias(slen)
# scaled attention
q = q * self.scaling
# B x K x C x C
qk = tf.matmul(q, tf.transpose(k, perm=(0, 2, 1))) + bias
attn_weights = tf.nn.softmax(qk, axis=-1)
return attn_weights
  def call(self, inputs: Any) -> Any:
    """MEGA encoder block call.
    Args:
      inputs: a single tensor or a list of tensors. `input tensor` as the
        single sequence of embeddings. [`input tensor`, `attention mask`] to
        have the additional attention mask. [`query tensor`, `key value
        tensor`, `attention mask`] to have separate input streams for the
        query, and key/value to the multi-head attention.
    Returns:
      An output tensor with the same dimensions as the input/query tensor.
    """
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
(input_tensor, attention_mask) = inputs
key_value = None
elif len(inputs) == 3:
(input_tensor, key_value, attention_mask) = inputs
else:
        raise ValueError(
            "Unexpected inputs to %s with length %d"
% (self.__class__, len(inputs))
)
else:
(input_tensor, key_value, attention_mask) = (inputs, None, None)
if self.prenorm:
input_tensor = self.norm(input_tensor)
if key_value is not None:
key_value = self.norm(key_value)
## B*L*D -> L*B*D
## Multi-Dimensional Damped EMA
x = tf.transpose(input_tensor, perm=[1, 0, 2])
residual = x
seq_len, bsz, _ = x.shape
# L x B x E
v = self.activation(self.v_proj(x))
# L x B x D
mx = self.move(x, attention_mask)
mx = self.dropout(mx)
# L x B x D -> L x B x (2*D+S+E)
base = self.mx_proj(mx)
u, zr, hx = tf.split(
base, [self.embed_dim, self.zdim + self.hdim, self.embed_dim], axis=-1
)
# L x B x D
u = tf.math.sigmoid(u)
# L x B x (E+S)
z, r = tf.split(tf.nn.silu(zr), [self.zdim, self.hdim], axis=-1)
# L x B x S -> L x B x 1 x S -> L x B x 2 x S
z = tf.expand_dims(z, axis=2) * self.gamma + self.beta
# L x B x 2 x S -> L x B x S
q, k = tf.unstack(z, axis=2)
# L x B x D -> B x L x D
q = tf.transpose(q, perm=(1, 0, 2))
k = tf.transpose(k, perm=(1, 0, 2))
# L x B x E -> B x L x E
v = tf.transpose(v, perm=(1, 0, 2))
attn_weights = self._softmax_attention(q, k)
v = self.hidden_dropout(v)
kernel = tf.squeeze(self.attention_dropout(attn_weights))
# B x K x C x E -> B x L x E -> L x B x E
h = tf.transpose(
tf.reshape(
tf.linalg.matmul(kernel, v), shape=(bsz, seq_len, self.hdim)
),
perm=(1, 0, 2),
)
# L x B x E -> L x B x D
h = self.activation(hx + self.h_proj(h * r))
h = self.dropout(h)
# L x B x D
out = residual + tf.math.multiply(u, h - residual)
if not self.prenorm:
out = self.norm(out)
out = tf.transpose(out, perm=(1, 0, 2))
if self.prenorm:
out = self.ffn_norm(out)
inner_output = self._intermediate_dense(out)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self.ffn_intermediate_dropout(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self.output_dropout(layer_output) + out
if not self.prenorm:
layer_output = self.ffn_norm(layer_output)
return layer_output
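# A hedged usage sketch (added for illustration; not part of the original
# file). It runs one MEGA block over a small batch of embeddings. Inputs are
# [batch, seq_len, embed_dim]; the optional mask is [batch, seq_len] with 1s
# at valid positions, matching how MultiHeadEMA consumes it. All sizes below
# are arbitrary placeholders, not recommended settings.
def _moving_average_gated_attention_example():
  block = MovingAverageGatedAttention(
      embed_dim=32,
      zdim=16,
      hdim=64,
      ndim=4,
      intermediate_size=64,
      inner_activation=tf.nn.gelu,
      bidirectional=True,
      max_positions=64,
  )
  x = tf.random.normal([2, 8, 32])
  mask = tf.ones([2, 8], dtype=tf.float32)
  y = block([x, mask])
  assert y.shape == (2, 8, 32)
  return y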
| 11,011 | 30.373219 | 80 | py |
models | models-master/official/projects/lra/transformer_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
layers = tfm.nlp.layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class TransformerEncoder(tf.keras.layers.Layer):
"""TransformerEncoder.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: Callable[..., Any] = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02
),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
**kwargs
):
super().__init__(**kwargs)
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings',
)
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding',
)
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings',
)
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32
)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout'
)
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection',
)
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask'
)
for i in range(num_layers):
layer = layers.TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
return_attention_scores=False,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % i,
)
self._transformer_layers.append(layer)
self._num_layers = num_layers
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform',
)
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
if 'input_word_ids' in inputs.keys():
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids', None)
word_embeddings = inputs.get('input_word_embeddings', None)
elif 'left_word_ids' in inputs.keys():
word_ids = inputs.get('left_word_ids')
mask = inputs.get('left_mask')
elif 'right_word_ids' in inputs.keys():
word_ids = inputs.get('right_word_ids')
mask = inputs.get('right_mask')
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
elif isinstance(inputs, list):
## Dual Encoder Tasks
word_ids, mask = inputs
type_ids = None
dense_inputs, dense_mask, dense_type_ids = None, None, None
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if type_ids is None:
type_ids = tf.zeros_like(mask)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
mask = tf.concat([mask, dense_mask], axis=1)
embeddings = self._get_embeddings(
word_ids, type_ids, word_embeddings, dense_inputs, dense_type_ids
)
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
for layer in self._transformer_layers:
x = layer([x, attention_mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs,
)
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.'
)
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _get_embeddings(
self,
word_ids: tf.Tensor,
type_ids: tf.Tensor,
word_embeddings: Optional[tf.Tensor],
dense_inputs: Optional[tf.Tensor],
dense_type_ids: Optional[tf.Tensor],
) -> tf.Tensor:
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
type_embeddings = self._type_embedding_layer(type_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
return word_embeddings + position_embeddings + type_embeddings
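# A hedged usage sketch (added for illustration; not part of the original
# file). It builds a deliberately tiny encoder and runs a dummy batch through
# the dict-based input interface shown in `call`. The sizes are placeholders
# chosen so the example is cheap to execute.
def _transformer_encoder_example():
  encoder = TransformerEncoder(
      vocab_size=100,
      hidden_size=32,
      num_layers=2,
      num_attention_heads=4,
      max_sequence_length=16,
      inner_dim=64,
  )
  batch = dict(
      input_word_ids=tf.ones([2, 16], dtype=tf.int32),
      input_mask=tf.ones([2, 16], dtype=tf.int32),
      input_type_ids=tf.zeros([2, 16], dtype=tf.int32),
  )
  outputs = encoder(batch)
  assert outputs['sequence_output'].shape == (2, 16, 32)
  assert outputs['pooled_output'].shape == (2, 32)
  return outputs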
| 11,272 | 35.247588 | 80 | py |
models | models-master/official/projects/lra/lra_dual_encoder_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for dual encoder style models."""
# pylint: disable=g-classes-have-attributes
import dataclasses
from typing import List, Union, Optional
from absl import logging
import numpy as np
import orbit
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.tasks import utils
from official.projects.lra import lra_dual_encoder
METRIC_TYPES = frozenset(
['accuracy', 'f1', 'matthews_corrcoef', 'pearson_spearman_corr']
)
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A classifier/regressor configuration."""
num_classes: int = 2
use_encoder_pooler: bool = False
encoder: encoders.EncoderConfig = encoders.EncoderConfig()
max_seq_length: int = 512
@dataclasses.dataclass
class DualEncoderConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can
# be specified.
init_checkpoint: str = ''
init_cls_pooler: bool = False
hub_module_url: str = ''
metric_type: str = 'accuracy'
# Defines the concrete model config at instantiation time.
model: ModelConfig = ModelConfig()
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
@task_factory.register_task_cls(DualEncoderConfig)
class DualEncoderTask(base_task.Task):
"""Task object for DualEncoderTask."""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
if params.metric_type not in METRIC_TYPES:
raise ValueError('Invalid metric_type: {}'.format(params.metric_type))
self.metric_type = params.metric_type
if hasattr(params.train_data, 'label_field'):
self.label_field = params.train_data.label_field
else:
self.label_field = 'label_ids'
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError(
'At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.'
)
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url
)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
return lra_dual_encoder.LRADualEncoder(
network=encoder_network,
max_seq_length=self.task_config.model.max_seq_length,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
use_encoder_pooler=self.task_config.model.use_encoder_pooler,
inner_dim=encoder_cfg.hidden_size * 2,
)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
label_ids = labels[self.label_field]
if self.task_config.model.num_classes == 1:
loss = tf.keras.losses.mean_squared_error(label_ids, model_outputs)
else:
loss = tf.keras.losses.sparse_categorical_crossentropy(
label_ids, tf.cast(model_outputs, tf.float32), from_logits=True
)
if aux_losses:
loss += tf.add_n(aux_losses)
return tf_utils.safe_mean(loss)
  def build_inputs(self, params, input_context=None):
    """Returns a tf.data.Dataset for the dual encoder task."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
left_word_ids=dummy_ids,
left_mask=dummy_ids,
right_word_ids=dummy_ids,
right_mask=dummy_ids,
)
if self.task_config.model.num_classes == 1:
y = tf.zeros((1,), dtype=tf.float32)
else:
y = tf.zeros((1, 1), dtype=tf.int32)
x[self.label_field] = y
return x
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
if self.task_config.model.num_classes == 1:
metrics = [tf.keras.metrics.MeanSquaredError()]
elif self.task_config.model.num_classes == 2:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
tf.keras.metrics.AUC(name='auc', curve='PR'),
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
for metric in metrics:
if metric.name == 'auc':
        # Convert logits to probabilities and take the probability of the
        # positive class.
metric.update_state(
labels[self.label_field],
tf.expand_dims(tf.nn.softmax(model_outputs)[:, 1], axis=1),
)
if metric.name == 'cls_accuracy':
metric.update_state(labels[self.label_field], model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
compiled_metrics.update_state(labels[self.label_field], model_outputs)
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses
)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
if self.metric_type == 'matthews_corrcoef':
logs.update({
'sentence_prediction': (
tf.expand_dims( # Ensure one prediction along batch dimension.
tf.math.argmax(outputs, axis=1), axis=1
)
),
'labels': labels[self.label_field],
})
else:
logs.update({
'sentence_prediction': outputs,
'labels': labels[self.label_field],
})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self.metric_type == 'accuracy':
return None
if state is None:
state = {'sentence_prediction': [], 'labels': []}
state['sentence_prediction'].append(
np.concatenate(
[v.numpy() for v in step_outputs['sentence_prediction']], axis=0
)
)
state['labels'].append(
np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0)
)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.metric_type == 'accuracy':
return None
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
if self.metric_type == 'f1':
preds = np.argmax(preds, axis=1)
return {self.metric_type: sklearn_metrics.f1_score(labels, preds)}
elif self.metric_type == 'matthews_corrcoef':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
return {
self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
}
elif self.metric_type == 'pearson_spearman_corr':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
corr_metric = (pearson_corr + spearman_corr) / 2
return {self.metric_type: corr_metric}
  def initialize(self, model):
    """Loads a pretrained checkpoint (if any) and then trains from step 0."""
ckpt_dir_or_file = self.task_config.init_checkpoint
logging.info(
'Trying to load pretrained checkpoint from %s', ckpt_dir_or_file
)
if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
logging.info(
'No checkpoint file found from %s. Will not load.', ckpt_dir_or_file
)
return
pretrain2finetune_mapping = {
'encoder': model.checkpoint_items['encoder'],
}
if self.task_config.init_cls_pooler:
# This option is valid when use_encoder_pooler is false.
pretrain2finetune_mapping['next_sentence.pooler_dense'] = (
model.checkpoint_items['sentence_prediction.pooler_dense']
)
ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info(
'Finished loading pretrained checkpoint from %s', ckpt_dir_or_file
)
def predict(
task: DualEncoderTask,
params: cfg.DataConfig,
model: tf.keras.Model,
params_aug: Optional[cfg.DataConfig] = None,
test_time_aug_wgt: float = 0.3,
) -> List[Union[int, float]]:
"""Predicts on the input data.
Args:
task: A `DualEncoderTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
params_aug: A `cfg.DataConfig` object for augmented data.
    test_time_aug_wgt: Test time augmentation weight. The final prediction
      score is (1. - test_time_aug_wgt) * original prediction +
      test_time_aug_wgt * augmented prediction.
Returns:
A list of predictions with length of `num_examples`. For regression task,
each element in the list is the predicted score; for classification task,
each element is the predicted class id.
"""
def predict_step(inputs):
"""Replicated prediction calculation."""
x = inputs
example_id = x.pop('example_id')
outputs = task.inference_step(x, model)
return dict(example_id=example_id, predictions=outputs)
def aggregate_fn(state, outputs):
"""Concatenates model's outputs."""
if state is None:
state = []
for per_replica_example_id, per_replica_batch_predictions in zip(
outputs['example_id'], outputs['predictions']
):
state.extend(zip(per_replica_example_id, per_replica_batch_predictions))
return state
dataset = orbit.utils.make_distributed_dataset(
tf.distribute.get_strategy(), task.build_inputs, params
)
outputs = utils.predict(predict_step, aggregate_fn, dataset)
# When running on TPU POD, the order of output cannot be maintained,
# so we need to sort by example_id.
outputs = sorted(outputs, key=lambda x: x[0])
is_regression = task.task_config.model.num_classes == 1
if params_aug is not None:
dataset_aug = orbit.utils.make_distributed_dataset(
tf.distribute.get_strategy(), task.build_inputs, params_aug
)
outputs_aug = utils.predict(predict_step, aggregate_fn, dataset_aug)
outputs_aug = sorted(outputs_aug, key=lambda x: x[0])
if is_regression:
return [
(1.0 - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1]
for x, y in zip(outputs, outputs_aug)
]
else:
return [
tf.argmax(
(1.0 - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1],
axis=-1,
)
for x, y in zip(outputs, outputs_aug)
]
if is_regression:
return [x[1] for x in outputs]
else:
return [tf.argmax(x[1], axis=-1) for x in outputs]
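# A hedged configuration sketch (added for illustration; not part of the
# original file). It shows how a `DualEncoderConfig` might be assembled for a
# small experiment. The paths and sizes are placeholders, and a real setup
# would typically use the data config class registered with
# `data_loader_factory` for this task rather than the base `cfg.DataConfig`.
def _example_dual_encoder_config() -> DualEncoderConfig:
  return DualEncoderConfig(
      metric_type='accuracy',
      model=ModelConfig(num_classes=2, max_seq_length=128),
      train_data=cfg.DataConfig(
          input_path='/placeholder/train*.tfrecord', global_batch_size=8),
      validation_data=cfg.DataConfig(
          input_path='/placeholder/eval*.tfrecord', global_batch_size=8),
  )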
| 12,471 | 34.634286 | 80 | py |
models | models-master/official/projects/lra/exponential_moving_average.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based multi-head exponential moving average (EMA) layer for MEGA."""
from typing import Optional
import tensorflow as tf
class MultiHeadEMA(tf.keras.layers.Layer):
"""Exponential Moving Average Layer.
See "https://arxiv.org/abs/2209.10655" for more details.
"""
def __init__(
self, embed_dim, ndim=2, bidirectional=False, truncation=None, **kwargs
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.ndim = ndim
self.bidirectional = bidirectional
self.truncation = truncation
self.scale = tf.math.sqrt(1.0 / self.ndim)
self.kernel_dim = 2 * embed_dim if self.bidirectional else embed_dim
self._kernel = None
self._coeffs = None
def build(self, input_shape):
self.damping_factor = self.add_weight(
shape=(self.kernel_dim, self.ndim, 1),
initializer="random_normal",
trainable=True,
name="damping_factor",
dtype=tf.float32,
)
self.decay_factor = self.add_weight(
shape=(self.kernel_dim, self.ndim, 1),
initializer="random_normal",
trainable=True,
name="decay_factor",
dtype=tf.float32,
)
self.ema_expansion_matrix = self.add_weight(
shape=(self.kernel_dim, self.ndim, 1),
initializer="random_normal",
trainable=True,
name="ema_expansion_matrix",
dtype=tf.float32,
)
self.kernel_projection_matrix = self.add_weight(
shape=(self.kernel_dim, self.ndim),
initializer="random_normal",
trainable=True,
name="kernel_projection_matrix",
dtype=tf.float32,
)
self.residual_weight = self.add_weight(
shape=(self.embed_dim,),
initializer="ones",
trainable=True,
name="residual_weight",
dtype=tf.float32,
)
super().build(input_shape)
def _calc_coeffs(self):
self._coeffs = None
# D x N x 1
damping_factor = tf.math.sigmoid(self.damping_factor)
decay_factor = tf.math.sigmoid(self.decay_factor)
previous_timestep_weight = 1.0 - damping_factor * decay_factor
return damping_factor, previous_timestep_weight
def _compute_kernel(self, length: int):
self._kernel = None
# D x N x 1
damping_factor, previous_timestep_weight = self._calc_coeffs()
# D x N x L
vander = tf.cast(
tf.reshape(tf.range(length), shape=(1, 1, length)),
dtype=damping_factor.dtype,
) * tf.math.log(previous_timestep_weight)
kernel = (damping_factor * self.ema_expansion_matrix) * tf.math.exp(vander)
# D x L
return tf.einsum(
"dnl,dn->dl", kernel, self.kernel_projection_matrix * self.scale
)
def coeffs(self):
if self.training:
return self._calc_coeffs()
else:
if self._coeffs is None:
self._coeffs = self._calc_coeffs()
return self._coeffs
def kernel(self, length: int):
    assert self.truncation is None, "Kernel truncation is not supported."
kernel_size = (
length if self.truncation is None else min(self.truncation, length)
)
return self._compute_kernel(kernel_size)
  def call(self, x, padding_mask: Optional[tf.Tensor] = None) -> tf.Tensor:
    """Applies the multi-dimensional damped EMA.
    Input shape: Time x Batch x Channel.
    Args:
      x: Input tensor of shape `(seq_len, batch, embed_dim)`.
      padding_mask: Optional mask of shape `(batch, seq_len)` with 1s at valid
        (non-padding) positions; padded positions are zeroed out before the
        EMA is applied.
    Returns:
      The transformed tensor, of shape `(seq_len, batch, embed_dim)`.
    """
seq_len, _, embed_dim = x.shape
assert embed_dim == self.embed_dim
if seq_len is None:
seq_len = 1
# L x B x D
residual = x * self.residual_weight
# L x B x D -> B x D x L
x = tf.transpose(x, perm=(1, 2, 0))
# Masking of the tensor
if padding_mask is not None:
x = x * tf.cast(tf.expand_dims(padding_mask, axis=1), x.dtype)
k = self.kernel(seq_len)
kernel_size = k.shape[1]
fft_len = seq_len
s = 0
if self.bidirectional:
k1, k2 = tf.split(k, [self.embed_dim, self.embed_dim], axis=0)
# D x 2*L-1
padding_l = tf.constant([[0, 0], [kernel_size - 1, 0]])
padding_r = tf.constant([[0, 0], [0, kernel_size - 1]])
padding_x = tf.constant([[0, 0], [0, 0], [kernel_size - 1, 0]])
k = tf.pad(k1, padding_l) + tf.pad(tf.reverse(k2, axis=[-1]), padding_r)
x = tf.pad(x, padding_x)
fft_len = fft_len + kernel_size - 1
s = 2 * kernel_size - 2
k_f = tf.signal.rfft(
k, fft_length=tf.constant([2 * fft_len], dtype=tf.int32)
)
x_f = tf.signal.rfft(
x, fft_length=tf.constant([2 * fft_len], dtype=tf.int32)
)
# B x D x L
out = tf.signal.irfft(
x_f * k_f, fft_length=tf.constant([2 * fft_len], dtype=tf.int32)
)[..., s : s + seq_len]
# B x D x L -> L x B x D
out = tf.nn.silu(tf.transpose(out, perm=(2, 0, 1)) + residual)
return out
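# A hedged usage sketch (added for illustration; not part of the original
# file). It applies the EMA layer to a time-major input, matching the
# Time x Batch x Channel layout documented in `call`. The mask uses 1s for
# valid positions. Shapes are arbitrary placeholders.
def _multi_head_ema_example():
  ema = MultiHeadEMA(embed_dim=16, ndim=2, bidirectional=True)
  x = tf.random.normal([8, 2, 16])  # (seq_len, batch, embed_dim)
  padding_mask = tf.ones([2, 8], dtype=tf.float32)  # (batch, seq_len)
  y = ema(x, padding_mask)
  assert y.shape == (8, 2, 16)
  return y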
| 5,446 | 29.601124 | 79 | py |
models | models-master/official/projects/lra/linformer_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linformer encoder. Modified from huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
from official.projects.lra.linformer_encoder_block import LinformerEncoderBlock
layers = tfm.nlp.layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class LinformerEncoder(tf.keras.layers.Layer):
"""LinformerEncoder.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
low_rank_features: The number of dimensions for low rank projection.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
low_rank_features: int = 32,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: Callable[..., Any] = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02
),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
**kwargs
):
super().__init__(**kwargs)
# Linformer args
self._low_rank_features = low_rank_features
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings',
)
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding',
)
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings',
)
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32
)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout'
)
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection',
)
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask'
)
for i in range(num_layers):
layer = LinformerEncoderBlock(
num_attention_heads=num_attention_heads,
low_rank_features=low_rank_features,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
return_attention_scores=False,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % i,
)
self._transformer_layers.append(layer)
self._num_layers = num_layers
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform',
)
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'low_rank_features': low_rank_features,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
def call(self, inputs):
if isinstance(inputs, dict):
word_embeddings = inputs.get('input_word_embeddings', None)
type_ids = inputs.get('input_type_ids', None)
if 'input_word_ids' in inputs.keys():
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
elif 'left_word_ids' in inputs.keys():
word_ids = inputs.get('left_word_ids')
mask = inputs.get('left_mask')
elif 'right_word_ids' in inputs.keys():
word_ids = inputs.get('right_word_ids')
mask = inputs.get('right_mask')
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
elif isinstance(inputs, list):
## Dual Encoder Tasks
word_ids, mask = inputs
word_embeddings = None
type_ids = None
dense_inputs, dense_mask, dense_type_ids = None, None, None
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if type_ids is None:
type_ids = tf.zeros_like(mask)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
mask = tf.concat([mask, dense_mask], axis=1)
embeddings = self._get_embeddings(
word_ids, type_ids, word_embeddings, dense_inputs, dense_type_ids
)
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
for layer in self._transformer_layers:
x = layer([x, attention_mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs,
)
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.'
)
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _get_embeddings(
self,
word_ids: tf.Tensor,
type_ids: tf.Tensor,
word_embeddings: Optional[tf.Tensor],
dense_inputs: Optional[tf.Tensor],
dense_type_ids: Optional[tf.Tensor],
) -> tf.Tensor:
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
type_embeddings = self._type_embedding_layer(type_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
return word_embeddings + position_embeddings + type_embeddings
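# A hedged usage sketch (added for illustration; not part of the original
# file). It mirrors the dict-based input interface in `call` with a tiny
# configuration. It assumes `LinformerEncoderBlock` accepts the
# `[x, attention_mask]` inputs used above; all sizes are placeholders.
def _linformer_encoder_example():
  encoder = LinformerEncoder(
      vocab_size=100,
      hidden_size=32,
      num_layers=2,
      num_attention_heads=4,
      low_rank_features=8,
      max_sequence_length=16,
      inner_dim=64,
  )
  batch = dict(
      input_word_ids=tf.ones([2, 16], dtype=tf.int32),
      input_mask=tf.ones([2, 16], dtype=tf.int32),
      input_type_ids=tf.zeros([2, 16], dtype=tf.int32),
  )
  outputs = encoder(batch)
  assert outputs['pooled_output'].shape == (2, 32)
  return outputs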
| 11,332 | 35.915309 | 80 | py |
models | models-master/official/projects/lra/linformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linformer model configurations and instantiation methods."""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.lra.linformer_encoder import LinformerEncoder
@dataclasses.dataclass
class LinformerEncoderConfig(encoders.BertEncoderConfig):
  """Extra parameters for Linformer configs.
Attributes:
pad_token_id: the token id for the pad token
low_rank_features: number of dimensions for low-rank projection
"""
pad_token_id: int = 0
low_rank_features: int = 256
@base_config.bind(LinformerEncoderConfig)
def get_encoder(encoder_cfg: LinformerEncoderConfig):
"""Gets a 'LinformerEncoder' object.
Args:
encoder_cfg: A 'LinformerEncoderConfig'.
Returns:
    An encoder object.
"""
encoder = LinformerEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
low_rank_features=encoder_cfg.low_rank_features,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first,
)
return encoder
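# A hedged usage sketch (added for illustration; not part of the original
# file). It builds a small `LinformerEncoder` from its config; the field
# values are placeholders, not recommended hyperparameters.
def _example_build_linformer_encoder():
  config = LinformerEncoderConfig(
      vocab_size=100,
      hidden_size=64,
      num_layers=2,
      num_attention_heads=4,
      low_rank_features=32,
  )
  return get_encoder(config)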
| 2,362 | 33.246377 | 78 | py |
models | models-master/official/projects/lra/transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer model configurations and instantiation methods."""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.lra.transformer_encoder import TransformerEncoder
@dataclasses.dataclass
class TransformerEncoderConfig(encoders.BertEncoderConfig):
  """Extra parameters for Transformer configs.
  This config adds no fields beyond `encoders.BertEncoderConfig`; it exists so
  the vanilla Transformer encoder can be registered as a separate encoder type.
  """
@base_config.bind(TransformerEncoderConfig)
def get_encoder(encoder_cfg: TransformerEncoderConfig):
"""Gets a 'TransformerEncoder' object.
Args:
encoder_cfg: A 'TransformerEncoderConfig'.
Returns:
A encoder object.
"""
encoder = TransformerEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first,
)
return encoder
| 2,176 | 34.112903 | 78 | py |
models | models-master/official/projects/lra/mega.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mega model configurations and instantiation methods."""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.lra.mega_encoder import MegaEncoder
@dataclasses.dataclass
class MegaEncoderConfig(encoders.BertEncoderConfig):
  """Extra parameters for Mega configs.
  Attributes:
    zdim: hidden dimension of the gating states used in the MEGA layer.
    hdim: hidden dimension of the value states used in the MEGA layer.
    ndim: number of EMA dimensions used in the MEGA layer.
    activation: activation function used inside the MEGA layer.
    bidirectional: whether the multi-head EMA runs in both directions.
    dropout: dropout rate applied to MEGA layer outputs.
    hidden_dropout: dropout rate applied to hidden states in the MEGA layer.
  """
zdim: int = 64
hdim: int = 256
ndim: int = 16
activation: str = 'silu'
bidirectional: bool = False
dropout: float = 0.0
hidden_dropout: float = 0.0
@base_config.bind(MegaEncoderConfig)
def get_encoder(encoder_cfg: MegaEncoderConfig):
"""Gets a 'MegaEncoder' object.
Args:
encoder_cfg: A 'MegaEncoderConfig'.
Returns:
    An encoder object.
"""
encoder = MegaEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
zdim=encoder_cfg.zdim,
hdim=encoder_cfg.hdim,
ndim=encoder_cfg.ndim,
activation=encoder_cfg.activation,
bidirectional=encoder_cfg.bidirectional,
dropout=encoder_cfg.dropout,
hidden_dropout=encoder_cfg.hidden_dropout,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first,
)
return encoder
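# A hedged usage sketch (added for illustration; not part of the original
# file). It builds a small `MegaEncoder` from its config. Note that
# `get_encoder` forwards `embedding_size` as the embedding width, so it is
# set explicitly here; all values are placeholders, not recommended settings.
def _example_build_mega_encoder():
  config = MegaEncoderConfig(
      vocab_size=100,
      num_layers=2,
      embedding_size=64,
      zdim=16,
      hdim=64,
      ndim=4,
      bidirectional=True,
  )
  return get_encoder(config)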
| 2,470 | 31.090909 | 78 | py |
models | models-master/official/projects/lra/mega_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mega encoder. Modified from huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
from official.projects.lra.moving_average_gated_attention import MovingAverageGatedAttention
layers = tfm.nlp.layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
@tf.keras.utils.register_keras_serializable(package='Text')
class MegaEncoder(tf.keras.layers.Layer):
"""MegaEncoder.
Args:
vocab_size: The size of the token vocabulary.
embedding_width: The number of embedding dimensions.
    intermediate_size: The number of dimensions for the MLP layers.
num_layers: The number of transformer layers.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
    zdim: hidden dimension of the gating states used in the MEGA layer.
    hdim: hidden dimension of the value states used in the MEGA layer.
    ndim: number of EMA dimensions used in the MEGA layer.
    activation: The activation used inside the MEGA attention layer, e.g.
      'silu'.
bidirectional: Whether to use bidirectional EMA.
dropout: Dropout probability for the post-attention and output dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
hidden_dropout: The dropout rate to use for hidden states in MEGA.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
embedding_width: int = 128,
intermediate_size: int = 256,
num_layers: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
zdim: int = 64,
hdim: int = 256,
ndim: int = 16,
activation='silu',
bidirectional=False,
dropout: float = 0.0,
attention_dropout: float = 0.0,
hidden_dropout: float = 0.0,
inner_activation: Callable[..., Any] = _approx_gelu,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02
),
output_range: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
hidden_size: Optional[int] = None,
**kwargs
):
super().__init__(**kwargs)
# Mega args
initializer = tf.keras.initializers.get(initializer)
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings',
)
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding',
)
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings',
)
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32
)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=dropout, name='embedding_dropout'
)
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask'
)
for _ in range(num_layers):
layer = MovingAverageGatedAttention(
embed_dim=embedding_width,
zdim=zdim,
hdim=hdim,
ndim=ndim,
intermediate_size=intermediate_size,
inner_activation=inner_activation,
dropout=dropout,
attention_dropout=attention_dropout,
hidden_dropout=hidden_dropout,
activation=activation,
bidirectional=bidirectional,
prenorm=norm_first,
max_positions=max_sequence_length,
use_bias=True,
return_attention_scores=False,
kernel_initializer=tf_utils.clone_initializer(initializer),
)
self._transformer_layers.append(layer)
self._num_layers = num_layers
self._pooler_layer = tf.keras.layers.Dense(
units=embedding_width,
activation='silu',
kernel_initializer=initializer,
name='pooler_transform',
)
self._config = {
'vocab_size': vocab_size,
'num_layers': num_layers,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'zdim': zdim,
'hdim': hdim,
'ndim': ndim,
'activation': activation,
'bidirectional': bidirectional,
'dropout': dropout,
'attention_dropout': attention_dropout,
'hidden_dropout': hidden_dropout,
'inner_activation': tf.keras.activations.serialize(inner_activation),
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
if 'input_word_ids' in inputs.keys():
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids', None)
word_embeddings = inputs.get('input_word_embeddings', None)
elif 'left_word_ids' in inputs.keys():
word_ids = inputs.get('left_word_ids')
mask = inputs.get('left_mask')
elif 'right_word_ids' in inputs.keys():
word_ids = inputs.get('right_word_ids')
mask = inputs.get('right_mask')
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
elif isinstance(inputs, list):
## Dual Encoder Tasks
word_ids, mask = inputs
type_ids = None
dense_inputs, dense_mask = None, None
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if type_ids is None:
type_ids = tf.zeros_like(mask)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
mask = tf.concat([mask, dense_mask], axis=1)
embeddings = self._embedding_norm_layer(word_embeddings)
embeddings = self._embedding_dropout(embeddings)
encoder_outputs = []
x = embeddings
for l in range(self._num_layers):
if x.shape[0] is None:
pass
else:
x = self._transformer_layers[l]([x, mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
avg_token_tensor = tf.math.reduce_mean(last_encoder_output, axis=1)
pooled_output = self._pooler_layer(avg_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs,
)
return output
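  # Illustrative usage sketch, kept entirely in comments so module behavior is
  # unchanged. The shapes below are assumptions for a toy configuration, and
  # `encoder` stands for an instance of this encoder class:
  #
  #   outputs = encoder(dict(
  #       input_word_ids=tf.ones((2, 16), dtype=tf.int32),
  #       input_mask=tf.ones((2, 16), dtype=tf.int32),
  #       input_type_ids=tf.zeros((2, 16), dtype=tf.int32)))
  #   outputs['sequence_output']  # [2, 16, embedding_width]
  #   outputs['pooled_output']    # [2, embedding_width]
  #   outputs['encoder_outputs']  # list with one entry per transformer layer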
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.'
)
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _get_embeddings(
self,
word_ids: tf.Tensor,
type_ids: tf.Tensor,
word_embeddings: Optional[tf.Tensor],
dense_inputs: Optional[tf.Tensor],
dense_type_ids: Optional[tf.Tensor],
) -> tf.Tensor:
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
type_embeddings = self._type_embedding_layer(type_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
return word_embeddings + position_embeddings + type_embeddings
| 10,751 | 34.368421 | 92 | py |
models | models-master/official/projects/lra/lra_dual_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for dual encoder style models."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
import tensorflow_models as tfm
@tf.keras.utils.register_keras_serializable(package='Text')
class LRADualEncoder(tf.keras.layers.Layer):
"""A dual encoder model based on a transformer-based encoder.
  This is an implementation of the dual encoder network structure based on the
  transformer stack, as described in ["Language-agnostic BERT Sentence
  Embedding"](https://arxiv.org/abs/2007.01852).
  The LRADualEncoder takes a transformer encoder network, applies it to the
  left and right inputs separately, and classifies the pair from the
  concatenated encodings.
  Args:
    network: A transformer network which should output an encoding output.
    num_classes: Number of classes predicted by the classification head.
    max_seq_length: The maximum allowed sequence length for the transformer.
    dropout_rate: Dropout probability used in the classification head.
    initializer: Initializer for the classification head.
    use_encoder_pooler: If `True`, classify from the encoder's pooled output;
      otherwise classify from the sequence output.
    inner_dim: The hidden dimension of the classification head, used when
      `use_encoder_pooler` is `False`.
    head_name: Name of the classification head.
  """
def __init__(
self,
network,
num_classes,
max_seq_length,
dropout_rate=0.1,
initializer='glorot_uniform',
use_encoder_pooler=True,
inner_dim=None,
head_name='dual_encode',
**kwargs
):
super().__init__(**kwargs)
config_dict = {
'network': network,
'num_classes': num_classes,
'head_name': head_name,
'max_seq_length': max_seq_length,
'initializer': initializer,
'use_encoder_pooler': use_encoder_pooler,
'inner_dim': inner_dim,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self._use_encoder_pooler = use_encoder_pooler
self.network = network
self.classifier = tfm.nlp.layers.ClassificationHead(
inner_dim=0 if use_encoder_pooler else inner_dim,
num_classes=num_classes,
initializer=initializer,
dropout_rate=dropout_rate,
name=head_name,
)
def call(self, inputs):
if isinstance(inputs, dict):
left_word_ids = inputs.get('left_word_ids')
left_mask = inputs.get('left_mask')
right_word_ids = inputs.get('right_word_ids')
right_mask = inputs.get('right_mask')
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
inputs = [left_word_ids, left_mask, right_word_ids, right_mask]
left_inputs = [left_word_ids, left_mask]
left_outputs = self.network(left_inputs)
right_inputs = [right_word_ids, right_mask]
right_outputs = self.network(right_inputs)
if self._use_encoder_pooler:
      # Classify from the pooled output produced by each encoder tower's own
      # pooler layer.
if isinstance(left_outputs, list):
left_cls_inputs = left_outputs[1]
right_cls_inputs = right_outputs[1]
else:
left_cls_inputs = left_outputs['pooled_output']
right_cls_inputs = right_outputs['pooled_output']
else:
if isinstance(left_outputs, list):
left_cls_inputs = left_outputs[0]
right_cls_inputs = right_outputs[0]
else:
left_cls_inputs = left_outputs['sequence_output']
right_cls_inputs = right_outputs['sequence_output']
cls_inputs = tf.concat([left_cls_inputs, right_cls_inputs], -1)
predictions = self.classifier(cls_inputs)
return predictions
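  # Illustrative usage sketch (comments only). The dict keys match the ones
  # consumed in `call` above; shapes and constructor values are assumptions,
  # and `encoder` stands for any compatible transformer encoder network:
  #
  #   dual_encoder = LRADualEncoder(
  #       network=encoder, num_classes=2, max_seq_length=16, inner_dim=128)
  #   logits = dual_encoder(dict(
  #       left_word_ids=tf.ones((2, 16), tf.int32),
  #       left_mask=tf.ones((2, 16), tf.int32),
  #       right_word_ids=tf.ones((2, 16), tf.int32),
  #       right_mask=tf.ones((2, 16), tf.int32)))  # -> [2, num_classes]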
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.network)
return items
| 4,977 | 35.602941 | 80 | py |
models | models-master/official/projects/mobilebert/export_tfhub.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export the MobileBERT encoder model as a TF-Hub SavedModel."""
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.projects.mobilebert import model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
"bert_config_file", None,
"Bert configuration file to define core mobilebert layers.")
flags.DEFINE_string("model_checkpoint_path", None,
"File path to TF model checkpoint.")
flags.DEFINE_string("export_path", None, "TF-Hub SavedModel destination path.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool("do_lower_case", True, "Whether to lowercase.")
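# Example invocation (illustrative only; every path below is a placeholder,
# not a file shipped with this project):
#
#   python3 export_tfhub.py \
#     --bert_config_file=/tmp/mobilebert/bert_config.json \
#     --model_checkpoint_path=/tmp/mobilebert/model.ckpt \
#     --vocab_file=/tmp/mobilebert/vocab.txt \
#     --export_path=/tmp/mobilebert_hub \
#     --do_lower_case=True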
def create_mobilebert_model(bert_config):
"""Creates a model for exporting to tfhub."""
pretrainer = model_utils.create_mobilebert_pretrainer(bert_config)
encoder = pretrainer.encoder_network
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
# For interchangeability with other text representations,
  # add "default" as an alias for MobileBERT's whole-input representations.
encoder_output_dict["default"] = encoder_output_dict["pooled_output"]
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
pretrainer_inputs_dict = {x.name: x for x in pretrainer.inputs}
pretrainer_output_dict = pretrainer(pretrainer_inputs_dict)
mlm_model = tf.keras.Model(
inputs=pretrainer_inputs_dict, outputs=pretrainer_output_dict)
# Set `_auto_track_sub_layers` to False, so that the additional weights
# from `mlm` sub-object will not be included in the core model.
# TODO(b/169210253): Use public API after the bug is resolved.
core_model._auto_track_sub_layers = False # pylint: disable=protected-access
core_model.mlm = mlm_model
return core_model, pretrainer
def export_bert_tfhub(bert_config, model_checkpoint_path, hub_destination,
vocab_file, do_lower_case):
"""Restores a tf.keras.Model and saves for TF-Hub."""
core_model, pretrainer = create_mobilebert_model(bert_config)
checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)
logging.info("Begin to load model")
checkpoint.restore(model_checkpoint_path).assert_existing_objects_matched()
logging.info("Loading model finished")
core_model.vocab_file = tf.saved_model.Asset(vocab_file)
core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False)
logging.info("Begin to save files for tfhub at %s", hub_destination)
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
logging.info("tfhub files exported!")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
bert_config = model_utils.BertConfig.from_json_file(FLAGS.bert_config_file)
export_bert_tfhub(bert_config, FLAGS.model_checkpoint_path, FLAGS.export_path,
FLAGS.vocab_file, FLAGS.do_lower_case)
if __name__ == "__main__":
app.run(main)
| 3,689 | 41.413793 | 80 | py |
models | models-master/official/projects/mobilebert/model_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model-building utilities for MobileBERT (BertConfig and pretrainer)."""
import copy
import json
import tensorflow.compat.v1 as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import models
from official.nlp.modeling import networks
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
embedding_size=None,
trigram_input=False,
use_bottleneck=False,
intra_bottleneck_size=None,
use_bottleneck_attention=False,
key_query_shared_bottleneck=False,
num_feedforward_networks=1,
normalization_type="layer_norm",
classifier_activation=True):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
embedding_size: The size of the token embedding.
trigram_input: Use a convolution of trigram as input.
use_bottleneck: Use the bottleneck/inverted-bottleneck structure in BERT.
intra_bottleneck_size: The hidden size in the bottleneck.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation.
      key_query_shared_bottleneck: Use the same linear transformation for
        query and key in the bottleneck.
      num_feedforward_networks: Number of FFNs in a block.
      normalization_type: The normalization type in BERT.
      classifier_activation: Whether to use the tanh activation for the final
        representation of the [CLS] token in fine-tuning.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.embedding_size = embedding_size
self.trigram_input = trigram_input
self.use_bottleneck = use_bottleneck
self.intra_bottleneck_size = intra_bottleneck_size
self.use_bottleneck_attention = use_bottleneck_attention
self.key_query_shared_bottleneck = key_query_shared_bottleneck
self.num_feedforward_networks = num_feedforward_networks
self.normalization_type = normalization_type
self.classifier_activation = classifier_activation
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in json_object.items():
config.__dict__[key] = value
if config.embedding_size is None:
config.embedding_size = config.hidden_size
if config.intra_bottleneck_size is None:
config.intra_bottleneck_size = config.hidden_size
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
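# Illustrative BertConfig usage (a sketch; the path and field values are
# placeholders, not defaults of this module):
#
#   config = BertConfig.from_json_file('/tmp/mobilebert/bert_config.json')
#   # or, equivalently, from an in-memory dict:
#   config = BertConfig.from_dict({'vocab_size': 30522, 'use_bottleneck': True})
#   print(config.to_json_string())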
def create_mobilebert_pretrainer(bert_config):
"""Creates a BertPretrainerV2 that wraps MobileBERTEncoder model."""
mobilebert_encoder = networks.MobileBERTEncoder(
word_vocab_size=bert_config.vocab_size,
word_embed_size=bert_config.embedding_size,
type_vocab_size=bert_config.type_vocab_size,
max_sequence_length=bert_config.max_position_embeddings,
num_blocks=bert_config.num_hidden_layers,
hidden_size=bert_config.hidden_size,
num_attention_heads=bert_config.num_attention_heads,
intermediate_size=bert_config.intermediate_size,
intermediate_act_fn=tf_utils.get_activation(bert_config.hidden_act),
hidden_dropout_prob=bert_config.hidden_dropout_prob,
attention_probs_dropout_prob=bert_config.attention_probs_dropout_prob,
intra_bottleneck_size=bert_config.intra_bottleneck_size,
initializer_range=bert_config.initializer_range,
use_bottleneck_attention=bert_config.use_bottleneck_attention,
key_query_shared_bottleneck=bert_config.key_query_shared_bottleneck,
num_feedforward_networks=bert_config.num_feedforward_networks,
normalization_type=bert_config.normalization_type,
classifier_activation=bert_config.classifier_activation)
masked_lm = layers.MobileBertMaskedLM(
embedding_table=mobilebert_encoder.get_embedding_table(),
activation=tf_utils.get_activation(bert_config.hidden_act),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
name="cls/predictions")
pretrainer = models.BertPretrainerV2(
encoder_network=mobilebert_encoder, customized_masked_lm=masked_lm)
# Makes sure the pretrainer variables are created.
_ = pretrainer(pretrainer.inputs)
return pretrainer
| 7,470 | 42.690058 | 80 | py |
models | models-master/official/projects/mobilebert/distillation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Progressive distillation for MobileBERT student model."""
import dataclasses
from typing import List, Optional
from absl import logging
import orbit
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.fast_training.progressive import policies
from official.modeling.hyperparams import base_config
from official.nlp import modeling
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class LayerWiseDistillConfig(base_config.Config):
"""Defines the behavior of layerwise distillation."""
num_steps: int = 10000
warmup_steps: int = 0
initial_learning_rate: float = 1.5e-3
end_learning_rate: float = 1.5e-3
decay_steps: int = 10000
hidden_distill_factor: float = 100.0
beta_distill_factor: float = 5000.0
gamma_distill_factor: float = 5.0
if_transfer_attention: bool = True
attention_distill_factor: float = 1.0
if_freeze_previous_layers: bool = False
# The ids of teacher layers that will be mapped to the student model.
# For example, if you want to compress a 24 layer teacher to a 6 layer
# student, you can set it to [3, 7, 11, 15, 19, 23] (the index starts from 0).
# If `None`, we assume teacher and student have the same number of layers,
# and each layer of teacher model will be mapped to student's corresponding
# layer.
transfer_teacher_layers: Optional[List[int]] = None
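  # Illustrative override (a sketch): to distill a 24-layer teacher into a
  # 6-layer student one could pass
  #   LayerWiseDistillConfig(transfer_teacher_layers=[3, 7, 11, 15, 19, 23])
  # so that student layer i is matched against teacher layer
  # transfer_teacher_layers[i] during the layer-wise stages.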
@dataclasses.dataclass
class PretrainDistillConfig(base_config.Config):
"""Defines the behavior of pretrain distillation."""
num_steps: int = 500000
warmup_steps: int = 10000
initial_learning_rate: float = 1.5e-3
end_learning_rate: float = 1.5e-7
decay_steps: int = 500000
if_use_nsp_loss: bool = True
distill_ground_truth_ratio: float = 0.5
@dataclasses.dataclass
class BertDistillationProgressiveConfig(policies.ProgressiveConfig):
"""Defines the specific distillation behavior."""
if_copy_embeddings: bool = True
layer_wise_distill_config: LayerWiseDistillConfig = dataclasses.field(
default_factory=LayerWiseDistillConfig
)
pretrain_distill_config: PretrainDistillConfig = dataclasses.field(
default_factory=PretrainDistillConfig
)
@dataclasses.dataclass
class BertDistillationTaskConfig(cfg.TaskConfig):
"""Defines the teacher/student model architecture and training data."""
teacher_model: bert.PretrainerConfig = dataclasses.field(
default_factory=lambda: bert.PretrainerConfig( # pylint: disable=g-long-lambda
encoder=encoders.EncoderConfig(type='mobilebert')
)
)
student_model: bert.PretrainerConfig = dataclasses.field(
default_factory=lambda: bert.PretrainerConfig( # pylint: disable=g-long-lambda
encoder=encoders.EncoderConfig(type='mobilebert')
)
)
# The path to the teacher model checkpoint or its directory.
teacher_model_init_checkpoint: str = ''
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
def build_sub_encoder(encoder, target_layer_id):
  """Builds an encoder that only computes the first few transformer layers."""
input_ids = encoder.inputs[0]
input_mask = encoder.inputs[1]
type_ids = encoder.inputs[2]
attention_mask = modeling.layers.SelfAttentionMask()(
inputs=input_ids, to_mask=input_mask)
embedding_output = encoder.embedding_layer(input_ids, type_ids)
layer_output = embedding_output
attention_score = None
for layer_idx in range(target_layer_id + 1):
layer_output, attention_score = encoder.transformer_layers[layer_idx](
layer_output, attention_mask, return_attention_scores=True)
return tf.keras.Model(
inputs=[input_ids, input_mask, type_ids],
outputs=[layer_output, attention_score])
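# Note: the layer-wise distillation stages below call `build_sub_encoder` twice
# per stage -- once for the student (target_layer_id = stage_id) and once for
# the teacher (layer id taken from `transfer_teacher_layers` when it is set) --
# and compare the resulting [layer_output, attention_score] pairs in the
# distillation losses.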
class BertDistillationTask(policies.ProgressivePolicy, base_task.Task):
  """Progressive distillation task for language model pretraining."""
def __init__(self,
strategy,
progressive: BertDistillationProgressiveConfig,
optimizer_config: optimization.OptimizationConfig,
task_config: BertDistillationTaskConfig,
logging_dir=None):
self._strategy = strategy
self._task_config = task_config
self._progressive_config = progressive
self._optimizer_config = optimizer_config
self._train_data_config = task_config.train_data
self._eval_data_config = task_config.validation_data
self._the_only_train_dataset = None
self._the_only_eval_dataset = None
layer_wise_config = self._progressive_config.layer_wise_distill_config
transfer_teacher_layers = layer_wise_config.transfer_teacher_layers
num_teacher_layers = (
self._task_config.teacher_model.encoder.mobilebert.num_blocks)
num_student_layers = (
self._task_config.student_model.encoder.mobilebert.num_blocks)
if transfer_teacher_layers and len(
transfer_teacher_layers) != num_student_layers:
      raise ValueError('The number of `transfer_teacher_layers` %s does not '
                       'match the number of student layers (%d).' %
                       (transfer_teacher_layers, num_student_layers))
if not transfer_teacher_layers and (num_teacher_layers !=
num_student_layers):
raise ValueError('`transfer_teacher_layers` is not specified, and the '
'number of teacher layers does not match '
'the number of student layers.')
ratio = progressive.pretrain_distill_config.distill_ground_truth_ratio
if ratio < 0 or ratio > 1:
raise ValueError('distill_ground_truth_ratio has to be within [0, 1].')
# A non-trainable layer for feature normalization for transfer loss
self._layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
beta_initializer='zeros',
gamma_initializer='ones',
trainable=False)
# Build the teacher and student pretrainer model.
self._teacher_pretrainer = self._build_pretrainer(
self._task_config.teacher_model, name='teacher')
self._student_pretrainer = self._build_pretrainer(
self._task_config.student_model, name='student')
base_task.Task.__init__(
self, params=task_config, logging_dir=logging_dir)
policies.ProgressivePolicy.__init__(self)
def _build_pretrainer(self, pretrainer_cfg: bert.PretrainerConfig, name: str):
"""Builds pretrainer from config and encoder."""
encoder = encoders.build_encoder(pretrainer_cfg.encoder)
if pretrainer_cfg.cls_heads:
cls_heads = [
layers.ClassificationHead(**cfg.as_dict())
for cfg in pretrainer_cfg.cls_heads
]
else:
cls_heads = []
masked_lm = layers.MobileBertMaskedLM(
embedding_table=encoder.get_embedding_table(),
activation=tf_utils.get_activation(pretrainer_cfg.mlm_activation),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=pretrainer_cfg.mlm_initializer_range),
name='cls/predictions')
pretrainer = models.BertPretrainerV2(
encoder_network=encoder,
classification_heads=cls_heads,
customized_masked_lm=masked_lm,
name=name)
return pretrainer
# override policies.ProgressivePolicy
def num_stages(self):
# One stage for each layer, plus additional stage for pre-training
return self._task_config.student_model.encoder.mobilebert.num_blocks + 1
# override policies.ProgressivePolicy
def num_steps(self, stage_id) -> int:
"""Return the total number of steps in this stage."""
if stage_id + 1 < self.num_stages():
return self._progressive_config.layer_wise_distill_config.num_steps
else:
return self._progressive_config.pretrain_distill_config.num_steps
# override policies.ProgressivePolicy
def get_model(self, stage_id, old_model=None) -> tf.keras.Model:
del old_model
return self.build_model(stage_id)
# override policies.ProgressivePolicy
def get_optimizer(self, stage_id):
"""Build optimizer for each stage."""
if stage_id + 1 < self.num_stages():
distill_config = self._progressive_config.layer_wise_distill_config
else:
distill_config = self._progressive_config.pretrain_distill_config
params = self._optimizer_config.replace(
learning_rate={
'polynomial': {
'decay_steps':
distill_config.decay_steps,
'initial_learning_rate':
distill_config.initial_learning_rate,
'end_learning_rate':
distill_config.end_learning_rate,
}
},
warmup={
'linear':
{'warmup_steps':
distill_config.warmup_steps,
}
})
opt_factory = optimization.OptimizerFactory(params)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if isinstance(optimizer, tf.keras.optimizers.experimental.Optimizer):
optimizer = tf.keras.__internal__.optimizers.convert_to_legacy_optimizer(
optimizer)
return optimizer
# override policies.ProgressivePolicy
def get_train_dataset(self, stage_id: int) -> tf.data.Dataset:
"""Return Dataset for this stage."""
del stage_id
if self._the_only_train_dataset is None:
self._the_only_train_dataset = orbit.utils.make_distributed_dataset(
self._strategy, self.build_inputs, self._train_data_config)
return self._the_only_train_dataset
# overrides policies.ProgressivePolicy
def get_eval_dataset(self, stage_id):
del stage_id
if self._the_only_eval_dataset is None:
self._the_only_eval_dataset = orbit.utils.make_distributed_dataset(
self._strategy, self.build_inputs, self._eval_data_config)
return self._the_only_eval_dataset
# override base_task.task
def build_model(self, stage_id) -> tf.keras.Model:
"""Build teacher/student keras models with outputs for current stage."""
# Freeze the teacher model.
self._teacher_pretrainer.trainable = False
layer_wise_config = self._progressive_config.layer_wise_distill_config
freeze_previous_layers = layer_wise_config.if_freeze_previous_layers
student_encoder = self._student_pretrainer.encoder_network
if stage_id != self.num_stages() - 1:
# Build a model that outputs teacher's and student's transformer outputs.
inputs = student_encoder.inputs
student_sub_encoder = build_sub_encoder(
encoder=student_encoder, target_layer_id=stage_id)
student_output_feature, student_attention_score = student_sub_encoder(
inputs)
if layer_wise_config.transfer_teacher_layers:
teacher_layer_id = layer_wise_config.transfer_teacher_layers[stage_id]
else:
teacher_layer_id = stage_id
teacher_sub_encoder = build_sub_encoder(
encoder=self._teacher_pretrainer.encoder_network,
target_layer_id=teacher_layer_id)
teacher_output_feature, teacher_attention_score = teacher_sub_encoder(
inputs)
if freeze_previous_layers:
student_encoder.embedding_layer.trainable = False
for i in range(stage_id):
student_encoder.transformer_layers[i].trainable = False
return tf.keras.Model(
inputs=inputs,
outputs=dict(
student_output_feature=student_output_feature,
student_attention_score=student_attention_score,
teacher_output_feature=teacher_output_feature,
teacher_attention_score=teacher_attention_score))
else:
# Build a model that outputs teacher's and student's MLM/NSP outputs.
inputs = self._student_pretrainer.inputs
student_pretrainer_output = self._student_pretrainer(inputs)
teacher_pretrainer_output = self._teacher_pretrainer(inputs)
# Set all student's transformer blocks to trainable.
if freeze_previous_layers:
student_encoder.embedding_layer.trainable = True
for layer in student_encoder.transformer_layers:
layer.trainable = True
model = tf.keras.Model(
inputs=inputs,
outputs=dict(
student_pretrainer_output=student_pretrainer_output,
teacher_pretrainer_output=teacher_pretrainer_output,
))
# Checkpoint the student encoder which is the goal of distillation.
model.checkpoint_items = self._student_pretrainer.checkpoint_items
return model
# overrides base_task.Task
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
# copy from masked_lm.py for testing
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
  def _get_distribution_losses(self, teacher, student):
    """Returns the beta and gamma distill losses for feature distribution."""
teacher_mean = tf.math.reduce_mean(teacher, axis=-1, keepdims=True)
student_mean = tf.math.reduce_mean(student, axis=-1, keepdims=True)
teacher_var = tf.math.reduce_variance(teacher, axis=-1, keepdims=True)
student_var = tf.math.reduce_variance(student, axis=-1, keepdims=True)
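    # The two losses below, with mean/variance taken over the feature (last)
    # dimension, are:
    #   beta_loss  = mean((student_mean - teacher_mean)**2)
    #   gamma_loss = mean(|student_var - teacher_var|)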
beta_loss = tf.math.squared_difference(student_mean, teacher_mean)
beta_loss = tf.math.reduce_mean(beta_loss, axis=None, keepdims=False)
gamma_loss = tf.math.abs(student_var - teacher_var)
gamma_loss = tf.math.reduce_mean(gamma_loss, axis=None, keepdims=False)
return beta_loss, gamma_loss
def _get_attention_loss(self, teacher_score, student_score):
# Note that the definition of KLDivergence here is a little different from
# the original one (tf.keras.losses.KLDivergence). We adopt this approach
# to stay consistent with the TF1 implementation.
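    # Concretely, with p = softmax(teacher_score) and q = softmax(student_score)
    # over the attention axis, the quantity computed below is the cross-entropy
    # -sum_j p_j * log(q_j) = KL(p || q) + H(p); H(p) does not depend on the
    # student, so minimizing it gives the same student gradients as the KL.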
teacher_weight = tf.keras.activations.softmax(teacher_score, axis=-1)
student_log_weight = tf.nn.log_softmax(student_score, axis=-1)
kl_divergence = -(teacher_weight * student_log_weight)
kl_divergence = tf.math.reduce_sum(kl_divergence, axis=-1, keepdims=True)
kl_divergence = tf.math.reduce_mean(kl_divergence, axis=None,
keepdims=False)
return kl_divergence
  def build_losses(self, labels, outputs, metrics) -> tf.Tensor:
    """Builds losses and updates loss-related metrics for the current stage."""
last_stage = 'student_pretrainer_output' in outputs
# Layer-wise warmup stage
if not last_stage:
distill_config = self._progressive_config.layer_wise_distill_config
teacher_feature = outputs['teacher_output_feature']
student_feature = outputs['student_output_feature']
feature_transfer_loss = tf.keras.losses.mean_squared_error(
self._layer_norm(teacher_feature), self._layer_norm(student_feature))
feature_transfer_loss *= distill_config.hidden_distill_factor
beta_loss, gamma_loss = self._get_distribution_losses(teacher_feature,
student_feature)
beta_loss *= distill_config.beta_distill_factor
gamma_loss *= distill_config.gamma_distill_factor
total_loss = feature_transfer_loss + beta_loss + gamma_loss
if distill_config.if_transfer_attention:
teacher_attention = outputs['teacher_attention_score']
student_attention = outputs['student_attention_score']
attention_loss = self._get_attention_loss(teacher_attention,
student_attention)
attention_loss *= distill_config.attention_distill_factor
total_loss += attention_loss
total_loss /= tf.cast((self._stage_id + 1), tf.float32)
# Last stage to distill pretraining layer.
else:
distill_config = self._progressive_config.pretrain_distill_config
lm_label = labels['masked_lm_ids']
vocab_size = (
self._task_config.student_model.encoder.mobilebert.word_vocab_size)
# Shape: [batch, max_predictions_per_seq, vocab_size]
lm_label = tf.one_hot(indices=lm_label, depth=vocab_size, on_value=1.0,
off_value=0.0, axis=-1, dtype=tf.float32)
gt_ratio = distill_config.distill_ground_truth_ratio
if gt_ratio != 1.0:
teacher_mlm_logits = outputs['teacher_pretrainer_output']['mlm_logits']
teacher_labels = tf.nn.softmax(teacher_mlm_logits, axis=-1)
lm_label = gt_ratio * lm_label + (1-gt_ratio) * teacher_labels
student_pretrainer_output = outputs['student_pretrainer_output']
# Shape: [batch, max_predictions_per_seq, vocab_size]
student_lm_log_probs = tf.nn.log_softmax(
student_pretrainer_output['mlm_logits'], axis=-1)
# Shape: [batch * max_predictions_per_seq]
per_example_loss = tf.reshape(
-tf.reduce_sum(student_lm_log_probs * lm_label, axis=[-1]), [-1])
lm_label_weights = tf.reshape(labels['masked_lm_weights'], [-1])
lm_numerator_loss = tf.reduce_sum(per_example_loss * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
total_loss = mlm_loss
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
student_pretrainer_output['next_sentence'], dtype=tf.float32)
sentence_loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
total_loss += sentence_loss
# Also update loss-related metrics here, instead of in `process_metrics`.
metrics = dict([(metric.name, metric) for metric in metrics])
if not last_stage:
metrics['feature_transfer_mse'].update_state(feature_transfer_loss)
metrics['beta_transfer_loss'].update_state(beta_loss)
metrics['gamma_transfer_loss'].update_state(gamma_loss)
layer_wise_config = self._progressive_config.layer_wise_distill_config
if layer_wise_config.if_transfer_attention:
metrics['attention_transfer_loss'].update_state(attention_loss)
else:
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
metrics['next_sentence_loss'].update_state(sentence_loss)
metrics['total_loss'].update_state(total_loss)
return total_loss
# overrides base_task.Task
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.Mean(name='feature_transfer_mse'),
tf.keras.metrics.Mean(name='beta_transfer_loss'),
tf.keras.metrics.Mean(name='gamma_transfer_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='lm_example_loss'),
tf.keras.metrics.Mean(name='total_loss')]
if self._progressive_config.layer_wise_distill_config.if_transfer_attention:
metrics.append(tf.keras.metrics.Mean(name='attention_transfer_loss'))
if self._task_config.train_data.use_next_sentence_label:
metrics.append(tf.keras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))
return metrics
# overrides base_task.Task
# process non-loss metrics
def process_metrics(self, metrics, labels, student_pretrainer_output):
metrics = dict([(metric.name, metric) for metric in metrics])
# Final pretrainer layer distillation stage.
if student_pretrainer_output is not None:
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(
labels['masked_lm_ids'], student_pretrainer_output['mlm_logits'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'],
student_pretrainer_output['next_sentence'])
# overrides base_task.Task
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
outputs=outputs,
metrics=metrics)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
# TODO(b/154564893): enable loss scaling.
# scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
# get trainable variables for current stage
tvars = model.trainable_variables
last_stage = 'student_pretrainer_output' in outputs
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(
metrics, inputs,
outputs['student_pretrainer_output'] if last_stage else None)
return {self.loss: loss}
# overrides base_task.Task
  def validation_step(self, inputs, model: tf.keras.Model, metrics):
    """Validation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = model(inputs, training=False)
# Computes per-replica loss.
loss = self.build_losses(labels=inputs, outputs=outputs, metrics=metrics)
last_stage = 'student_pretrainer_output' in outputs
self.process_metrics(
metrics, inputs,
outputs['student_pretrainer_output'] if last_stage else None)
return {self.loss: loss}
@property
def cur_checkpoint_items(self):
"""Checkpoints for model, stage_id, optimizer for preemption handling."""
return dict(
stage_id=self._stage_id,
volatiles=self._volatiles,
student_pretrainer=self._student_pretrainer,
teacher_pretrainer=self._teacher_pretrainer,
encoder=self._student_pretrainer.encoder_network)
  def initialize(self, model):
    """Loads the teacher checkpoint and copies its embedding to the student."""
# This function will be called when no checkpoint found for the model,
# i.e., when the training starts (not preemption case).
# The weights of teacher pretrainer and student pretrainer will be
# initialized, rather than the passed-in `model`.
del model
logging.info('Begin to load checkpoint for teacher pretrainer model.')
ckpt_dir_or_file = self._task_config.teacher_model_init_checkpoint
if not ckpt_dir_or_file:
raise ValueError('`teacher_model_init_checkpoint` is not specified.')
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Makes sure the teacher pretrainer variables are created.
_ = self._teacher_pretrainer(self._teacher_pretrainer.inputs)
teacher_checkpoint = tf.train.Checkpoint(
**self._teacher_pretrainer.checkpoint_items)
teacher_checkpoint.read(ckpt_dir_or_file).assert_existing_objects_matched()
logging.info('Begin to copy word embedding from teacher model to student.')
teacher_encoder = self._teacher_pretrainer.encoder_network
student_encoder = self._student_pretrainer.encoder_network
embedding_weights = teacher_encoder.embedding_layer.get_weights()
student_encoder.embedding_layer.set_weights(embedding_weights)
| 25,420 | 40.948845 | 85 | py |
models | models-master/official/projects/mobilebert/distillation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.mobilebert.distillation."""
import os
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.nlp.modeling import models
from official.projects.mobilebert import distillation
class DistillationTest(tf.test.TestCase, parameterized.TestCase):
def prepare_config(self, teacher_block_num, student_block_num,
transfer_teacher_layers):
# using small model for testing
task_config = distillation.BertDistillationTaskConfig(
teacher_model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
type='mobilebert',
mobilebert=encoders.MobileBertEncoderConfig(
num_blocks=teacher_block_num)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=256,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
],
mlm_activation='gelu'),
student_model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
type='mobilebert',
mobilebert=encoders.MobileBertEncoderConfig(
num_blocks=student_block_num)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=256,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
],
mlm_activation='relu'),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
max_predictions_per_seq=76,
seq_length=512,
global_batch_size=10),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
max_predictions_per_seq=76,
seq_length=512,
global_batch_size=10))
# set only 1 step for each stage
progressive_config = distillation.BertDistillationProgressiveConfig()
progressive_config.layer_wise_distill_config.transfer_teacher_layers = (
transfer_teacher_layers)
progressive_config.layer_wise_distill_config.num_steps = 1
progressive_config.pretrain_distill_config.num_steps = 1
optimization_config = optimization.OptimizationConfig(
optimizer=optimization.OptimizerConfig(
type='lamb',
lamb=optimization.LAMBConfig(
weight_decay_rate=0.0001,
exclude_from_weight_decay=[
'LayerNorm', 'layer_norm', 'bias', 'no_norm'
])),
learning_rate=optimization.LrConfig(
type='polynomial',
polynomial=optimization.PolynomialLrConfig(
initial_learning_rate=1.5e-3,
decay_steps=10000,
end_learning_rate=1.5e-3)),
warmup=optimization.WarmupConfig(
type='linear',
linear=optimization.LinearWarmupConfig(warmup_learning_rate=0)))
exp_config = cfg.ExperimentConfig(
task=task_config,
trainer=prog_trainer_lib.ProgressiveTrainerConfig(
progressive=progressive_config,
optimizer_config=optimization_config))
# Create a teacher model checkpoint.
teacher_encoder = encoders.build_encoder(task_config.teacher_model.encoder)
pretrainer_config = task_config.teacher_model
if pretrainer_config.cls_heads:
teacher_cls_heads = [
layers.ClassificationHead(**cfg.as_dict())
for cfg in pretrainer_config.cls_heads
]
else:
teacher_cls_heads = []
masked_lm = layers.MobileBertMaskedLM(
embedding_table=teacher_encoder.get_embedding_table(),
activation=tf_utils.get_activation(pretrainer_config.mlm_activation),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=pretrainer_config.mlm_initializer_range),
name='cls/predictions')
teacher_pretrainer = models.BertPretrainerV2(
encoder_network=teacher_encoder,
classification_heads=teacher_cls_heads,
customized_masked_lm=masked_lm)
# The model variables will be created after the forward call.
_ = teacher_pretrainer(teacher_pretrainer.inputs)
teacher_pretrainer_ckpt = tf.train.Checkpoint(
**teacher_pretrainer.checkpoint_items)
teacher_ckpt_path = os.path.join(self.get_temp_dir(), 'teacher_model.ckpt')
teacher_pretrainer_ckpt.save(teacher_ckpt_path)
exp_config.task.teacher_model_init_checkpoint = self.get_temp_dir()
return exp_config
@parameterized.parameters((2, 2, None), (4, 2, [1, 3]))
def test_task(self, teacher_block_num, student_block_num,
transfer_teacher_layers):
exp_config = self.prepare_config(teacher_block_num, student_block_num,
transfer_teacher_layers)
bert_distillation_task = distillation.BertDistillationTask(
strategy=tf.distribute.get_strategy(),
progressive=exp_config.trainer.progressive,
optimizer_config=exp_config.trainer.optimizer_config,
task_config=exp_config.task)
metrics = bert_distillation_task.build_metrics()
train_dataset = bert_distillation_task.get_train_dataset(stage_id=0)
train_iterator = iter(train_dataset)
eval_dataset = bert_distillation_task.get_eval_dataset(stage_id=0)
eval_iterator = iter(eval_dataset)
optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=0.1)
# test train/val step for all stages, including the last pretraining stage
for stage in range(student_block_num + 1):
step = stage
bert_distillation_task.update_pt_stage(step)
model = bert_distillation_task.get_model(stage, None)
bert_distillation_task.initialize(model)
bert_distillation_task.train_step(next(train_iterator), model, optimizer,
metrics=metrics)
bert_distillation_task.validation_step(next(eval_iterator), model,
metrics=metrics)
logging.info('begin to save and load model checkpoint')
ckpt = tf.train.Checkpoint(model=model)
ckpt.save(self.get_temp_dir())
if __name__ == '__main__':
tf.test.main()
| 7,257 | 40.474286 | 83 | py |
models | models-master/official/projects/deepmac_maskrcnn/serving/detection.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detection input and model functions for serving/inference."""
from typing import Dict, Mapping, Text
import tensorflow as tf
from official.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn as cfg
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model
from official.projects.deepmac_maskrcnn.tasks import deep_mask_head_rcnn
from official.vision.ops import box_ops
from official.vision.serving import detection
def reverse_input_box_transformation(boxes, image_info):
  """Reverses the Mask R-CNN model's input box transformation.
Args:
boxes: A [batch_size, num_boxes, 4] float tensor of boxes in normalized
coordinates.
image_info: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [desired_height, desired_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale] is
the scaling factor, which is the ratio of
scaled dimension / original dimension.
Returns:
boxes: Same shape as input `boxes` but in the absolute coordinate space of
the preprocessed image.
"""
  # Reverses the sequence applied in DetectionModule.serve when
  # output_normalized_coordinates=True.
scale = image_info[:, 2:3, :]
scale = tf.tile(scale, [1, 1, 2])
boxes = boxes * scale
height_width = image_info[:, 0:1, :]
return box_ops.denormalize_boxes(boxes, height_width)
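# Worked example with assumed values (not taken from a real export): for an
# original 320x320 image resized to 640x640, image_info would be
# [[320, 320], [640, 640], [2.0, 2.0], [0, 0]]. A normalized box
# [0.1, 0.2, 0.5, 0.6] becomes [0.2, 0.4, 1.0, 1.2] after multiplying by the
# scale and [64, 128, 320, 384] after denormalizing by the original
# height/width, i.e. absolute coordinates in the 640x640 preprocessed image.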
class DetectionModule(detection.DetectionModule):
"""Detection Module."""
def _build_model(self):
    if self._batch_size is None:
      raise ValueError("batch_size can't be None for detection models")
    if self.params.task.model.detection_generator.nms_version != 'batched':
      raise ValueError('Only batched_nms is supported.')
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
if isinstance(self.params.task.model, cfg.DeepMaskHeadRCNN):
model = deep_mask_head_rcnn.build_maskrcnn(
input_specs=input_specs, model_config=self.params.task.model)
else:
raise ValueError('Detection module not implemented for {} model.'.format(
type(self.params.task.model)))
return model
@tf.function
def inference_for_tflite_image_and_boxes(
self, images: tf.Tensor, boxes: tf.Tensor) -> Mapping[str, tf.Tensor]:
"""A tf-function for serve_image_and_boxes.
Args:
images: A [batch_size, height, width, channels] float tensor.
boxes: A [batch_size, num_boxes, 4] float tensor containing boxes
normalized to the input image.
Returns:
result: A dict containing:
'detection_masks': A [batch_size, num_boxes, mask_height, mask_width]
float tensor containing per-pixel mask probabilities.
"""
if not isinstance(self.model, maskrcnn_model.DeepMaskRCNNModel):
raise ValueError(
('Can only use image and boxes input for DeepMaskRCNNModel, '
'Found {}'.format(type(self.model))))
return self.serve_image_and_boxes(images, boxes)
  def serve_image_and_boxes(self, images: tf.Tensor, boxes: tf.Tensor):
    """Function used to export a model that consumes an image and boxes.
The model predicts the class-agnostic masks at the given box locations.
Args:
images: A [batch_size, height, width, channels] float tensor.
boxes: A [batch_size, num_boxes, 4] float tensor containing boxes
normalized to the input image.
Returns:
result: A dict containing:
'detection_masks': A [batch_size, num_boxes, mask_height, mask_width]
float tensor containing per-pixel mask probabilities.
"""
images, _, image_info = self.preprocess(images)
boxes = reverse_input_box_transformation(boxes, image_info)
result = self.model.call_images_and_boxes(images, boxes)
return result
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
signatures = {}
if 'image_and_boxes_tensor' in function_keys:
def_name = function_keys['image_and_boxes_tensor']
image_signature = tf.TensorSpec(
shape=[self._batch_size] + [None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8)
boxes_signature = tf.TensorSpec(shape=[self._batch_size, None, 4],
dtype=tf.float32)
tf_function = self.inference_for_tflite_image_and_boxes
signatures[def_name] = tf_function.get_concrete_function(
image_signature, boxes_signature)
function_keys.pop('image_and_boxes_tensor', None)
parent_signatures = super(DetectionModule, self).get_inference_signatures(
function_keys)
signatures.update(parent_signatures)
return signatures
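  # Illustrative use of the extra signature (a sketch; the constructor
  # arguments and the signature name 'serving_default' are assumptions, not
  # values mandated by this module):
  #
  #   module = DetectionModule(
  #       params, batch_size=1, input_image_size=[640, 640])
  #   signatures = module.get_inference_signatures(
  #       {'image_and_boxes_tensor': 'serving_default'})
  #   # The exported signature takes a uint8 image batch and float32 boxes of
  #   # shape [batch, num_boxes, 4] normalized to the input image.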
| 5,494 | 38.25 | 81 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/maskrcnn_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for maskrcnn_model.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model
from official.projects.deepmac_maskrcnn.modeling.heads import instance_heads as deep_instance_heads
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.heads import instance_heads
from official.vision.modeling.layers import detection_generator
from official.vision.modeling.layers import mask_sampler
from official.vision.modeling.layers import roi_aligner
from official.vision.modeling.layers import roi_generator
from official.vision.modeling.layers import roi_sampler
from official.vision.ops import anchor
def construct_model_and_anchors(image_size, use_gt_boxes_for_masks):
num_classes = 3
min_level = 3
max_level = 4
num_scales = 3
aspect_ratios = [1.0]
anchor_boxes = anchor.Anchor(
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=3,
image_size=image_size).multilevel_boxes
num_anchors_per_location = len(aspect_ratios) * num_scales
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3])
backbone = resnet.ResNet(model_id=50, input_specs=input_specs)
decoder = fpn.FPN(
min_level=min_level,
max_level=max_level,
input_specs=backbone.output_specs)
rpn_head = dense_prediction_heads.RPNHead(
min_level=min_level,
max_level=max_level,
num_anchors_per_location=num_anchors_per_location)
detection_head = instance_heads.DetectionHead(
num_classes=num_classes)
roi_generator_obj = roi_generator.MultilevelROIGenerator()
roi_sampler_obj = roi_sampler.ROISampler()
roi_aligner_obj = roi_aligner.MultilevelROIAligner()
detection_generator_obj = detection_generator.DetectionGenerator()
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=num_classes, upsample_factor=2)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=28, num_sampled_masks=1)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14)
model = maskrcnn_model.DeepMaskRCNNModel(
backbone,
decoder,
rpn_head,
detection_head,
roi_generator_obj,
roi_sampler_obj,
roi_aligner_obj,
detection_generator_obj,
mask_head,
mask_sampler_obj,
mask_roi_aligner_obj,
use_gt_boxes_for_masks=use_gt_boxes_for_masks)
return model, anchor_boxes
class MaskRCNNModelTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(False, False, False),
(False, True, False),
(True, False, True),
(True, False, False),
(True, True, True),
(True, True, False),
)
def test_forward(self, use_gt_boxes_for_masks, training, use_outer_boxes):
image_size = (256, 256)
images = np.random.rand(2, image_size[0], image_size[1], 3)
image_shape = np.array([[224, 100], [100, 224]])
model, anchor_boxes = construct_model_and_anchors(
image_size, use_gt_boxes_for_masks)
gt_boxes = tf.zeros((2, 16, 4), dtype=tf.float32)
gt_outer_boxes = None
if use_outer_boxes:
gt_outer_boxes = tf.zeros((2, 16, 4), dtype=tf.float32)
gt_masks = tf.zeros((2, 16, 32, 32))
gt_classes = tf.zeros((2, 16), dtype=tf.int32)
results = model(images.astype(np.uint8),
image_shape,
anchor_boxes,
gt_boxes,
gt_classes,
gt_masks,
gt_outer_boxes,
training=training)
self.assertIn('rpn_boxes', results)
self.assertIn('rpn_scores', results)
if training:
self.assertIn('class_targets', results)
self.assertIn('box_targets', results)
self.assertIn('class_outputs', results)
self.assertIn('box_outputs', results)
self.assertIn('mask_outputs', results)
self.assertEqual(results['mask_targets'].shape,
results['mask_outputs'].shape)
else:
self.assertIn('detection_boxes', results)
self.assertIn('detection_scores', results)
self.assertIn('detection_classes', results)
self.assertIn('num_detections', results)
self.assertIn('detection_masks', results)
@parameterized.parameters(
[(1, 5), (1, 10), (1, 15), (2, 5), (2, 10), (2, 15)]
)
def test_image_and_boxes(self, batch_size, num_boxes):
image_size = (640, 640)
images = np.random.rand(batch_size, image_size[0], image_size[1], 3).astype(
np.float32)
model, _ = construct_model_and_anchors(
image_size, use_gt_boxes_for_masks=True)
boxes = np.zeros((batch_size, num_boxes, 4), dtype=np.float32)
boxes[:, :, [2, 3]] = 1.0
boxes = tf.constant(boxes)
results = model.call_images_and_boxes(images, boxes)
self.assertIn('detection_masks', results)
if __name__ == '__main__':
tf.test.main()
| 5,731 | 34.825 | 99 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/maskrcnn_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask R-CNN model."""
from typing import List, Mapping, Optional, Union
# Import libraries
from absl import logging
import tensorflow as tf
from official.vision.modeling import maskrcnn_model
from official.vision.ops import box_ops
def resize_as(source, size):
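  """Spatially resizes a channel-first [B, C, H, W] tensor to (size, size)."""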
source = tf.transpose(source, (0, 2, 3, 1))
source = tf.image.resize(source, (size, size))
return tf.transpose(source, (0, 3, 1, 2))
class DeepMaskRCNNModel(maskrcnn_model.MaskRCNNModel):
"""The Mask R-CNN model."""
def __init__(self,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
rpn_head: tf.keras.layers.Layer,
detection_head: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_generator: tf.keras.layers.Layer,
roi_sampler: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_aligner: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
mask_head: Optional[tf.keras.layers.Layer] = None,
mask_sampler: Optional[tf.keras.layers.Layer] = None,
mask_roi_aligner: Optional[tf.keras.layers.Layer] = None,
class_agnostic_bbox_pred: bool = False,
cascade_class_ensemble: bool = False,
min_level: Optional[int] = None,
max_level: Optional[int] = None,
num_scales: Optional[int] = None,
aspect_ratios: Optional[List[float]] = None,
anchor_size: Optional[float] = None,
outer_boxes_scale: float = 1.0,
use_gt_boxes_for_masks=False,
**kwargs):
"""Initializes the Mask R-CNN model.
Args:
backbone: `tf.keras.Model`, the backbone network.
decoder: `tf.keras.Model`, the decoder network.
rpn_head: the RPN head.
detection_head: the detection head or a list of heads.
roi_generator: the ROI generator.
roi_sampler: a single ROI sampler or a list of ROI samplers for cascade
detection heads.
roi_aligner: the ROI aligner.
detection_generator: the detection generator.
mask_head: the mask head.
mask_sampler: the mask sampler.
      mask_roi_aligner: the ROI aligner for mask prediction.
class_agnostic_bbox_pred: if True, perform class agnostic bounding box
prediction. Needs to be `True` for Cascade RCNN models.
cascade_class_ensemble: if True, ensemble classification scores over all
detection heads.
min_level: Minimum level in output feature maps.
max_level: Maximum level in output feature maps.
      num_scales: A number representing intermediate scales added on each level.
        For instance, num_scales=2 adds one additional intermediate anchor
        scale on each level, yielding scales [2^0, 2^0.5].
      aspect_ratios: A list representing the aspect ratio anchors added on each
        level. The number indicates the ratio of width to height. For instance,
        aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each scale level.
      anchor_size: A number representing the scale of the base anchor size
        relative to the feature stride 2^level.
outer_boxes_scale: a float to scale up the bounding boxes to generate
more inclusive masks. The scale is expected to be >=1.0.
      use_gt_boxes_for_masks: bool, if set, crop using groundtruth boxes
        instead of proposals when training the mask head.
**kwargs: keyword arguments to be passed.
"""
super().__init__(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=detection_head,
roi_generator=roi_generator,
roi_sampler=roi_sampler,
roi_aligner=roi_aligner,
detection_generator=detection_generator,
mask_head=mask_head,
mask_sampler=mask_sampler,
mask_roi_aligner=mask_roi_aligner,
class_agnostic_bbox_pred=class_agnostic_bbox_pred,
cascade_class_ensemble=cascade_class_ensemble,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size,
outer_boxes_scale=outer_boxes_scale,
**kwargs)
self._config_dict['use_gt_boxes_for_masks'] = use_gt_boxes_for_masks
def call(self,
images: tf.Tensor,
image_shape: tf.Tensor,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
gt_boxes: Optional[tf.Tensor] = None,
gt_classes: Optional[tf.Tensor] = None,
gt_masks: Optional[tf.Tensor] = None,
gt_outer_boxes: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
call_box_outputs_kwargs = {
'images': images,
'image_shape': image_shape,
'anchor_boxes': anchor_boxes,
'gt_boxes': gt_boxes,
'gt_classes': gt_classes,
'training': training
}
if self.outer_boxes_scale > 1.0:
call_box_outputs_kwargs['gt_outer_boxes'] = gt_outer_boxes
model_outputs, intermediate_outputs = self._call_box_outputs(
**call_box_outputs_kwargs)
if not self._include_mask:
return model_outputs
if self.outer_boxes_scale == 1.0:
current_rois = intermediate_outputs['current_rois']
matched_gt_boxes = intermediate_outputs['matched_gt_boxes']
mask_head_gt_boxes = gt_boxes
else:
current_rois = box_ops.compute_outer_boxes(
intermediate_outputs['current_rois'],
tf.expand_dims(image_shape, axis=1), self.outer_boxes_scale)
matched_gt_boxes = intermediate_outputs['matched_gt_outer_boxes']
mask_head_gt_boxes = gt_outer_boxes
model_mask_outputs = self._call_mask_outputs(
model_box_outputs=model_outputs,
features=model_outputs['decoder_features'],
current_rois=current_rois,
matched_gt_indices=intermediate_outputs['matched_gt_indices'],
matched_gt_boxes=matched_gt_boxes,
matched_gt_classes=intermediate_outputs['matched_gt_classes'],
gt_masks=gt_masks,
gt_classes=gt_classes,
gt_boxes=mask_head_gt_boxes,
training=training)
model_outputs.update(model_mask_outputs)
return model_outputs
def call_images_and_boxes(self, images, boxes):
"""Predict masks given an image and bounding boxes."""
_, decoder_features = self._get_backbone_and_decoder_features(images)
boxes_shape = tf.shape(boxes)
batch_size, num_boxes = boxes_shape[0], boxes_shape[1]
classes = tf.zeros((batch_size, num_boxes), dtype=tf.int32)
_, mask_probs = self._features_to_mask_outputs(
decoder_features, boxes, classes)
return {
'detection_masks': mask_probs
}
def _call_mask_outputs(
self,
model_box_outputs: Mapping[str, tf.Tensor],
features: tf.Tensor,
current_rois: tf.Tensor,
matched_gt_indices: tf.Tensor,
matched_gt_boxes: tf.Tensor,
matched_gt_classes: tf.Tensor,
gt_masks: tf.Tensor,
gt_classes: tf.Tensor,
gt_boxes: tf.Tensor,
training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
model_outputs = dict(model_box_outputs)
if training:
if self._config_dict['use_gt_boxes_for_masks']:
mask_size = (
self.mask_roi_aligner._config_dict['crop_size'] * # pylint:disable=protected-access
self.mask_head._config_dict['upsample_factor'] # pylint:disable=protected-access
)
gt_masks = resize_as(source=gt_masks, size=mask_size)
logging.info('Using GT class and mask targets.')
model_outputs.update({
'mask_class_targets': gt_classes,
'mask_targets': gt_masks,
})
else:
rois, roi_classes, roi_masks = self.mask_sampler(
current_rois, matched_gt_boxes, matched_gt_classes,
matched_gt_indices, gt_masks)
roi_masks = tf.stop_gradient(roi_masks)
model_outputs.update({
'mask_class_targets': roi_classes,
'mask_targets': roi_masks,
})
else:
if self.outer_boxes_scale == 1.0:
rois = model_outputs['detection_boxes']
else:
rois = model_outputs['detection_outer_boxes']
roi_classes = model_outputs['detection_classes']
# Mask RoI align.
if training and self._config_dict['use_gt_boxes_for_masks']:
logging.info('Using GT mask roi features.')
roi_aligner_boxes = gt_boxes
mask_head_classes = gt_classes
else:
roi_aligner_boxes = rois
mask_head_classes = roi_classes
mask_logits, mask_probs = self._features_to_mask_outputs(
features, roi_aligner_boxes, mask_head_classes)
if training:
model_outputs.update({
'mask_outputs': mask_logits,
})
else:
model_outputs.update({
'detection_masks': mask_probs,
})
return model_outputs
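# Illustrative sketch (not part of the library): querying masks for caller
# supplied boxes via `call_images_and_boxes`. `model` is assumed to be a
# constructed DeepMaskRCNNModel with a mask head; the image size and box
# values mirror the unit tests and are otherwise arbitrary.
def _call_images_and_boxes_example(model: DeepMaskRCNNModel) -> tf.Tensor:
  images = tf.zeros((1, 640, 640, 3), dtype=tf.float32)
  # One batch of 8 identical [ymin, xmin, ymax, xmax] boxes.
  boxes = tf.tile(tf.constant([[[0.0, 0.0, 1.0, 1.0]]], tf.float32), [1, 8, 1])
  outputs = model.call_images_and_boxes(images, boxes)
  return outputs['detection_masks']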
| 9,656 | 37.628 | 96 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/heads/hourglass_network.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Hourglass[1] network.
[1]: https://arxiv.org/abs/1603.06937
"""
import tensorflow as tf
BATCH_NORM_EPSILON = 1e-5
BATCH_NORM_MOMENTUM = 0.1
BATCH_NORM_FUSED = True
class IdentityLayer(tf.keras.layers.Layer):
"""A layer which passes through the input as it is."""
def call(self, inputs):
return inputs
def _get_padding_for_kernel_size(kernel_size):
if kernel_size == 7:
return (3, 3)
elif kernel_size == 3:
return (1, 1)
else:
raise ValueError('Padding for kernel size {} not known.'.format(
kernel_size))
def batchnorm():
try:
return tf.keras.layers.experimental.SyncBatchNormalization(
name='batchnorm', epsilon=1e-5, momentum=0.1)
except AttributeError:
return tf.keras.layers.BatchNormalization(
name='batchnorm', epsilon=1e-5, momentum=0.1, fused=BATCH_NORM_FUSED)
class ConvolutionalBlock(tf.keras.layers.Layer):
"""Block that aggregates Convolution + Norm layer + ReLU."""
def __init__(self, kernel_size, out_channels, stride=1, relu=True,
padding='same'):
"""Initializes the Convolutional block.
Args:
kernel_size: int, convolution kernel size.
out_channels: int, the desired number of output channels.
stride: Integer, stride used in the convolution.
relu: bool, whether to use relu at the end of the layer.
padding: str, the padding scheme to use when kernel_size <= 1
"""
super(ConvolutionalBlock, self).__init__()
if kernel_size > 1:
padding = 'valid'
padding_size = _get_padding_for_kernel_size(kernel_size)
# TODO(vighneshb) Explore if removing and using padding option in conv
# layer works.
self.pad = tf.keras.layers.ZeroPadding2D(padding_size)
else:
self.pad = IdentityLayer()
self.conv = tf.keras.layers.Conv2D(
filters=out_channels, kernel_size=kernel_size, use_bias=False,
strides=stride, padding=padding)
self.norm = batchnorm()
if relu:
self.relu = tf.keras.layers.ReLU()
else:
self.relu = IdentityLayer()
def call(self, inputs):
net = self.pad(inputs)
net = self.conv(net)
net = self.norm(net)
return self.relu(net)
class SkipConvolution(ConvolutionalBlock):
"""The skip connection layer for a ResNet."""
def __init__(self, out_channels, stride):
"""Initializes the skip convolution layer.
Args:
out_channels: int, the desired number of output channels.
stride: int, the stride for the layer.
"""
super(SkipConvolution, self).__init__(
out_channels=out_channels, kernel_size=1, stride=stride, relu=False)
class ResidualBlock(tf.keras.layers.Layer):
"""A Residual block."""
def __init__(self, out_channels, skip_conv=False, kernel_size=3, stride=1,
padding='same'):
"""Initializes the Residual block.
Args:
out_channels: int, the desired number of output channels.
skip_conv: bool, whether to use a conv layer for skip connections.
kernel_size: int, convolution kernel size.
stride: Integer, stride used in the convolution.
padding: str, the type of padding to use.
"""
super(ResidualBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=kernel_size, out_channels=out_channels, stride=stride)
self.conv = tf.keras.layers.Conv2D(
filters=out_channels, kernel_size=kernel_size, use_bias=False,
strides=1, padding=padding)
self.norm = batchnorm()
if skip_conv:
self.skip = SkipConvolution(out_channels=out_channels,
stride=stride)
else:
self.skip = IdentityLayer()
self.relu = tf.keras.layers.ReLU()
def call(self, inputs):
net = self.conv_block(inputs)
net = self.conv(net)
net = self.norm(net)
net_skip = self.skip(inputs)
return self.relu(net + net_skip)
class InputDownsampleBlock(tf.keras.layers.Layer):
"""Block for the initial feature downsampling."""
def __init__(self, out_channels_initial_conv, out_channels_residual_block):
"""Initializes the downsample block.
Args:
out_channels_initial_conv: int, the desired number of output channels
in the initial conv layer.
out_channels_residual_block: int, the desired number of output channels
in the underlying residual block.
"""
super(InputDownsampleBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=7, out_channels=out_channels_initial_conv, stride=2,
padding='valid')
self.residual_block = ResidualBlock(
out_channels=out_channels_residual_block, stride=2, skip_conv=True)
def call(self, inputs):
return self.residual_block(self.conv_block(inputs))
class InputConvBlock(tf.keras.layers.Layer):
"""Block for the initial feature convolution.
This block is used in the hourglass network when we don't want to downsample
the input.
"""
def __init__(self, out_channels_initial_conv, out_channels_residual_block):
"""Initializes the downsample block.
Args:
out_channels_initial_conv: int, the desired number of output channels
in the initial conv layer.
out_channels_residual_block: int, the desired number of output channels
in the underlying residual block.
"""
super(InputConvBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=3, out_channels=out_channels_initial_conv, stride=1,
padding='valid')
self.residual_block = ResidualBlock(
out_channels=out_channels_residual_block, stride=1, skip_conv=True)
def call(self, inputs):
return self.residual_block(self.conv_block(inputs))
def _make_repeated_residual_blocks(out_channels, num_blocks,
initial_stride=1, residual_channels=None,
initial_skip_conv=False):
"""Stack Residual blocks one after the other.
Args:
out_channels: int, the desired number of output channels.
num_blocks: int, the number of residual blocks to be stacked.
initial_stride: int, the stride of the initial residual block.
residual_channels: int, the desired number of output channels in the
      intermediate residual blocks. If not specified, we use out_channels.
initial_skip_conv: bool, if set, the first residual block uses a skip
      convolution. This is useful when the number of channels in the input
      is not the same as residual_channels.
Returns:
blocks: A list of residual blocks to be applied in sequence.
"""
blocks = []
if residual_channels is None:
residual_channels = out_channels
for i in range(num_blocks - 1):
# Only use the stride at the first block so we don't repeatedly downsample
# the input
stride = initial_stride if i == 0 else 1
    # If the stride is more than 1, we cannot use an identity layer for the
# skip connection and are forced to use a conv for the skip connection.
skip_conv = stride > 1
if i == 0 and initial_skip_conv:
skip_conv = True
blocks.append(
ResidualBlock(out_channels=residual_channels, stride=stride,
skip_conv=skip_conv)
)
if num_blocks == 1:
# If there is only 1 block, the for loop above is not run,
# therefore we honor the requested stride in the last residual block
stride = initial_stride
# We are forced to use a conv in the skip connection if stride > 1
skip_conv = stride > 1
else:
stride = 1
skip_conv = residual_channels != out_channels
blocks.append(ResidualBlock(out_channels=out_channels, skip_conv=skip_conv,
stride=stride))
return blocks
def _apply_blocks(inputs, blocks):
net = inputs
for block in blocks:
net = block(net)
return net
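# Illustrative sketch (not part of the network definitions): building a small
# residual stack that downsamples once and widens the channels, then applying
# it with `_apply_blocks`. All shapes are arbitrary assumptions.
def _residual_stack_example() -> tf.Tensor:
  blocks = _make_repeated_residual_blocks(
      out_channels=64, num_blocks=3, initial_stride=2, initial_skip_conv=True)
  features = tf.zeros((1, 32, 32, 16))
  # The first block strides by 2 and uses a skip conv, giving [1, 16, 16, 64];
  # the remaining blocks preserve that shape.
  return _apply_blocks(features, blocks)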
class EncoderDecoderBlock(tf.keras.layers.Layer):
"""An encoder-decoder block which recursively defines the hourglass network."""
def __init__(self, num_stages, channel_dims, blocks_per_stage,
stagewise_downsample=True, encoder_decoder_shortcut=True):
"""Initializes the encoder-decoder block.
Args:
num_stages: int, Number of stages in the network. At each stage we have 2
encoder and 1 decoder blocks. The second encoder block downsamples the
input.
channel_dims: int list, the output channels dimensions of stages in
the network. `channel_dims[0]` is used to define the number of
channels in the first encoder block and `channel_dims[1]` is used to
define the number of channels in the second encoder block. The channels
in the recursive inner layers are defined using `channel_dims[1:]`
blocks_per_stage: int list, number of residual blocks to use at each
stage. `blocks_per_stage[0]` defines the number of blocks at the
current stage and `blocks_per_stage[1:]` is used at further stages.
stagewise_downsample: bool, whether or not to downsample before passing
inputs to the next stage.
encoder_decoder_shortcut: bool, whether or not to use shortcut
connections between encoder and decoder.
"""
super(EncoderDecoderBlock, self).__init__()
out_channels = channel_dims[0]
out_channels_downsampled = channel_dims[1]
self.encoder_decoder_shortcut = encoder_decoder_shortcut
if encoder_decoder_shortcut:
self.merge_features = tf.keras.layers.Add()
self.encoder_block1 = _make_repeated_residual_blocks(
out_channels=out_channels, num_blocks=blocks_per_stage[0],
initial_stride=1)
initial_stride = 2 if stagewise_downsample else 1
self.encoder_block2 = _make_repeated_residual_blocks(
out_channels=out_channels_downsampled,
num_blocks=blocks_per_stage[0], initial_stride=initial_stride,
initial_skip_conv=out_channels != out_channels_downsampled)
if num_stages > 1:
self.inner_block = [
EncoderDecoderBlock(num_stages - 1, channel_dims[1:],
blocks_per_stage[1:],
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut)
]
else:
self.inner_block = _make_repeated_residual_blocks(
out_channels=out_channels_downsampled,
num_blocks=blocks_per_stage[1])
self.decoder_block = _make_repeated_residual_blocks(
residual_channels=out_channels_downsampled,
out_channels=out_channels, num_blocks=blocks_per_stage[0])
self.upsample = tf.keras.layers.UpSampling2D(initial_stride)
def call(self, inputs):
if self.encoder_decoder_shortcut:
encoded_outputs = _apply_blocks(inputs, self.encoder_block1)
encoded_downsampled_outputs = _apply_blocks(inputs, self.encoder_block2)
inner_block_outputs = _apply_blocks(
encoded_downsampled_outputs, self.inner_block)
decoded_outputs = _apply_blocks(inner_block_outputs, self.decoder_block)
upsampled_outputs = self.upsample(decoded_outputs)
if self.encoder_decoder_shortcut:
return self.merge_features([encoded_outputs, upsampled_outputs])
else:
return upsampled_outputs
class HourglassNetwork(tf.keras.Model):
"""The hourglass network."""
def __init__(self, num_stages, input_channel_dims, channel_dims_per_stage,
blocks_per_stage, num_hourglasses, initial_downsample=True,
               stagewise_downsample=True, encoder_decoder_shortcut=True):
    """Initializes the feature extractor.
Args:
num_stages: int, Number of stages in the network. At each stage we have 2
encoder and 1 decoder blocks. The second encoder block downsamples the
input.
input_channel_dims: int, the number of channels in the input conv blocks.
channel_dims_per_stage: int list, the output channel dimensions of each
stage in the hourglass network.
blocks_per_stage: int list, number of residual blocks to use at each
stage in the hourglass network
      num_hourglasses: int, number of hourglass networks to stack
        sequentially.
initial_downsample: bool, if set, downsamples the input by a factor of 4
before applying the rest of the network. Downsampling is done with a 7x7
convolution kernel, otherwise a 3x3 kernel is used.
stagewise_downsample: bool, whether or not to downsample before passing
inputs to the next stage.
encoder_decoder_shortcut: bool, whether or not to use shortcut
connections between encoder and decoder.
"""
super(HourglassNetwork, self).__init__()
self.num_hourglasses = num_hourglasses
self.initial_downsample = initial_downsample
if initial_downsample:
self.downsample_input = InputDownsampleBlock(
out_channels_initial_conv=input_channel_dims,
out_channels_residual_block=channel_dims_per_stage[0]
)
else:
self.conv_input = InputConvBlock(
out_channels_initial_conv=input_channel_dims,
out_channels_residual_block=channel_dims_per_stage[0]
)
self.hourglass_network = []
self.output_conv = []
for _ in range(self.num_hourglasses):
self.hourglass_network.append(
EncoderDecoderBlock(
num_stages=num_stages, channel_dims=channel_dims_per_stage,
blocks_per_stage=blocks_per_stage,
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut)
)
self.output_conv.append(
ConvolutionalBlock(kernel_size=3,
out_channels=channel_dims_per_stage[0])
)
self.intermediate_conv1 = []
self.intermediate_conv2 = []
self.intermediate_residual = []
for _ in range(self.num_hourglasses - 1):
self.intermediate_conv1.append(
ConvolutionalBlock(
kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False)
)
self.intermediate_conv2.append(
ConvolutionalBlock(
kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False)
)
self.intermediate_residual.append(
ResidualBlock(out_channels=channel_dims_per_stage[0])
)
self.intermediate_relu = tf.keras.layers.ReLU()
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if self.initial_downsample:
inputs = self.downsample_input(inputs)
else:
inputs = self.conv_input(inputs)
outputs = []
for i in range(self.num_hourglasses):
hourglass_output = self.hourglass_network[i](inputs)
output = self.output_conv[i](hourglass_output)
outputs.append(output)
if i < self.num_hourglasses - 1:
secondary_output = (self.intermediate_conv1[i](inputs) +
self.intermediate_conv2[i](output))
secondary_output = self.intermediate_relu(secondary_output)
inputs = self.intermediate_residual[i](secondary_output)
return outputs
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
  def num_feature_outputs(self):
    """The number of feature outputs returned by the feature extractor."""
return self.num_hourglasses
def _layer_depth(layer):
"""Compute depth of Conv/Residual blocks or lists of them."""
if isinstance(layer, list):
return sum([_layer_depth(l) for l in layer])
elif isinstance(layer, ConvolutionalBlock):
return 1
elif isinstance(layer, ResidualBlock):
return 2
else:
raise ValueError('Unknown layer - {}'.format(layer))
def _encoder_decoder_depth(network):
"""Helper function to compute depth of encoder-decoder blocks."""
encoder_block2_layers = _layer_depth(network.encoder_block2)
decoder_block_layers = _layer_depth(network.decoder_block)
if isinstance(network.inner_block[0], EncoderDecoderBlock):
    assert len(network.inner_block) == 1, 'Expected exactly one inner block.'
inner_block_layers = _encoder_decoder_depth(network.inner_block[0])
return inner_block_layers + encoder_block2_layers + decoder_block_layers
elif isinstance(network.inner_block[0], ResidualBlock):
return (encoder_block2_layers + decoder_block_layers +
_layer_depth(network.inner_block))
else:
raise ValueError('Unknown inner block type.')
def hourglass_depth(network):
"""Helper function to verify depth of hourglass backbone."""
input_conv_layers = 3 # 1 ResidualBlock and 1 ConvBlock
# Only intermediate_conv2 and intermediate_residual are applied before
# sending inputs to the later stages.
intermediate_layers = (
_layer_depth(network.intermediate_conv2) +
_layer_depth(network.intermediate_residual)
)
# network.output_conv is applied before sending input to the later stages
output_layers = _layer_depth(network.output_conv)
encoder_decoder_layers = sum(_encoder_decoder_depth(net) for net in
network.hourglass_network)
return (input_conv_layers + encoder_decoder_layers + intermediate_layers
+ output_layers)
def hourglass_104():
"""The Hourglass-104 backbone.
The architecture parameters are taken from [1].
Returns:
network: An HourglassNetwork object implementing the Hourglass-104
backbone.
[1]: https://arxiv.org/abs/1904.07850
"""
return HourglassNetwork(
input_channel_dims=128,
channel_dims_per_stage=[256, 256, 384, 384, 384, 512],
num_hourglasses=2,
num_stages=5,
blocks_per_stage=[2, 2, 2, 2, 2, 4],
)
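# Sanity-check sketch (mirrors how `hourglass_depth` above is intended to be
# used): the counted depth of the Hourglass-104 backbone is 104.
def _hourglass_104_depth_check() -> None:
  assert hourglass_depth(hourglass_104()) == 104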
def single_stage_hourglass(input_channel_dims, channel_dims_per_stage,
blocks_per_stage, initial_downsample=True,
stagewise_downsample=True,
encoder_decoder_shortcut=True):
assert len(channel_dims_per_stage) == len(blocks_per_stage)
return HourglassNetwork(
input_channel_dims=input_channel_dims,
channel_dims_per_stage=channel_dims_per_stage,
num_hourglasses=1,
num_stages=len(channel_dims_per_stage) - 1,
blocks_per_stage=blocks_per_stage,
initial_downsample=initial_downsample,
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut
)
def hourglass_10(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[1, 1],
channel_dims_per_stage=[nc * 2, nc * 2])
def hourglass_20(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3])
def hourglass_32(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[2, 2, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3])
def hourglass_52(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[2, 2, 2, 2, 2, 4],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4])
def hourglass_100(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[4, 4, 4, 4, 4, 8],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4])
def hourglass_20_uniform_size(num_channels):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3],
initial_downsample=False,
stagewise_downsample=False)
def hourglass_20_no_shortcut(num_channels):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3],
initial_downsample=False,
encoder_decoder_shortcut=False)
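# Illustrative sketch (not part of the library): the deep mask heads in this
# project build these single-stage variants with `initial_downsample=False`,
# so the spatial size of the input features is preserved. The shapes below are
# arbitrary assumptions.
def _hourglass_20_shape_example() -> tf.Tensor:
  net = hourglass_20(num_channels=32, initial_downsample=False)
  features = tf.zeros((1, 32, 32, 32))
  # One output per stacked hourglass; here a single [1, 32, 32, 64] tensor
  # (the channel count is 2 * num_channels).
  return net(features)[-1]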
| 21,705 | 33.021944 | 100 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/heads/instance_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instance prediction heads."""
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.deepmac_maskrcnn.modeling.heads import hourglass_network
class DeepMaskHead(tf.keras.layers.Layer):
"""Creates a mask head."""
def __init__(self,
num_classes,
upsample_factor=2,
num_convs=4,
num_filters=256,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
class_agnostic=False,
convnet_variant='default',
**kwargs):
"""Initializes a mask head.
Args:
num_classes: An `int` of the number of classes.
upsample_factor: An `int` that indicates the upsample factor to generate
the final predicted masks. It should be >= 1.
num_convs: An `int` number that represents the number of the intermediate
convolution layers before the mask prediction layers.
num_filters: An `int` number that represents the number of filters of the
intermediate convolution layers.
use_separable_conv: A `bool` that indicates whether the separable
convolution layers is used.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
class_agnostic: A `bool`. If set, we use a single channel mask head that
is shared between all classes.
convnet_variant: A `str` denoting the architecture of network used in the
head. Supported options are 'default', 'hourglass20', 'hourglass52'
and 'hourglass100'.
**kwargs: Additional keyword arguments to be passed.
"""
super(DeepMaskHead, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'upsample_factor': upsample_factor,
'num_convs': num_convs,
'num_filters': num_filters,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'class_agnostic': class_agnostic,
'convnet_variant': convnet_variant,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def _get_conv_op_and_kwargs(self):
conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
}
if self._config_dict['use_separable_conv']:
conv_kwargs.update({
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
else:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
return conv_op, conv_kwargs
def _get_bn_op_and_kwargs(self):
bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
return bn_op, bn_kwargs
def build(self, input_shape):
"""Creates the variables of the head."""
conv_op, conv_kwargs = self._get_conv_op_and_kwargs()
self._build_convnet_variant()
self._deconv = tf.keras.layers.Conv2DTranspose(
filters=self._config_dict['num_filters'],
kernel_size=self._config_dict['upsample_factor'],
strides=self._config_dict['upsample_factor'],
padding='valid',
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name='mask-upsampling')
bn_op, bn_kwargs = self._get_bn_op_and_kwargs()
self._deconv_bn = bn_op(name='mask-deconv-bn', **bn_kwargs)
if self._config_dict['class_agnostic']:
num_filters = 1
else:
num_filters = self._config_dict['num_classes']
conv_kwargs = {
'filters': num_filters,
'kernel_size': 1,
'padding': 'valid',
}
if self._config_dict['use_separable_conv']:
conv_kwargs.update({
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
else:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
self._mask_regressor = conv_op(name='mask-logits', **conv_kwargs)
super(DeepMaskHead, self).build(input_shape)
def call(self, inputs, training=None):
"""Forward pass of mask branch for the Mask-RCNN model.
Args:
inputs: A `list` of two tensors where
inputs[0]: A `tf.Tensor` of shape [batch_size, num_instances,
roi_height, roi_width, roi_channels], representing the ROI features.
inputs[1]: A `tf.Tensor` of shape [batch_size, num_instances],
representing the classes of the ROIs.
training: A `bool` indicating whether it is in `training` mode.
Returns:
mask_outputs: A `tf.Tensor` of shape
[batch_size, num_instances, roi_height * upsample_factor,
roi_width * upsample_factor], representing the mask predictions.
"""
roi_features, roi_classes = inputs
features_shape = tf.shape(roi_features)
num_rois, height, width, filters = (
features_shape[1],
features_shape[2],
features_shape[3],
features_shape[4],
)
x = tf.reshape(roi_features, [-1, height, width, filters])
x = self._call_convnet_variant(x)
x = self._deconv(x)
x = self._deconv_bn(x)
x = self._activation(x)
logits = self._mask_regressor(x)
mask_height = height * self._config_dict['upsample_factor']
mask_width = width * self._config_dict['upsample_factor']
if self._config_dict['class_agnostic']:
return tf.reshape(logits, [-1, num_rois, mask_height, mask_width])
else:
logits = tf.reshape(
logits,
[-1, num_rois, mask_height, mask_width,
self._config_dict['num_classes']])
return tf.gather(
logits, tf.cast(roi_classes, dtype=tf.int32), axis=-1, batch_dims=2
)
def _build_convnet_variant(self):
variant = self._config_dict['convnet_variant']
if variant == 'default':
bn_op, bn_kwargs = self._get_bn_op_and_kwargs()
self._convs = []
self._conv_norms = []
for i in range(self._config_dict['num_convs']):
conv_name = 'mask-conv_{}'.format(i)
conv_op, conv_kwargs = self._get_conv_op_and_kwargs()
self._convs.append(conv_op(name=conv_name, **conv_kwargs))
bn_name = 'mask-conv-bn_{}'.format(i)
self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs))
elif variant == 'hourglass20':
logging.info('Using hourglass 20 network.')
self._hourglass = hourglass_network.hourglass_20(
self._config_dict['num_filters'], initial_downsample=False)
elif variant == 'hourglass52':
logging.info('Using hourglass 52 network.')
self._hourglass = hourglass_network.hourglass_52(
self._config_dict['num_filters'], initial_downsample=False)
elif variant == 'hourglass100':
logging.info('Using hourglass 100 network.')
self._hourglass = hourglass_network.hourglass_100(
self._config_dict['num_filters'], initial_downsample=False)
else:
raise ValueError('Unknown ConvNet variant - {}'.format(variant))
def _call_convnet_variant(self, x):
variant = self._config_dict['convnet_variant']
if variant == 'default':
for conv, bn in zip(self._convs, self._conv_norms):
x = conv(x)
x = bn(x)
x = self._activation(x)
return x
elif variant == 'hourglass20':
return self._hourglass(x)[-1]
elif variant == 'hourglass52':
return self._hourglass(x)[-1]
elif variant == 'hourglass100':
return self._hourglass(x)[-1]
else:
raise ValueError('Unknown ConvNet variant - {}'.format(variant))
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
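# Illustrative shape sketch (not part of the library): with 14x14 ROI features
# and upsample_factor=2, the head predicts one 28x28 mask per instance, picked
# from the per-class logits by the instance class. All sizes are assumptions.
def _deep_mask_head_shape_example() -> tf.Tensor:
  head = DeepMaskHead(num_classes=91, upsample_factor=2)
  roi_features = tf.zeros((2, 8, 14, 14, 256))
  roi_classes = tf.zeros((2, 8), dtype=tf.int32)
  return head([roi_features, roi_classes])  # [2, 8, 28, 28]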
| 11,466 | 37.351171 | 80 | py |
models | models-master/official/projects/deepmac_maskrcnn/tasks/deep_mask_head_rcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask R-CNN variant with support for deep mask heads."""
import tensorflow as tf
from official.core import task_factory
from official.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn as deep_mask_head_rcnn_config
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model as deep_maskrcnn_model
from official.projects.deepmac_maskrcnn.modeling.heads import instance_heads as deep_instance_heads
from official.vision.modeling import backbones
from official.vision.modeling.decoders import factory as decoder_factory
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.heads import instance_heads
from official.vision.modeling.layers import detection_generator
from official.vision.modeling.layers import mask_sampler
from official.vision.modeling.layers import roi_aligner
from official.vision.modeling.layers import roi_generator
from official.vision.modeling.layers import roi_sampler
from official.vision.tasks import maskrcnn
# Taken from modeling/factory.py
def build_maskrcnn(input_specs: tf.keras.layers.InputSpec,
model_config: deep_mask_head_rcnn_config.DeepMaskHeadRCNN,
l2_regularizer: tf.keras.regularizers.Regularizer = None): # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds Mask R-CNN model."""
norm_activation_config = model_config.norm_activation
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
rpn_head_config = model_config.rpn_head
roi_generator_config = model_config.roi_generator
roi_sampler_config = model_config.roi_sampler
roi_aligner_config = model_config.roi_aligner
detection_head_config = model_config.detection_head
generator_config = model_config.detection_generator
num_anchors_per_location = (
len(model_config.anchor.aspect_ratios) * model_config.anchor.num_scales)
rpn_head = dense_prediction_heads.RPNHead(
min_level=model_config.min_level,
max_level=model_config.max_level,
num_anchors_per_location=num_anchors_per_location,
num_convs=rpn_head_config.num_convs,
num_filters=rpn_head_config.num_filters,
use_separable_conv=rpn_head_config.use_separable_conv,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
detection_head = instance_heads.DetectionHead(
num_classes=model_config.num_classes,
num_convs=detection_head_config.num_convs,
num_filters=detection_head_config.num_filters,
use_separable_conv=detection_head_config.use_separable_conv,
num_fcs=detection_head_config.num_fcs,
fc_dims=detection_head_config.fc_dims,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
roi_generator_obj = roi_generator.MultilevelROIGenerator(
pre_nms_top_k=roi_generator_config.pre_nms_top_k,
pre_nms_score_threshold=roi_generator_config.pre_nms_score_threshold,
pre_nms_min_size_threshold=(
roi_generator_config.pre_nms_min_size_threshold),
nms_iou_threshold=roi_generator_config.nms_iou_threshold,
num_proposals=roi_generator_config.num_proposals,
test_pre_nms_top_k=roi_generator_config.test_pre_nms_top_k,
test_pre_nms_score_threshold=(
roi_generator_config.test_pre_nms_score_threshold),
test_pre_nms_min_size_threshold=(
roi_generator_config.test_pre_nms_min_size_threshold),
test_nms_iou_threshold=roi_generator_config.test_nms_iou_threshold,
test_num_proposals=roi_generator_config.test_num_proposals,
use_batched_nms=roi_generator_config.use_batched_nms)
roi_sampler_obj = roi_sampler.ROISampler(
mix_gt_boxes=roi_sampler_config.mix_gt_boxes,
num_sampled_rois=roi_sampler_config.num_sampled_rois,
foreground_fraction=roi_sampler_config.foreground_fraction,
foreground_iou_threshold=roi_sampler_config.foreground_iou_threshold,
background_iou_high_threshold=(
roi_sampler_config.background_iou_high_threshold),
background_iou_low_threshold=(
roi_sampler_config.background_iou_low_threshold))
roi_aligner_obj = roi_aligner.MultilevelROIAligner(
crop_size=roi_aligner_config.crop_size,
sample_offset=roi_aligner_config.sample_offset)
detection_generator_obj = detection_generator.DetectionGenerator(
apply_nms=True,
pre_nms_top_k=generator_config.pre_nms_top_k,
pre_nms_score_threshold=generator_config.pre_nms_score_threshold,
nms_iou_threshold=generator_config.nms_iou_threshold,
max_num_detections=generator_config.max_num_detections,
nms_version=generator_config.nms_version,
use_sigmoid_probability=generator_config.use_sigmoid_probability)
if model_config.include_mask:
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=model_config.num_classes,
upsample_factor=model_config.mask_head.upsample_factor,
num_convs=model_config.mask_head.num_convs,
num_filters=model_config.mask_head.num_filters,
use_separable_conv=model_config.mask_head.use_separable_conv,
activation=model_config.norm_activation.activation,
norm_momentum=model_config.norm_activation.norm_momentum,
norm_epsilon=model_config.norm_activation.norm_epsilon,
kernel_regularizer=l2_regularizer,
class_agnostic=model_config.mask_head.class_agnostic,
convnet_variant=model_config.mask_head.convnet_variant)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=(
model_config.mask_roi_aligner.crop_size *
model_config.mask_head.upsample_factor),
num_sampled_masks=model_config.mask_sampler.num_sampled_masks)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(
crop_size=model_config.mask_roi_aligner.crop_size,
sample_offset=model_config.mask_roi_aligner.sample_offset)
else:
mask_head = None
mask_sampler_obj = None
mask_roi_aligner_obj = None
model = deep_maskrcnn_model.DeepMaskRCNNModel(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=detection_head,
roi_generator=roi_generator_obj,
roi_sampler=roi_sampler_obj,
roi_aligner=roi_aligner_obj,
detection_generator=detection_generator_obj,
mask_head=mask_head,
mask_sampler=mask_sampler_obj,
mask_roi_aligner=mask_roi_aligner_obj,
class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred,
cascade_class_ensemble=detection_head_config.cascade_class_ensemble,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_scales=model_config.anchor.num_scales,
aspect_ratios=model_config.anchor.aspect_ratios,
anchor_size=model_config.anchor.anchor_size,
outer_boxes_scale=model_config.outer_boxes_scale,
use_gt_boxes_for_masks=model_config.use_gt_boxes_for_masks)
return model
@task_factory.register_task_cls(deep_mask_head_rcnn_config.DeepMaskHeadRCNNTask)
class DeepMaskHeadRCNNTask(maskrcnn.MaskRCNNTask):
"""Mask R-CNN with support for deep mask heads."""
def build_model(self):
"""Builds Mask R-CNN model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = build_maskrcnn(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
# Builds the model through warm-up call.
dummy_images = tf.keras.Input(self.task_config.model.input_size)
dummy_image_shape = tf.keras.layers.Input([2])
_ = model(dummy_images, image_shape=dummy_image_shape, training=False)
return model
| 9,426 | 44.105263 | 136 | py |
models | models-master/official/projects/unified_detector/run_inference.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A binary to run unified detector."""
import json
import os
from typing import Any, Dict, Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
import cv2
import gin
import numpy as np
import tensorflow as tf
import tqdm
from official.projects.unified_detector import external_configurables # pylint: disable=unused-import
from official.projects.unified_detector.modeling import universal_detector
from official.projects.unified_detector.utils import utilities
# Group two lines into a paragraph if their affinity score is higher than this.
_PARA_GROUP_THR = 0.5
# MODEL spec
_GIN_FILE = flags.DEFINE_string(
'gin_file', None, 'Path to the Gin file that defines the model.')
_CKPT_PATH = flags.DEFINE_string(
'ckpt_path', None, 'Path to the checkpoint directory.')
_IMG_SIZE = flags.DEFINE_integer(
'img_size', 1024, 'Size of the image fed to the model.')
# Input & Output
# Note that, all images specified by `img_file` and `img_dir` will be processed.
_IMG_FILE = flags.DEFINE_multi_string('img_file', [], 'Paths to the images.')
_IMG_DIR = flags.DEFINE_multi_string(
'img_dir', [], 'Paths to the image directories.')
_OUTPUT_PATH = flags.DEFINE_string('output_path', None, 'Path for the output.')
_VIS_DIR = flags.DEFINE_string(
'vis_dir', None, 'Path for the visualization output.')
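# Example invocation (all paths below are hypothetical placeholders):
#
#   python3 run_inference.py \
#     --gin_file=/path/to/unified_detector.gin \
#     --ckpt_path=/path/to/checkpoint/ckpt-100000 \
#     --img_dir=/path/to/images \
#     --output_path=/tmp/output.json \
#     --vis_dir=/tmp/vis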
def _preprocess(raw_image: np.ndarray) -> Tuple[np.ndarray, float]:
  """Converts a raw image to a resized, padded, normalized batch plus the ratio."""
# (1) convert to tf.Tensor and float32.
img_tensor = tf.convert_to_tensor(raw_image, dtype=tf.float32)
# (2) pad to square.
height, width = img_tensor.shape[:2]
maximum_side = tf.maximum(height, width)
height_pad = maximum_side - height
width_pad = maximum_side - width
img_tensor = tf.pad(
img_tensor, [[0, height_pad], [0, width_pad], [0, 0]],
constant_values=127)
ratio = maximum_side / _IMG_SIZE.value
  # (3) resize the padded square image to the target size.
img_tensor = tf.image.resize(
img_tensor, (_IMG_SIZE.value, _IMG_SIZE.value))
img_tensor = tf.cast(img_tensor, tf.uint8)
# (4) normalize
img_tensor = utilities.normalize_image_to_range(img_tensor)
# (5) Add batch dimension and return as numpy array.
return tf.expand_dims(img_tensor, 0).numpy(), float(ratio)
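# Illustrative sketch (not called anywhere; flags must be parsed first): shape
# bookkeeping for `_preprocess`. A 500x800 input is padded to an 800x800
# square, resized to the default 1024x1024, and batched; the returned ratio
# (800 / 1024) maps predictions back to the original pixel space.
def _preprocess_shape_example():
  fake_image = np.zeros((500, 800, 3), dtype=np.uint8)
  batched, ratio = _preprocess(fake_image)
  return batched.shape, ratio  # ((1, 1024, 1024, 3), 0.78125)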
def load_model() -> tf.keras.layers.Layer:
gin.parse_config_file(_GIN_FILE.value)
model = universal_detector.UniversalDetector()
ckpt = tf.train.Checkpoint(model=model)
ckpt_path = _CKPT_PATH.value
logging.info('Load ckpt from: %s', ckpt_path)
ckpt.restore(ckpt_path).expect_partial()
return model
def inference(img_file: str, model: tf.keras.layers.Layer) -> Dict[str, Any]:
"""Inference step."""
img = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2RGB)
img_ndarray, ratio = _preprocess(img)
output_dict = model.serve(img_ndarray)
class_tensor = output_dict['classes'].numpy()
mask_tensor = output_dict['masks'].numpy()
group_tensor = output_dict['groups'].numpy()
indices = np.where(class_tensor[0])[0].tolist() # indices of positive slots.
mask_list = [
mask_tensor[0, :, :, index] for index in indices] # List of mask ndarray.
# Form lines and words
lines = []
line_indices = []
for index, mask in tqdm.tqdm(zip(indices, mask_list)):
line = {
'words': [],
'text': '',
}
contours, _ = cv2.findContours(
(mask > 0.).astype(np.uint8),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
for contour in contours:
if (isinstance(contour, np.ndarray) and
len(contour.shape) == 3 and
contour.shape[0] > 2 and
contour.shape[1] == 1 and
contour.shape[2] == 2):
cnt_list = (contour[:, 0] * ratio).astype(np.int32).tolist()
line['words'].append({'text': '', 'vertices': cnt_list})
else:
logging.error('Invalid contour: %s, discarded', str(contour))
if line['words']:
lines.append(line)
line_indices.append(index)
# Form paragraphs
line_grouping = utilities.DisjointSet(len(line_indices))
affinity = group_tensor[0][line_indices][:, line_indices]
for i1, i2 in zip(*np.where(affinity > _PARA_GROUP_THR)):
line_grouping.union(i1, i2)
line_groups = line_grouping.to_group()
paragraphs = []
for line_group in line_groups:
paragraph = {'lines': []}
for id_ in line_group:
paragraph['lines'].append(lines[id_])
if paragraph:
paragraphs.append(paragraph)
return paragraphs
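# The structure returned by `inference` nests words in lines and lines in
# paragraphs, e.g. (vertex values are illustrative):
#
#   [{'lines': [{'text': '',
#                'words': [{'text': '', 'vertices': [[10, 2], [56, 2],
#                                                    [56, 20], [10, 20]]}]}]}]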
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Get list of images
img_lists = []
img_lists.extend(_IMG_FILE.value)
for img_dir in _IMG_DIR.value:
img_lists.extend(tf.io.gfile.glob(os.path.join(img_dir, '*')))
logging.info('Total number of input images: %d', len(img_lists))
model = load_model()
  vis_dir = _VIS_DIR.value
output = {'annotations': []}
for img_file in tqdm.tqdm(img_lists):
output['annotations'].append({
'image_id': img_file.split('/')[-1].split('.')[0],
'paragraphs': inference(img_file, model),
})
    if vis_dir:
key = output['annotations'][-1]['image_id']
paragraphs = output['annotations'][-1]['paragraphs']
img = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2RGB)
word_bnds = []
line_bnds = []
para_bnds = []
for paragraph in paragraphs:
paragraph_points_list = []
for line in paragraph['lines']:
line_points_list = []
for word in line['words']:
word_bnds.append(
np.array(word['vertices'], np.int32).reshape((-1, 1, 2)))
line_points_list.extend(word['vertices'])
paragraph_points_list.extend(line_points_list)
line_points = np.array(line_points_list, np.int32) # (N,2)
left = int(np.min(line_points[:, 0]))
top = int(np.min(line_points[:, 1]))
right = int(np.max(line_points[:, 0]))
bottom = int(np.max(line_points[:, 1]))
line_bnds.append(
np.array([[[left, top]], [[right, top]], [[right, bottom]],
[[left, bottom]]], np.int32))
para_points = np.array(paragraph_points_list, np.int32) # (N,2)
left = int(np.min(para_points[:, 0]))
top = int(np.min(para_points[:, 1]))
right = int(np.max(para_points[:, 0]))
bottom = int(np.max(para_points[:, 1]))
para_bnds.append(
np.array([[[left, top]], [[right, top]], [[right, bottom]],
[[left, bottom]]], np.int32))
for name, bnds in zip(['paragraph', 'line', 'word'],
[para_bnds, line_bnds, word_bnds]):
vis = cv2.polylines(img, bnds, True, (0, 0, 255), 2)
        cv2.imwrite(os.path.join(vis_dir, f'{key}-{name}.jpg'),
                    cv2.cvtColor(vis, cv2.COLOR_RGB2BGR))
with tf.io.gfile.GFile(_OUTPUT_PATH.value, mode='w') as f:
f.write(json.dumps(output, ensure_ascii=False, indent=2))
if __name__ == '__main__':
flags.mark_flags_as_required(['gin_file', 'ckpt_path', 'output_path'])
app.run(main)
| 7,791 | 33.941704 | 102 | py |
models | models-master/official/projects/unified_detector/external_configurables.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrap external code in gin."""
import gin
import gin.tf.external_configurables
import tensorflow as tf
# Tensorflow.
gin.external_configurable(tf.keras.layers.experimental.SyncBatchNormalization)
| 809 | 34.217391 | 78 | py |
models | models-master/official/projects/unified_detector/data_loaders/autoaugment.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
This library is adapted from:
`https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py`.
Several changes are made. They are inspired by the TIMM library:
https://github.com/rwightman/pytorch-image-models/tree/master/timm/data
Changes include:
(1) Random Erasing / Cutout is added, and separated from the random augmentation
pool (not sampled as an operation).
(2) For `posterize` and `solarize`, the arguments are changed such that the
level of corruption increases as the `magnitude` argument increases.
(3) `color`, `contrast`, `brightness`, `sharpness` are randomly enhanced or
diminished.
(4) Magnitude is randomly sampled from a normal distribution.
(5) Operations are applied with a probability.
"""
import inspect
import math
import tensorflow as tf
import tensorflow_addons.image as tfa_image
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
# pylint: disable=g-long-lambda
blend = tf.function(lambda i1, i2, factor: tf.cast(
tfa_image.blend(tf.cast(i1, tf.float32), tf.cast(i2, tf.float32), factor),
tf.uint8))
# pylint: enable=g-long-lambda
def random_erase(image,
prob,
min_area=0.02,
max_area=1 / 3,
min_aspect=1 / 3,
max_aspect=10 / 3,
mode='pixel'):
"""The random erasing augmentations: https://arxiv.org/pdf/1708.04896.pdf.
This augmentation is applied after image normalization.
Args:
image: Input image after all other augmentation and normalization. It has
type tf.float32.
prob: Probability of applying the random erasing operation.
min_area: As named.
max_area: As named.
min_aspect: As named.
max_aspect: As named.
    mode: How the erased area is filled. 'pixel' fills the region with
      truncated-normal noise; any other value fills it with zeros.
Returns:
Randomly erased image.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
image_area = tf.cast(image_width * image_height, tf.float32)
# Sample width, height
erase_area = tf.random.uniform([], min_area, max_area) * image_area
log_max_target_ar = tf.math.log(
tf.minimum(
tf.math.divide(
tf.math.square(tf.cast(image_width, tf.float32)), erase_area),
max_aspect))
log_min_target_ar = tf.math.log(
tf.maximum(
tf.math.divide(erase_area,
tf.math.square(tf.cast(image_height, tf.float32))),
min_aspect))
erase_aspect_ratio = tf.math.exp(
tf.random.uniform([], log_min_target_ar, log_max_target_ar))
erase_h = tf.cast(tf.math.sqrt(erase_area / erase_aspect_ratio), tf.int32)
erase_w = tf.cast(tf.math.sqrt(erase_area * erase_aspect_ratio), tf.int32)
# Sample (left, top) of the rectangle to erase
erase_left = tf.random.uniform(
shape=[], minval=0, maxval=image_width - erase_w, dtype=tf.int32)
erase_top = tf.random.uniform(
shape=[], minval=0, maxval=image_height - erase_h, dtype=tf.int32)
pad_right = image_width - erase_w - erase_left
pad_bottom = image_height - erase_h - erase_top
mask = tf.pad(
tf.zeros([erase_h, erase_w], dtype=image.dtype),
[[erase_top, pad_bottom], [erase_left, pad_right]],
constant_values=1)
mask = tf.expand_dims(mask, -1) # [H, W, 1]
if mode == 'pixel':
fill = tf.random.truncated_normal(
tf.shape(image), 0.0, 1.0, dtype=image.dtype)
else:
fill = tf.zeros(tf.shape(image), dtype=image.dtype)
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(should_apply_op,
lambda: mask * image + (1 - mask) * fill,
lambda: image)
return augmented_image
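# Illustrative usage sketch (added; not part of the original module). The
# shape and probability below are example values only:
#
#   image = tf.random.normal([224, 224, 3])  # normalized float image
#   erased = random_erase(image, prob=0.5, mode='pixel')
#
# With probability `prob`, a random rectangle is replaced by truncated-normal
# noise ('pixel' mode) or zeros (any other mode); otherwise the image is
# returned unchanged.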
def solarize(image, threshold=128):
  # For each pixel in the image, keep the value unchanged if it is below the
  # threshold. Otherwise, invert it (i.e. subtract it from 255).
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize. Smaller `bits` means larger degradation."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by the
rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
if isinstance(replace, list) or isinstance(replace, tuple):
replace = replace[0]
image = tfa_image.rotate(image, radians, fill_value=replace)
return image
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
return tfa_image.translate_xy(image, [-pixels, 0], replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
return tfa_image.translate_xy(image, [0, -pixels], replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute the mins and maxes.
lo = tf.cast(tf.reduce_min(image), tf.float32)
hi = tf.cast(tf.reduce_max(image), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
    # Some augmentations that use depth-wise conv can crash when training on
    # GPU. See (b/156242594) for details.
degenerate = tf.nn.depthwise_conv2d(image, kernel, strides, padding='VALID')
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
      # and then normalizing by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0), lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'PosterizeIncreasing': posterize,
'Solarize': solarize,
'SolarizeIncreasing': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'ColorIncreasing': color,
'Contrast': contrast,
'ContrastIncreasing': contrast,
'Brightness': brightness,
'BrightnessIncreasing': brightness,
'Sharpness': sharpness,
'SharpnessIncreasing': sharpness,
'ShearX': tfa_image.shear_x,
'ShearY': tfa_image.shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': tfa_image.random_cutout,
'Hue': tf.image.adjust_hue,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: -tensor, lambda: tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _enhance_increasing_level_to_arg(level):
level = (level / _MAX_LEVEL) * .9
level = 1.0 + _randomly_negate_tensor(level)
return (level,)
def _shear_level_to_arg(level):
level = (level / _MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = level / _MAX_LEVEL * translate_const
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _posterize_level_to_arg(level):
return (tf.cast(level / _MAX_LEVEL * 4, tf.uint8),)
def _posterize_increase_level_to_arg(level):
return (4 - _posterize_level_to_arg(level)[0],)
def _solarize_level_to_arg(level):
return (tf.cast(level / _MAX_LEVEL * 256, tf.uint8),)
def _solarize_increase_level_to_arg(level):
return (256 - _solarize_level_to_arg(level)[0],)
def _solarize_add_level_to_arg(level):
return (tf.cast(level / _MAX_LEVEL * 110, tf.int64),)
def _cutout_arg(level, cutout_size):
pad_size = tf.cast(level / _MAX_LEVEL * cutout_size, tf.int32)
return (2 * pad_size, 2 * pad_size)
def level_to_arg(hparams):
return {
'AutoContrast':
lambda level: (),
'Equalize':
lambda level: (),
'Invert':
lambda level: (),
'Rotate':
_rotate_level_to_arg,
'Posterize':
_posterize_level_to_arg,
'PosterizeIncreasing':
_posterize_increase_level_to_arg,
'Solarize':
_solarize_level_to_arg,
'SolarizeIncreasing':
_solarize_increase_level_to_arg,
'SolarizeAdd':
_solarize_add_level_to_arg,
'Color':
_enhance_level_to_arg,
'ColorIncreasing':
_enhance_increasing_level_to_arg,
'Contrast':
_enhance_level_to_arg,
'ContrastIncreasing':
_enhance_increasing_level_to_arg,
'Brightness':
_enhance_level_to_arg,
'BrightnessIncreasing':
_enhance_increasing_level_to_arg,
'Sharpness':
_enhance_level_to_arg,
'SharpnessIncreasing':
_enhance_increasing_level_to_arg,
'ShearX':
_shear_level_to_arg,
'ShearY':
_shear_level_to_arg,
# pylint:disable=g-long-lambda
'Cutout':
lambda level: _cutout_arg(level, hparams['cutout_const']),
# pylint:disable=g-long-lambda
      'TranslateX':
          lambda level: _translate_level_to_arg(
              level, hparams['translate_const']),
      'TranslateY':
          lambda level: _translate_level_to_arg(
              level, hparams['translate_const']),
'Hue':
lambda level: ((level / _MAX_LEVEL) * 0.25,),
# pylint:enable=g-long-lambda
}
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.signature(func).parameters.keys(): # pylint: disable=deprecated-method
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(should_apply_op, lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image, augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
    augmentation_name: The name of the AutoAugment policy to use. The available
      options are `v0` and `test`. `v0` is the policy used for all of the
      results in the AutoAugment paper, and `test` is a small single-sub-policy
      option intended only for debugging.
  Returns:
    The augmented version of `image`.
"""
available_policies = {'v0': policy_v0, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = dict(cutout_const=100, translate_const=250)
return build_and_apply_nas_policy(policy, image, augmentation_hparams)
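# Illustrative usage sketch (added; not part of the original module). The
# input shape is an example value only:
#
#   image = tf.cast(
#       tf.random.uniform([224, 224, 3], 0, 256, dtype=tf.int32), tf.uint8)
#   augmented = distort_image_with_autoaugment(image, 'v0')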
# Cutout is implemented separately.
_RAND_TRANSFORMS = [
'AutoContrast',
'Equalize',
'Invert',
'Rotate',
'Posterize',
'Solarize',
'Color',
'Contrast',
'Brightness',
'Sharpness',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
'SolarizeAdd',
'Hue',
]
# Cutout is implemented separately.
_RAND_INCREASING_TRANSFORMS = [
'AutoContrast',
'Equalize',
'Invert',
'Rotate',
'PosterizeIncreasing',
'SolarizeIncreasing',
'SolarizeAdd',
'ColorIncreasing',
'ContrastIncreasing',
'BrightnessIncreasing',
'SharpnessIncreasing',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
'Hue',
]
# These augmentations are not suitable for detection task.
_NON_COLOR_DISTORTION_OPS = [
'Rotate',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
]
def distort_image_with_randaugment(image,
num_layers,
magnitude,
mag_std,
inc,
prob,
color_only=False):
"""Applies the RandAugment policy to `image`.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image. The image
should have uint8 type in [0, 255].
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range [5,
30].
    mag_std: Standard deviation of the magnitude. The actual magnitude of each
      layer is sampled on the fly from a normal distribution centered at
      `magnitude`, then clipped to [0, _MAX_LEVEL].
    inc: Whether to use the `increasing` transform set, in which the level of
      corruption grows as `magnitude` increases.
    prob: Probability of applying each selected operation.
    color_only: Whether to only apply operations that distort color and do not
      change the spatial layout.
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
augmentation_hparams = dict(cutout_const=40, translate_const=100)
available_ops = _RAND_INCREASING_TRANSFORMS if inc else _RAND_TRANSFORMS
if color_only:
available_ops = list(
filter(lambda op: op not in _NON_COLOR_DISTORTION_OPS, available_ops))
for layer_num in range(num_layers):
op_to_select = tf.random.uniform([],
maxval=len(available_ops),
dtype=tf.int32)
random_magnitude = tf.clip_by_value(
tf.random.normal([], magnitude, mag_std), 0., _MAX_LEVEL)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
image = tf.cond(
tf.equal(i, op_to_select),
# pylint:disable=g-long-lambda
lambda s_func=func, s_args=args: _apply_func_with_prob(
s_func, image, s_args, prob),
# pylint:enable=g-long-lambda
lambda: image)
return image
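# Illustrative usage sketch (added; not part of the original module). The
# argument values below are examples, not recommended settings:
#
#   image = tf.cast(
#       tf.random.uniform([224, 224, 3], 0, 256, dtype=tf.int32), tf.uint8)
#   augmented = distort_image_with_randaugment(
#       image, num_layers=2, magnitude=9, mag_std=0.5, inc=True, prob=0.7)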
| 26,254 | 33.820955 | 97 | py |
models | models-master/official/projects/unified_detector/modeling/universal_detector.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Universal detector implementation."""
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import gin
import tensorflow as tf
from deeplab2 import config_pb2
from deeplab2.model.decoder import max_deeplab as max_deeplab_head
from deeplab2.model.encoder import axial_resnet_instances
from deeplab2.model.loss import matchers_ops
from official.legacy.transformer import transformer
from official.projects.unified_detector.utils import typing
from official.projects.unified_detector.utils import utilities
EPSILON = 1e-6
@gin.configurable
def universal_detection_loss_weights(
loss_segmentation_word: float = 1e0,
loss_inst_dist: float = 1e0,
loss_mask_id: float = 1e-4,
loss_pq: float = 3e0,
loss_para: float = 1e0) -> Dict[str, float]:
"""A function that returns a dict for the weights of loss terms."""
return {
"loss_segmentation_word": loss_segmentation_word,
"loss_inst_dist": loss_inst_dist,
"loss_mask_id": loss_mask_id,
"loss_pq": loss_pq,
"loss_para": loss_para,
}
@gin.configurable
class LayerNorm(tf.keras.layers.LayerNormalization):
"""A wrapper to allow passing the `training` argument.
The normalization layers in the MaX-DeepLab implementation are passed with
the `training` argument. This wrapper enables the usage of LayerNorm.
"""
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
del training
return super().call(inputs)
@gin.configurable
def get_max_deep_lab_backbone(num_slots: int = 128):
return axial_resnet_instances.get_model(
"max_deeplab_s",
bn_layer=LayerNorm,
block_group_config={
"drop_path_schedule": "linear",
"axial_use_recompute_grad": False
},
backbone_use_transformer_beyond_stride=16,
extra_decoder_use_transformer_beyond_stride=16,
num_mask_slots=num_slots,
max_num_mask_slots=num_slots)
@gin.configurable
class UniversalDetector(tf.keras.layers.Layer):
"""Univeral Detector."""
loss_items = ("loss_pq", "loss_inst_dist", "loss_para", "loss_mask_id",
"loss_segmentation_word")
def __init__(self,
backbone_fn: tf.keras.layers.Layer = get_max_deep_lab_backbone,
mask_threshold: float = 0.4,
class_threshold: float = 0.5,
filter_area: float = 32,
**kwargs: Any):
"""Constructor.
Args:
backbone_fn: The function to initialize a backbone.
mask_threshold: Masks are thresholded with this value.
class_threshold: Classification heads are thresholded with this value.
filter_area: In inference, detections with area smaller than this
threshold will be removed.
**kwargs: other keyword arguments passed to the base class.
"""
super().__init__(**kwargs)
# Model
self._backbone_fn = backbone_fn()
self._decoder = _get_decoder_head()
self._class_embed_head, self._para_embed_head = _get_embed_head()
self._para_head, self._para_proj = _get_para_head()
# Losses
# self._max_deeplab_loss = _get_max_deeplab_loss()
self._loss_weights = universal_detection_loss_weights()
# Post-processing
self._mask_threshold = mask_threshold
self._class_threshold = class_threshold
self._filter_area = filter_area
def _preprocess_labels(self, labels: typing.TensorDict):
# Preprocessing
# Converted the integer mask to one-hot embedded masks.
num_instances = utilities.resolve_shape(
labels["instance_labels"]["masks_sizes"])[1]
labels["instance_labels"]["masks"] = tf.one_hot(
labels["instance_labels"]["masks"],
depth=num_instances,
axis=1,
dtype=tf.float32) # (B, N, H, W)
def compute_losses(
self, labels: typing.NestedTensorDict, outputs: typing.NestedTensorDict
) -> Tuple[tf.Tensor, typing.NestedTensorDict]:
"""Computes the loss.
Args:
labels: A dictionary of ground-truth labels.
outputs: Output from self.call().
Returns:
A scalar total loss tensor and a dictionary for individual losses.
"""
loss_dict = {}
self._preprocess_labels(labels)
# Main loss: PQ loss.
_entity_mask_loss(loss_dict, labels["instance_labels"],
outputs["instance_output"])
# Auxiliary loss 1: semantic loss
_semantic_loss(loss_dict, labels["segmentation_output"],
outputs["segmentation_output"])
# Auxiliary loss 2: instance discrimination
_instance_discrimination_loss(loss_dict, labels["instance_labels"], outputs)
# Auxiliary loss 3: mask id
_mask_id_xent_loss(loss_dict, labels["instance_labels"], outputs)
# Auxiliary loss 4: paragraph grouping
_paragraph_grouping_loss(loss_dict, labels, outputs)
weighted_loss = [self._loss_weights[k] * v for k, v in loss_dict.items()]
total_loss = sum(weighted_loss)
return total_loss, loss_dict
def call(self,
features: typing.TensorDict,
training: bool = False) -> typing.NestedTensorDict:
"""Forward pass of the model.
Args:
features: The input features: {"images": tf.Tensor}. Shape = [B, H, W, C]
training: Whether it's training mode.
Returns:
A dictionary of output with this structure:
{
"max_deep_lab": {
All the max deeplab outputs are here, including both backbone and
decoder.
}
"segmentation_output": {
"word_score": tf.Tensor, [B, h, w],
}
"instance_output": {
"cls_logits": tf.Tensor, [B, N, C],
"mask_id_logits": tf.Tensor, [B, H, W, N],
"cls_prob": tf.Tensor, [B, N, C],
"mask_id_prob": tf.Tensor, [B, H, W, N],
}
"postprocessed": {
"classes": A (B, N) tensor for the class ids. Zero for non-firing
slots.
"binary_masks": A (B, H, W, N) tensor for the N binary masks. Masks
for void cls are set to zero.
"confidence": A (B, N) float tensor for the confidence of "classes".
"mask_area": A (B, N) float tensor for the area of each mask.
}
"transformer_group_feature": (B, N, C) float tensor (normalized),
"para_affinity": (B, N, N) float tensor.
}
Class-0 is for void. Class-(C-1) is for background. Class-1~(C-2) is for
valid classes.
"""
# backbone
backbone_output = self._backbone_fn(features["images"], training)
# split instance embedding and paragraph embedding;
# then perform paragraph grouping
para_fts = self._get_para_outputs(backbone_output, training)
affinity = tf.linalg.matmul(para_fts, para_fts, transpose_b=True)
# text detection head
decoder_output = self._decoder(backbone_output, training)
output_dict = {
"max_deep_lab": decoder_output,
"transformer_group_feature": para_fts,
"para_affinity": affinity,
}
input_shape = utilities.resolve_shape(features["images"])
self._get_semantic_outputs(output_dict, input_shape)
self._get_instance_outputs(output_dict, input_shape)
self._postprocess(output_dict)
return output_dict
def _get_para_outputs(self, outputs: typing.TensorDict,
training: bool) -> tf.Tensor:
"""Apply the paragraph head.
This function first splits the features for instance classification and
instance grouping. Then, the additional grouping branch (transformer layers)
is applied to further encode the grouping features. Finally, a tensor of
normalized grouping features is returned.
Args:
outputs: output dictionary from the backbone.
training: training / eval mode mark.
Returns:
The normalized paragraph embedding vector of shape (B, N, C).
"""
# Project the object embeddings into classification feature and grouping
# feature.
fts = outputs["transformer_class_feature"] # B,N,C
class_feature = self._class_embed_head(fts, training)
group_feature = self._para_embed_head(fts, training)
outputs["transformer_class_feature"] = class_feature
outputs["transformer_group_feature"] = group_feature
# Feed the grouping features into additional group encoding branch.
# First we need to build the attention_bias which is used the standard
# transformer encoder.
input_shape = utilities.resolve_shape(group_feature)
b = input_shape[0]
n = int(input_shape[1])
seq_len = tf.constant(n, shape=(b,))
padding_mask = utilities.get_padding_mask_from_valid_lengths(
seq_len, n, tf.float32)
attention_bias = utilities.get_transformer_attention_bias(padding_mask)
group_feature = self._para_proj(
self._para_head(group_feature, attention_bias, None, training))
return tf.math.l2_normalize(group_feature, axis=-1)
def _get_semantic_outputs(self, outputs: typing.NestedTensorDict,
input_shape: tf.TensorShape):
"""Add `segmentation_output` to outputs.
Args:
outputs: A dictionary of outputs.
input_shape: The shape of the input images.
"""
h, w = input_shape[1:3]
# B, H/4, W/4, C
semantic_logits = outputs["max_deep_lab"]["semantic_logits"]
textness, unused_logits = tf.split(semantic_logits, [2, -1], -1)
# Channel[0:2], textness. c0: non-textness, c1: textness.
word_score = tf.nn.softmax(textness, -1, "word_score")[:, :, :, 1:2]
word_score = tf.squeeze(tf.image.resize(word_score, (h, w)), -1)
# Channel[2:] not used yet
outputs["segmentation_output"] = {"word_score": word_score}
def _get_instance_outputs(self, outputs: typing.NestedTensorDict,
input_shape: tf.TensorShape):
"""Add `instance_output` to outputs.
Args:
outputs: A dictionary of outputs.
input_shape: The shape of the input images.
    The following fields are added to outputs["instance_output"]:
      "cls_logits": tf.Tensor, [B, N, C].
      "mask_id_logits": tf.Tensor, [B, H, W, N].
      "cls_prob": tf.Tensor, [B, N, C], softmax probability.
      "mask_id_prob": tf.Tensor, [B, H, W, N], softmax probability.
    These fields are used in training. Masks are all resized to full
    resolution.
"""
# Get instance_output
h, w = input_shape[1:3]
## Classes
class_logits = outputs["max_deep_lab"]["transformer_class_logits"]
# The MaX-DeepLab repo uses the last logit for void; but we use 0.
# Therefore we shift the logits here.
class_logits = tf.roll(class_logits, shift=1, axis=-1)
class_prob = tf.nn.softmax(class_logits)
## Masks
mask_id_logits = outputs["max_deep_lab"]["pixel_space_mask_logits"]
mask_id_prob = tf.nn.softmax(mask_id_logits)
mask_id_logits = tf.image.resize(mask_id_logits, (h, w))
mask_id_prob = tf.image.resize(mask_id_prob, (h, w))
outputs["instance_output"] = {
"cls_logits": class_logits,
"mask_id_logits": mask_id_logits,
"cls_prob": class_prob,
"mask_id_prob": mask_id_prob,
}
def _postprocess(self, outputs: typing.NestedTensorDict):
"""Post-process (filtering) the outputs.
Args:
outputs: A dictionary of outputs.
    The following fields are added to outputs["postprocessed"]:
      "classes": A (B, N) integer tensor for the class ids.
      "binary_masks": A (B, H, W, N) tensor for the N binarized 0/1 masks.
        Masks for void cls are set to zero.
      "confidence": A (B, N) float tensor for the confidence of "classes".
      "mask_area": A (B, N) float tensor for the area of each mask.
    These fields are used in inference / visualization.
"""
# Get postprocessed outputs
outputs["postprocessed"] = {}
## Masks:
mask_id_prob = outputs["instance_output"]["mask_id_prob"]
mask_max_prob = tf.reduce_max(mask_id_prob, axis=-1, keepdims=True)
thresholded_binary_masks = tf.cast(
tf.math.logical_and(
tf.equal(mask_max_prob, mask_id_prob),
tf.greater_equal(mask_max_prob, self._mask_threshold)), tf.float32)
area = tf.reduce_sum(thresholded_binary_masks, axis=(1, 2)) # (B, N)
## Classification:
cls_prob = outputs["instance_output"]["cls_prob"]
cls_max_prob = tf.reduce_max(cls_prob, axis=-1) # B, N
cls_max_id = tf.cast(tf.argmax(cls_prob, axis=-1), tf.float32) # B, N
## filtering
c = utilities.resolve_shape(cls_prob)[2]
non_void = tf.reduce_all(
tf.stack(
[
tf.greater_equal(area, self._filter_area), # mask large enough.
tf.not_equal(cls_max_id, 0), # class-0 is for non-object.
tf.not_equal(cls_max_id,
c - 1), # class-(c-1) is for background (last).
tf.greater_equal(cls_max_prob,
self._class_threshold) # prob >= thr
],
axis=-1),
axis=-1)
non_void = tf.cast(non_void, tf.float32)
# Storing
outputs["postprocessed"]["classes"] = tf.cast(cls_max_id * non_void,
tf.int32)
b, n = utilities.resolve_shape(non_void)
outputs["postprocessed"]["binary_masks"] = (
thresholded_binary_masks * tf.reshape(non_void, (b, 1, 1, n)))
outputs["postprocessed"]["confidence"] = cls_max_prob
outputs["postprocessed"]["mask_area"] = area
def _coloring(self, masks: tf.Tensor) -> tf.Tensor:
"""Coloring segmentation masks.
Used in visualization.
Args:
masks: A float binary tensor of shape (B, H, W, N), representing `B`
samples, with `N` masks of size `H*W` each. Each of the `N` masks will
be assigned a random color.
Returns:
A (b, h, w, 3) float tensor in [0., 1.] for the coloring result.
"""
b, h, w, n = utilities.resolve_shape(masks)
palette = tf.random.uniform((1, n, 3), 0.5, 1.)
colored = tf.reshape(
tf.matmul(tf.reshape(masks, (b, -1, n)), palette), (b, h, w, 3))
return colored
def visualize(self,
outputs: typing.NestedTensorDict,
labels: Optional[typing.TensorDict] = None):
"""Visualizes the outputs and labels.
Args:
outputs: A dictionary of outputs.
labels: A dictionary of labels.
    The following dict is added to outputs["visualization"]:
      {
        "instance": {
          "pred": A (B, H, W, 3) tensor for the visualized map in [0, 1].
          "gt": A (B, H, W, 3) tensor for the visualized map in [0, 1], if
            labels is present.
          "concat": Concatenation of "pred" and "gt" along the width axis, if
            labels is present.
        }
        "seg-text": {... Similar to above, but the shape is (B, H, W, 1).}
      }
    All of these tensors have a rank of 4 (B, H, W, C).
"""
outputs["visualization"] = {}
# 1. prediction
# 1.1 instance mask
binary_masks = outputs["postprocessed"]["binary_masks"]
outputs["visualization"]["instance"] = {
"pred": self._coloring(binary_masks),
}
# 1.2 text-seg
outputs["visualization"]["seg-text"] = {
"pred":
tf.expand_dims(outputs["segmentation_output"]["word_score"], -1),
}
# 2. labels
if labels is not None:
# 2.1 instance mask
# (B, N, H, W) -> (B, H, W, N); the first one is bkg so removed.
gt_masks = tf.transpose(labels["instance_labels"]["masks"][:, 1:],
(0, 2, 3, 1))
outputs["visualization"]["instance"]["gt"] = self._coloring(gt_masks)
# 2.2 text-seg
outputs["visualization"]["seg-text"]["gt"] = tf.expand_dims(
labels["segmentation_output"]["gt_word_score"], -1)
# 3. concat
for v in outputs["visualization"].values():
# Resize to make the size align. The prediction always has stride=1
# resolution, so we make gt align with pred instead of vice versa.
v["concat"] = tf.concat(
[v["pred"],
tf.image.resize(v["gt"],
tf.shape(v["pred"])[1:3])],
axis=2)
@tf.function
def serve(self, image_tensor: tf.Tensor) -> typing.NestedTensorDict:
"""Method to be exported for SavedModel.
Args:
image_tensor: A float32 normalized tensor representing an image of shape
[1, height, width, channels].
Returns:
Dict of output:
classes: (B, N) int32 tensor == o["postprocessed"]["classes"]
masks: (B, H, W, N) float32 tensor == o["postprocessed"]["binary_masks"]
groups: (B, N, N) float32 tensor == o["para_affinity"]
confidence: A (B, N) float tensor == o["postprocessed"]["confidence"]
mask_area: A (B, N) float tensor == o["postprocessed"]["mask_area"]
"""
features = {"images": image_tensor}
nn_outputs = self(features, False)
outputs = {
"classes": nn_outputs["postprocessed"]["classes"],
"masks": nn_outputs["postprocessed"]["binary_masks"],
"confidence": nn_outputs["postprocessed"]["confidence"],
"mask_area": nn_outputs["postprocessed"]["mask_area"],
"groups": nn_outputs["para_affinity"],
}
return outputs
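# Export sketch (added for illustration; not part of the original class). A
# SavedModel exposing `serve` could be written roughly as follows, where the
# input resolution and export path are assumptions:
#
#   detector = UniversalDetector()
#   signature = detector.serve.get_concrete_function(
#       tf.TensorSpec([1, None, None, 3], tf.float32))
#   tf.saved_model.save(
#       detector, '/tmp/unified_detector',
#       signatures={'serving_default': signature})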
@gin.configurable()
def _get_decoder_head(
atrous_rates: Sequence[int] = (6, 12, 18),
pixel_space_dim: int = 128,
pixel_space_intermediate: int = 256,
low_level: Sequence[Dict[str, Union[str, int]]] = ({
"feature_key": "res3",
"channels_project": 64,
}, {
"feature_key": "res2",
"channels_project": 32,
}),
num_classes=3,
aux_sem_intermediate=256,
norm_fn=tf.keras.layers.BatchNormalization,
) -> max_deeplab_head.MaXDeepLab:
"""Get the MaX-DeepLab prediction head.
Args:
atrous_rates: Dilation rate for astrou conv in the semantic head.
pixel_space_dim: The dimension for the final panoptic features.
pixel_space_intermediate: The dimension for the layer before
`pixel_space_dim` (i.e. the separable 5x5 layer).
low_level: A list of dicts for the feature pyramid in forming the semantic
output. Each dict represents one skip-path from the backbone.
num_classes: Number of classes (entities + bkg) including void. For example,
if we only want to detect word, then `num_classes` = 3 (1 for word, 1 for
bkg, and 1 for void).
aux_sem_intermediate: Similar to `pixel_space_intermediate`, but for the
auxiliary semantic output head.
norm_fn: The normalization function used in the head.
Returns:
A MaX-DeepLab decoder head (as a keras layer).
"""
# Initialize the configs.
configs = config_pb2.ModelOptions()
configs.decoder.feature_key = "feature_semantic"
configs.decoder.atrous_rates.extend(atrous_rates)
configs.max_deeplab.pixel_space_head.output_channels = pixel_space_dim
configs.max_deeplab.pixel_space_head.head_channels = pixel_space_intermediate
for low_level_config in low_level:
low_level_ = configs.max_deeplab.auxiliary_low_level.add()
low_level_.feature_key = low_level_config["feature_key"]
low_level_.channels_project = low_level_config["channels_project"]
configs.max_deeplab.auxiliary_semantic_head.output_channels = num_classes
configs.max_deeplab.auxiliary_semantic_head.head_channels = aux_sem_intermediate
return max_deeplab_head.MaXDeepLab(configs.decoder,
configs.max_deeplab, 0, norm_fn)
class PseudoLayer(tf.keras.layers.Layer):
"""Pseudo layer for ablation study.
The `call()` function has the same argument signature as a transformer
encoder stack. `unused_ph1` and `unused_ph2` are place holders for this
purpose. When studying the effectiveness of using transformer as the
  grouping branch, we can use this PseudoLayer to replace the transformer and
  serve as a no-transformer baseline.
To use a single projection layer instead of transformer, simply set `extra_fc`
to True.
"""
def __init__(self, extra_fc: bool):
super().__init__(name="extra_fc")
self._extra_fc = extra_fc
if extra_fc:
self._layer = tf.keras.Sequential([
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.LayerNormalization(),
])
def call(self,
fts: tf.Tensor,
unused_ph1: Optional[tf.Tensor],
unused_ph2: Optional[tf.Tensor],
training: Optional[bool] = None) -> tf.Tensor:
"""See base class."""
if self._extra_fc:
return self._layer(fts, training)
return fts
@gin.configurable()
def _get_embed_head(
dimension=256,
norm_fn=tf.keras.layers.BatchNormalization
) -> Tuple[tf.keras.Sequential, tf.keras.Sequential]:
"""Projection layers to get instance & grouping features."""
instance_head = tf.keras.Sequential([
tf.keras.layers.Dense(dimension, use_bias=False),
norm_fn(),
tf.keras.layers.ReLU(),
])
grouping_head = tf.keras.Sequential([
tf.keras.layers.Dense(dimension, use_bias=False),
norm_fn(),
tf.keras.layers.ReLU(),
])
return instance_head, grouping_head
@gin.configurable()
def _get_para_head(
dimension=128,
num_layer=3,
extra_fc=False) -> Tuple[tf.keras.layers.Layer, tf.keras.layers.Layer]:
"""Get the additional para head.
Args:
dimension: the dimension of the final output.
num_layer: the number of transformer layer.
extra_fc: Whether an extra single fully-connected layer is used, when
num_layer=0.
Returns:
an encoder and a projection layer for the grouping features.
"""
if num_layer > 0:
encoder = transformer.EncoderStack(
params={
"hidden_size": 256,
"num_hidden_layers": num_layer,
"num_heads": 4,
"filter_size": 512,
"initializer_gain": 1.0,
"attention_dropout": 0.1,
"relu_dropout": 0.1,
"layer_postprocess_dropout": 0.1,
"allow_ffn_pad": True,
})
else:
encoder = PseudoLayer(extra_fc)
dense = tf.keras.layers.Dense(dimension)
return encoder, dense
def _dice_sim(pred: tf.Tensor, ground_truth: tf.Tensor) -> tf.Tensor:
"""Dice Coefficient for mask similarity.
Args:
pred: The predicted mask. [B, N, H, W], in [0, 1].
ground_truth: The ground-truth mask. [B, N, H, W], in [0, 1] or {0, 1}.
Returns:
A matrix for the losses: m[b, i, j] is the dice similarity between pred `i`
and gt `j` in batch `b`.
"""
b, n = utilities.resolve_shape(pred)[:2]
ground_truth = tf.reshape(
tf.transpose(ground_truth, (0, 2, 3, 1)), (b, -1, n)) # B, HW, N
pred = tf.reshape(pred, (b, n, -1)) # B, N, HW
numerator = tf.matmul(pred, ground_truth) * 2.
# TODO(longshangbang): The official implementation does not square the scores.
# Need to do experiment to determine which one is better.
denominator = (
tf.math.reduce_sum(tf.math.square(ground_truth), 1, keepdims=True) +
tf.math.reduce_sum(tf.math.square(pred), 2, keepdims=True))
return (numerator + EPSILON) / (denominator + EPSILON)
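# Added note: for each (pred i, gt j) pair the value computed above is the
# soft Dice coefficient
#   dice(p, g) = (2 * sum(p * g) + eps) / (sum(p^2) + sum(g^2) + eps),
# where the sums run over all pixels. It is 1 for a perfect binary match and
# approaches 0 for disjoint masks.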
def _semantic_loss(
loss_dict: Dict[str, tf.Tensor],
labels: tf.Tensor,
outputs: tf.Tensor,
):
"""Auxiliary semantic loss.
Currently, these losses are added:
(1) text/non-text heatmap
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary containing:
`gt_word_score`: (B, H, W) tensor for the text/non-text map.
outputs: The output dictionary containing:
`word_score`: (B, H, W) prediction tensor for `gt_word_score`
"""
pred = tf.expand_dims(outputs["word_score"], 1)
gt = tf.expand_dims(labels["gt_word_score"], 1)
loss_dict["loss_segmentation_word"] = 1. - tf.reduce_mean(_dice_sim(pred, gt))
@gin.configurable
def _entity_mask_loss(loss_dict: Dict[str, tf.Tensor],
labels: tf.Tensor,
outputs: tf.Tensor,
alpha: float = gin.REQUIRED):
"""PQ loss for entity-mask training.
This method adds the PQ loss term to loss_dict directly. The match result will
also be stored in outputs (As a [B, N_pred, N_gt] float tensor).
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
    labels: A dict containing:
      `num_instance` - (B,)
      `masks` - (B, N, H, W)
      `classes` - (B, N)
outputs: A dict containing:
`cls_prob`: (B, N, C)
`mask_id_prob`: (B, H, W, N)
`cls_logits`: (B, N, C)
`mask_id_logits`: (B, H, W, N)
alpha: Weight for pos/neg balance.
"""
# Classification score: (B, N, N)
# in batch b, the probability of prediction i being class of gt j, i.e.:
# score[b, i, j] = pred_cls[b, i, gt_cls[b, j]]
gt_cls = labels["classes"] # (B, N)
pred_cls = outputs["cls_prob"] # (B, N, C)
b, n = utilities.resolve_shape(pred_cls)[:2]
# indices[b, i, j] = gt_cls[b, j]
indices = tf.tile(tf.expand_dims(gt_cls, 1), (1, n, 1))
cls_score = tf.gather(pred_cls, tf.cast(indices, tf.int32), batch_dims=2)
# Mask score (dice): (B, N, N)
# mask_score[b, i, j]: dice-similarity for pred i and gt j in batch b.
mask_score = _dice_sim(
tf.transpose(outputs["mask_id_prob"], (0, 3, 1, 2)), labels["masks"])
# Get similarity matrix and matching.
# padded mask[b, j, i] = -1 << other scores, if i >= num_instance[b]
similarity = cls_score * mask_score
padded_mask = tf.cast(tf.reshape(tf.range(n), (1, 1, n)), tf.float32)
padded_mask = tf.cast(
tf.math.greater_equal(padded_mask,
tf.reshape(labels["num_instance"], (b, 1, 1))),
tf.float32)
# The constant value for padding has no effect.
masked_similarity = similarity * (1. - padded_mask) + padded_mask * (-1.)
matched_mask = matchers_ops.hungarian_matching(-masked_similarity)
matched_mask = tf.cast(matched_mask, tf.float32) * (1 - padded_mask)
outputs["matched_mask"] = matched_mask
# Pos loss
loss_pos = (
tf.stop_gradient(cls_score) * (-mask_score) +
tf.stop_gradient(mask_score) * (-tf.math.log(cls_score)))
loss_pos = tf.reduce_sum(loss_pos * matched_mask, axis=[1, 2]) # (B,)
# Neg loss
matched_pred = tf.cast(tf.reduce_sum(matched_mask, axis=2) > 0,
tf.float32) # (B, N)
# 0 for void class
log_loss = -tf.nn.log_softmax(outputs["cls_logits"])[:, :, 0] # (B, N)
loss_neg = tf.reduce_sum(log_loss * (1. - matched_pred), axis=-1) # (B,)
loss_pq = (alpha * loss_pos + (1 - alpha) * loss_neg) / n
loss_pq = tf.reduce_mean(loss_pq)
loss_dict["loss_pq"] = loss_pq
@gin.configurable
def _instance_discrimination_loss(loss_dict: Dict[str, Any],
labels: Dict[str, Any],
outputs: Dict[str, Any],
tau: float = gin.REQUIRED):
"""Instance discrimination loss.
This method adds the ID loss term to loss_dict directly.
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary.
outputs: The output dictionary.
tau: The temperature term in the loss
"""
# The normalized feature, shape=(B, H/4, W/4, D)
g = outputs["max_deep_lab"]["pixel_space_normalized_feature"]
b, h, w = utilities.resolve_shape(g)[:3]
# The ground-truth masks, shape=(B, N, H, W) --> (B, N, H/4, W/4)
m = labels["masks"]
m = tf.image.resize(
tf.transpose(m, (0, 2, 3, 1)), (h, w),
tf.image.ResizeMethod.NEAREST_NEIGHBOR)
m = tf.transpose(m, (0, 3, 1, 2))
# The number of ground-truth instance (K), shape=(B,)
num = labels["num_instance"]
n = utilities.resolve_shape(m)[1] # max number of predictions
# is_void[b, i] = 1 if instance i in batch b is a padded slot.
is_void = tf.cast(tf.expand_dims(tf.range(n), 0), tf.float32) # (1, n)
is_void = tf.cast(
tf.math.greater_equal(is_void, tf.expand_dims(num, 1)), tf.float32)
# (B, N, D)
t = tf.math.l2_normalize(tf.einsum("bhwd,bnhw->bnd", g, m), axis=-1)
inst_dist_logits = tf.einsum("bhwd,bid->bhwi", g, t) / tau # (B, H, W, N)
inst_dist_logits = inst_dist_logits - 100. * tf.reshape(is_void, (b, 1, 1, n))
mask_id = tf.cast(
tf.einsum("bnhw,n->bhw", m, tf.range(n, dtype=tf.float32)), tf.int32)
loss_map = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=mask_id, logits=inst_dist_logits) # B, H, W
valid_mask = tf.reduce_sum(m, axis=1)
loss_inst_dist = (
(tf.reduce_sum(loss_map * valid_mask, axis=[1, 2]) + EPSILON) /
(tf.reduce_sum(valid_mask, axis=[1, 2]) + EPSILON))
loss_dict["loss_inst_dist"] = tf.reduce_mean(loss_inst_dist)
@gin.configurable
def _paragraph_grouping_loss(
loss_dict: Dict[str, Any],
labels: Dict[str, Any],
outputs: Dict[str, Any],
tau: float = gin.REQUIRED,
loss_mode="vanilla",
fl_alpha: float = 0.25,
fl_gamma: float = 2.,
):
"""Instance discrimination loss.
This method adds the para discrimination loss term to loss_dict directly.
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary.
outputs: The output dictionary.
tau: The temperature term in the loss
loss_mode: The type of loss.
fl_alpha: alpha value in focal loss
fl_gamma: gamma value in focal loss
"""
if "paragraph_labels" not in labels:
loss_dict["loss_para"] = 0.
return
# step 1:
# obtain the paragraph labels for each prediction
# (batch, pred, gt)
matched_matrix = outputs["instance_output"]["matched_mask"] # B, N, N
para_label_gt = labels["paragraph_labels"]["paragraph_ids"] # B, N
has_para_label_gt = (
labels["paragraph_labels"]["has_para_ids"][:, tf.newaxis, tf.newaxis])
# '0' means no paragraph labels
pred_label_gt = tf.einsum("bij,bj->bi", matched_matrix,
tf.cast(para_label_gt + 1, tf.float32))
pred_label_gt_pad_col = tf.expand_dims(pred_label_gt, -1) # b,n,1
pred_label_gt_pad_row = tf.expand_dims(pred_label_gt, 1) # b,1,n
gt_affinity = tf.cast(
tf.equal(pred_label_gt_pad_col, pred_label_gt_pad_row), tf.float32)
gt_affinity_mask = (
has_para_label_gt * pred_label_gt_pad_col * pred_label_gt_pad_row)
gt_affinity_mask = tf.cast(tf.not_equal(gt_affinity_mask, 0.), tf.float32)
# step 2:
# get affinity matrix
affinity = outputs["para_affinity"]
# step 3:
# compute loss
loss_fn = tf.keras.losses.BinaryCrossentropy(
from_logits=True,
label_smoothing=0,
axis=-1,
reduction=tf.keras.losses.Reduction.NONE,
name="para_dist")
affinity = tf.reshape(affinity, (-1, 1)) # (b*n*n, 1)
gt_affinity = tf.reshape(gt_affinity, (-1, 1)) # (b*n*n, 1)
gt_affinity_mask = tf.reshape(gt_affinity_mask, (-1,)) # (b*n*n,)
pointwise_loss = loss_fn(gt_affinity, affinity / tau) # (b*n*n,)
if loss_mode == "vanilla":
loss = (
tf.reduce_sum(pointwise_loss * gt_affinity_mask) /
(tf.reduce_sum(gt_affinity_mask) + EPSILON))
elif loss_mode == "balanced":
# pos
pos_mask = gt_affinity_mask * gt_affinity[:, 0]
pos_loss = (
tf.reduce_sum(pointwise_loss * pos_mask) /
(tf.reduce_sum(pos_mask) + EPSILON))
# neg
neg_mask = gt_affinity_mask * (1. - gt_affinity[:, 0])
neg_loss = (
tf.reduce_sum(pointwise_loss * neg_mask) /
(tf.reduce_sum(neg_mask) + EPSILON))
loss = 0.25 * pos_loss + 0.75 * neg_loss
elif loss_mode == "focal":
alpha_wt = fl_alpha * gt_affinity + (1. - fl_alpha) * (1. - gt_affinity)
prob_pos = tf.math.sigmoid(affinity / tau)
pt = prob_pos * gt_affinity + (1. - prob_pos) * (1. - gt_affinity)
fl_loss_pw = tf.stop_gradient(
alpha_wt * tf.pow(1. - pt, fl_gamma))[:, 0] * pointwise_loss
loss = (
tf.reduce_sum(fl_loss_pw * gt_affinity_mask) /
(tf.reduce_sum(gt_affinity_mask) + EPSILON))
else:
raise ValueError(f"Not supported loss mode: {loss_mode}")
loss_dict["loss_para"] = loss
def _mask_id_xent_loss(loss_dict: Dict[str, Any], labels: Dict[str, Any],
outputs: Dict[str, Any]):
"""Mask ID loss.
This method adds the mask ID loss term to loss_dict directly.
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary.
outputs: The output dictionary.
"""
# (B, N, H, W)
mask_gt = labels["masks"]
# B, H, W, N
mask_id_logits = outputs["instance_output"]["mask_id_logits"]
# B, N, N
matched_matrix = outputs["instance_output"]["matched_mask"]
# B, N
gt_to_pred_id = tf.cast(tf.math.argmax(matched_matrix, axis=1), tf.float32)
# B, H, W
mask_id_labels = tf.cast(
tf.einsum("bnhw,bn->bhw", mask_gt, gt_to_pred_id), tf.int32)
loss_map = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=mask_id_labels, logits=mask_id_logits)
valid_mask = tf.reduce_sum(mask_gt, axis=1)
loss_mask_id = (
(tf.reduce_sum(loss_map * valid_mask, axis=[1, 2]) + EPSILON) /
(tf.reduce_sum(valid_mask, axis=[1, 2]) + EPSILON))
loss_dict["loss_mask_id"] = tf.reduce_mean(loss_mask_id)
| 33,717 | 36.928009 | 82 | py |
models | models-master/official/projects/unified_detector/tasks/ocr_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task definition for ocr."""
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import gin
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.projects.unified_detector.configs import ocr_config
from official.projects.unified_detector.data_loaders import input_reader
from official.projects.unified_detector.tasks import all_models # pylint: disable=unused-import
from official.projects.unified_detector.utils import typing
NestedTensorDict = typing.NestedTensorDict
ModelType = Union[tf.keras.layers.Layer, tf.keras.Model]
@task_factory.register_task_cls(ocr_config.OcrTaskConfig)
@gin.configurable
class OcrTask(base_task.Task):
"""Defining the OCR training task."""
_loss_items = []
def __init__(self,
params: cfg.TaskConfig,
logging_dir: Optional[str] = None,
name: Optional[str] = None,
model_fn: Callable[..., ModelType] = gin.REQUIRED):
super().__init__(params, logging_dir, name)
    self._model_fn = model_fn
def build_model(self) -> ModelType:
"""Build and return the model, record the loss items as well."""
    model = self._model_fn()
self._loss_items.extend(model.loss_items)
return model
def build_inputs(
self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Build the tf.data.Dataset instance."""
return input_reader.InputFn(is_training=params.is_training)({},
input_context)
def build_metrics(self,
training: bool = True) -> Sequence[tf.keras.metrics.Metric]:
"""Build the metrics (currently, only for loss summaries in TensorBoard)."""
del training
metrics = []
# Add loss items
for name in self._loss_items:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
# TODO(longshangbang): add evaluation metrics
return metrics
def train_step(
self,
inputs: Tuple[NestedTensorDict, NestedTensorDict],
model: ModelType,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[Sequence[tf.keras.metrics.Metric]] = None
) -> Dict[str, tf.Tensor]:
features, labels = inputs
input_dict = {"features": features}
if self.task_config.model_call_needs_labels:
input_dict["labels"] = labels
is_mixed_precision = isinstance(optimizer,
tf.keras.mixed_precision.LossScaleOptimizer)
with tf.GradientTape() as tape:
outputs = model(**input_dict, training=True)
loss, loss_dict = model.compute_losses(labels=labels, outputs=outputs)
loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
if is_mixed_precision:
loss = optimizer.get_scaled_loss(loss)
tvars = model.trainable_variables
grads = tape.gradient(loss, tvars)
if is_mixed_precision:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {"loss": loss}
if metrics:
for m in metrics:
m.update_state(loss_dict[m.name])
return logs
| 3,888 | 34.678899 | 96 | py |
models | models-master/official/projects/mae/modeling/vit.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for ViT."""
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.mae.modeling import utils
from official.vision.modeling.backbones import vit
def to_patch(images, patch_height, patch_width):
"""Image (NHWC) to patches (N(H' W')(patch_height patch_width c))."""
batch_size, h, w, c = tf_utils.get_shape_list(images)
num_h = h // patch_height
num_w = w // patch_width
x = tf.reshape(images,
(batch_size, num_h, patch_height, num_w, patch_width, c))
x = tf.einsum('nhpwqc->nhwpqc', x)
x = tf.reshape(x, (batch_size, num_h, num_w, patch_height * patch_width * c))
return x
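# Added shape example (illustrative): for a batch of 224x224 RGB images and
# 16x16 patches,
#   to_patch(images, 16, 16)
# maps (N, 224, 224, 3) -> (N, 14, 14, 768), since 224 / 16 = 14 and
# 16 * 16 * 3 = 768.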
class ViTClassifier(tf.keras.Model):
"""ViT classifier for finetune."""
def __init__(self, encoder, num_classes, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.linear = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=2e-5))
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
encoded = self.encoder({'images': inputs})
return self.linear(encoded[:, 0])
class ViTLinearClassifier(tf.keras.Model):
"""ViT classifier for linear probing."""
def __init__(self, encoder, num_classes, use_sync_bn=True, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.linear = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01))
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
self.batch_norm = self._norm(
axis=-1, epsilon=1e-6, center=False, scale=False, momentum=0.9)
def call(self, inputs, training=False): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
encoded = self.encoder({'images': inputs})
features = self.batch_norm(encoded[:, 0], training=training)
return self.linear(features)
class VisionTransformer(tf.keras.Model):
"""ViT backbone."""
def __init__(self,
patch_h,
patch_w,
init_stochastic_depth_rate=0.0,
**kwargs):
super().__init__(**kwargs)
self.patch_h = patch_h
self.patch_w = patch_w
self.init_stochastic_depth_rate = init_stochastic_depth_rate
def build(self, input_shape):
self.patch_to_embed = tf.keras.layers.Dense(1024)
# ViT-L
self.encoder = vit.Encoder(
num_layers=24,
mlp_dim=4096,
num_heads=16,
dropout_rate=0.0,
attention_dropout_rate=0.0,
init_stochastic_depth_rate=self.init_stochastic_depth_rate,
add_pos_embed=False,
)
self.token_cls = vit.TokenLayer()
super().build(input_shape)
def to_embed(self, patches):
return self.patch_to_embed(patches)
def insert_cls(self, patch_embeds):
return self.token_cls(patch_embeds)
def add_position_embed(self, patch_embeds):
return patch_embeds + utils.position_embedding_sine(
tf.ones_like(patch_embeds[..., 0]), 1024, normalize=False)
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if isinstance(inputs, dict):
images = inputs.get('images', None)
patch_embeds = inputs.get('embeddings', None)
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if images is not None:
patches = to_patch(images, self.patch_h, self.patch_w)
patch_embeds = self.to_embed(patches)
patch_shape = tf.shape(patch_embeds)
patch_embeds = self.add_position_embed(patch_embeds)
patch_embeds = tf.reshape(patch_embeds,
(patch_shape[0], -1, patch_shape[-1]))
patch_embeds = self.insert_cls(patch_embeds)
return self.encoder(patch_embeds)
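# Illustrative usage sketch (added; not part of the original file). The input
# size is an example value only:
#
#   backbone = VisionTransformer(patch_h=16, patch_w=16)
#   features = backbone({'images': tf.ones((2, 224, 224, 3))})
#   # features has shape (2, 1 + 14 * 14, 1024); index 0 is the class token.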
| 4,523 | 34.904762 | 116 | py |
models | models-master/official/projects/mae/modeling/masked_ae.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for MAE."""
import tensorflow as tf
from official.projects.mae.modeling import utils
from official.vision.modeling.backbones import vit
class MaskedAE(tf.keras.Model):
"""MAE model."""
def __init__(self,
encoder,
name=None,
**kwargs):
super(MaskedAE, self).__init__(name=name, **kwargs)
self.encoder = encoder
self.pixels_per_patch = self.encoder.patch_h * self.encoder.patch_w * 3
def build(self, input_shape):
self.decoder = vit.Encoder(
num_layers=8,
mlp_dim=2048,
num_heads=16,
dropout_rate=0.0,
attention_dropout_rate=0.0,
add_pos_embed=False
)
self.mask = self.add_weight(
'mask', (1, 1, 512),
initializer=tf.keras.initializers.RandomNormal(stddev=0.02))
self.to_pixels = tf.keras.layers.Dense(self.pixels_per_patch)
self.linear = tf.keras.layers.Dense(512)
super().build(input_shape)
def add_position_embed(self, patch_embeds, num_rows, num_cols):
    # patch_embeds has a flattened patch axis, (N, 1 + H*W, D), and includes
    # the cls token.
shape = tf.shape(patch_embeds)
position_embedding = utils.position_embedding_sine(
tf.ones((shape[0], num_rows, num_cols), dtype=patch_embeds.dtype),
512, normalize=False)
position_embedding = tf.reshape(
position_embedding, (shape[0], num_rows * num_cols, -1))
return patch_embeds + tf.concat(
[tf.zeros((shape[0], 1, shape[2]), dtype=patch_embeds.dtype),
position_embedding
], axis=1)
def call(self, inputs, training=None, masking=None):
patches = inputs['patches']
masked_indices = tf.cast(inputs['masked_indices'], tf.int32)
unmasked_indices = tf.cast(inputs['unmasked_indices'], tf.int32)
batch_size = tf.shape(patches)[0]
num_h_patches = tf.shape(patches)[1]
num_w_patches = tf.shape(patches)[2]
num_patches = num_h_patches * num_w_patches
num_masks = tf.shape(masked_indices)[1]
patch_embeds = self.encoder.to_embed(patches)
patch_embeds = self.encoder.add_position_embed(patch_embeds)
patch_embeds = tf.reshape(
patch_embeds,
(batch_size, num_patches, -1))
patch_embeds = self.encoder.insert_cls(patch_embeds)
unmasked_indices = tf.concat(
[tf.zeros((batch_size, 1), unmasked_indices.dtype),
unmasked_indices + 1],
axis=1)
masked_indices = masked_indices + 1
unmasked_patch_embeds = tf.gather(
patch_embeds, unmasked_indices, batch_dims=1)
encoded = self.encoder({'embeddings': unmasked_patch_embeds})
encoded = self.linear(encoded)
zeros = tf.zeros((batch_size, num_patches + 1, 512))
unmasked_embed = tf.tensor_scatter_nd_add(
zeros,
tf.stack([
tf.tile(
tf.expand_dims(tf.range(batch_size), axis=1),
[1, num_patches + 1 - num_masks]), unmasked_indices
],
axis=-1),
encoded)
mask_embeds = tf.tile(self.mask, [batch_size, num_masks, 1])
full_embed = tf.tensor_scatter_nd_add(
unmasked_embed,
tf.stack([
tf.tile(
tf.expand_dims(tf.range(batch_size), axis=1),
[1, num_masks]), masked_indices
],
axis=-1),
mask_embeds)
full_embed = self.add_position_embed(
full_embed, num_h_patches, num_w_patches)
decoded = self.decoder(full_embed)
pred_pixel_values = self.to_pixels(
tf.gather(decoded, masked_indices, batch_dims=1))
return pred_pixel_values
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.encoder)
return items
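# Forward-pass sketch for MaskedAE, assuming 224x224 images, 14x14 patches
# (256 patches per image) and a 75% masking ratio; the indices and shapes
# below are illustrative only.
def _example_masked_ae_forward():
  encoder = vit.VisionTransformer(14, 14, 0.0)
  encoder({'images': tf.ones((1, 224, 224, 3))})  # trigger build to be called.
  model = MaskedAE(encoder)
  patches = tf.ones((2, 16, 16, 14 * 14 * 3))
  indices = tf.random.shuffle(tf.range(256))
  masked_indices = tf.tile(indices[None, :192], [2, 1])
  unmasked_indices = tf.tile(indices[None, 192:], [2, 1])
  pred_pixels = model({
      'patches': patches,
      'masked_indices': masked_indices,
      'unmasked_indices': unmasked_indices,
  })
  return pred_pixels  # shape (2, 192, 14 * 14 * 3)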
| 4,338 | 34.276423 | 75 | py |
models | models-master/official/projects/mae/tasks/linear_probe.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task with ViT and linear probe."""
import dataclasses
from typing import Optional
import tensorflow as tf
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.projects.mae.modeling import vit
from official.projects.mae.tasks import image_classification
from official.vision.dataloaders import classification_input
from official.vision.dataloaders import tfds_factory
@dataclasses.dataclass
class ViTLinearProbeConfig(image_classification.ViTConfig):
"""The LinearProbe task config."""
@task_factory.register_task_cls(ViTLinearProbeConfig)
class ViTLinearProbeTask(base_task.Task):
"""Image classificaiton with ViT and load checkpoint if exists."""
def build_model(self) -> tf.keras.Model:
encoder = vit.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
self.task_config.init_stochastic_depth_rate,
)
# Freeze backbone.
encoder.trainable = False
model = vit.ViTLinearClassifier(encoder, self.task_config.num_classes)
model(tf.ones((1, 224, 224, 3)))
return model
def build_inputs(
self, params, input_context: Optional[tf.distribute.InputContext] = None
):
num_classes = self.task_config.num_classes
input_size = self.task_config.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
color_jitter=params.color_jitter,
random_erasing=params.random_erasing,
dtype=params.dtype,
)
postprocess_fn = lambda images, labels: ( # pylint:disable=g-long-lambda
images,
tf.one_hot(labels, num_classes),
)
reader = input_reader.InputReader(
params=params,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn,
)
dataset = reader.read(input_context=input_context)
return dataset
def initialize(self, model: tf.keras.Model):
"""Load encoder if checkpoint exists.
Args:
model: The keras.Model built or used by this task.
"""
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
checkpoint_items = dict(encoder=model.encoder)
ckpt = tf.train.Checkpoint(**checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
]
return metrics
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
return tf.keras.losses.categorical_crossentropy(
labels, model_outputs, from_logits=True
)
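# Task-level sketch: builds the frozen-encoder linear-probe model from the
# default config. Restoring pretrained encoder weights via `initialize`
# needs a real `init_checkpoint`, so that step is only indicated here.
def _example_linear_probe_task():
  config = ViTLinearProbeConfig()  # defaults: 14x14 patches, 1000 classes
  task = ViTLinearProbeTask(config)
  model = task.build_model()  # encoder.trainable is False
  # task.initialize(model)  # loads encoder-only weights from
  #                         # config.init_checkpoint when one is configured
  return model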
| 3,952 | 33.373913 | 78 | py |
models | models-master/official/projects/mae/tasks/masked_ae.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for masked autoencoder pretraining."""
from typing import Optional
import tensorflow as tf
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.modeling import tf_utils
from official.projects.mae.configs import mae as mae_cfg
from official.projects.mae.modeling import masked_ae
from official.projects.mae.modeling import vit
from official.vision.dataloaders import classification_input
from official.vision.dataloaders import tfds_factory
@task_factory.register_task_cls(mae_cfg.MAEConfig)
class MaskedAETask(base_task.Task):
"""Task for masked autoencoder training."""
def build_model(self) -> tf.keras.Model:
encoder = vit.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
0.0)
# trigger build to be called.
input_size = self.task_config.input_size
encoder({'images': tf.ones((1, input_size[0], input_size[1], 3))})
model = masked_ae.MaskedAE(encoder)
return model
def build_inputs(self,
params,
input_context: Optional[tf.distribute.InputContext] = None):
num_classes = self.task_config.num_classes
input_size = self.task_config.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
color_jitter=params.color_jitter,
random_erasing=params.random_erasing,
dtype=params.dtype,
crop_area_range=params.crop_area_range)
def patch_and_mask(images, labels):
del labels
patches = vit.to_patch(
images, self.task_config.patch_h, self.task_config.patch_w)
batch_size, num_h_patches, num_w_patches = tf_utils.get_shape_list(
patches)[:3]
num_patches = num_h_patches * num_w_patches
num_masked = tf.cast(
self.task_config.masking_ratio * num_patches, dtype=tf.int32)
r = tf.random.uniform((batch_size, num_patches))
rand_indices = tf.argsort(r)
masked_indices = rand_indices[:, :num_masked]
unmasked_indices = rand_indices[:, num_masked:]
patches_1d = tf.reshape(patches, (batch_size, num_patches, -1))
masked_patches = tf.gather(patches_1d, masked_indices, batch_dims=1)
if self.task_config.norm_target:
mean = tf.reduce_mean(masked_patches, axis=-1, keepdims=True)
var = tf.math.reduce_variance(masked_patches, axis=-1, keepdims=True)
std = (var + 1.e-6)**.5
masked_patches = (masked_patches - mean) / std
return {'patches': patches,
'masked_indices': masked_indices,
'unmasked_indices': unmasked_indices}, masked_patches
reader = input_reader.InputReader(
params=params,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=patch_and_mask)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
return tf.keras.metrics.mean_squared_error(
labels, model_outputs)
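# Stand-alone sketch of the random masking split performed inside
# `patch_and_mask` above, on a toy example of 8 patches at a 75% ratio.
def _example_masking_split(batch_size=2, num_patches=8, masking_ratio=0.75):
  num_masked = int(masking_ratio * num_patches)
  r = tf.random.uniform((batch_size, num_patches))
  rand_indices = tf.argsort(r)  # one random permutation per example
  masked_indices = rand_indices[:, :num_masked]  # (batch_size, 6)
  unmasked_indices = rand_indices[:, num_masked:]  # (batch_size, 2)
  return masked_indices, unmasked_indices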
| 4,154 | 37.831776 | 79 | py |
models | models-master/official/projects/mae/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task with ViT."""
import dataclasses
from typing import Optional, Tuple
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import input_reader
from official.core import task_factory
from official.projects.mae.modeling import vit
from official.vision.dataloaders import classification_input
from official.vision.dataloaders import tfds_factory
from official.vision.ops import augment
@dataclasses.dataclass
class ViTConfig(cfg.TaskConfig):
"""The translation task config."""
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
patch_h: int = 14
patch_w: int = 14
num_classes: int = 1000
input_size: Tuple[int, int] = (224, 224)
init_stochastic_depth_rate: float = 0.2
@task_factory.register_task_cls(ViTConfig)
class ViTClassificationTask(base_task.Task):
"""Image classificaiton with ViT and load checkpoint if exists."""
def build_model(self) -> tf.keras.Model:
encoder = vit.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
self.task_config.init_stochastic_depth_rate)
model = vit.ViTClassifier(encoder, self.task_config.num_classes)
model(tf.ones((1, 224, 224, 3)))
return model
def build_inputs(self,
params,
input_context: Optional[tf.distribute.InputContext] = None):
num_classes = self.task_config.num_classes
input_size = self.task_config.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
color_jitter=params.color_jitter,
random_erasing=params.random_erasing,
dtype=params.dtype)
if params.is_training:
postprocess_fn = augment.MixupAndCutmix(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0 if params.is_training else 0.0,
label_smoothing=0.1,
num_classes=num_classes)
else:
postprocess_fn = lambda images, labels: ( # pylint:disable=g-long-lambda
images, tf.one_hot(labels, num_classes))
reader = input_reader.InputReader(
params=params,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn)
dataset = reader.read(input_context=input_context)
return dataset
def initialize(self, model: tf.keras.Model):
"""Load encoder if checkpoint exists.
Args:
model: The keras.Model built or used by this task.
"""
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
checkpoint_items = dict(encoder=model.encoder)
ckpt = tf.train.Checkpoint(**checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
]
return metrics
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
return tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=True)
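# Post-processing sketch on dummy data: training batches get MixupAndCutmix
# soft labels, while eval batches get plain one-hot labels. The batch size
# and class count below are illustrative.
def _example_label_postprocess(num_classes=10):
  images = tf.random.uniform((4, 224, 224, 3))
  labels = tf.constant([1, 2, 3, 4])
  train_fn = augment.MixupAndCutmix(
      mixup_alpha=0.8,
      cutmix_alpha=1.0,
      prob=1.0,
      label_smoothing=0.1,
      num_classes=num_classes)
  mixed_images, soft_labels = train_fn(images, labels)
  one_hot_labels = tf.one_hot(labels, num_classes)  # eval path
  return (mixed_images, soft_labels), (images, one_hot_labels)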
| 4,476 | 33.705426 | 80 | py |
models | models-master/official/projects/longformer/longformer_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer encoder. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, List, Optional, Union
from absl import logging
import tensorflow as tf
from official.modeling.tf_utils import get_shape_list
from official.nlp.modeling import layers
from official.projects.longformer.longformer_encoder_block import LongformerEncoderBlock
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class LongformerEncoder(tf.keras.layers.Layer):
"""LongformerEncoder.
Args:
vocab_size: The size of the token vocabulary.
attention_window: list of ints representing the window size for each layer.
global_attention_size: the size of global attention used for each token.
pad_token_id: the token id for the pad token
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
attention_window: Union[List[int], int] = 512,
global_attention_size: int = 0,
pad_token_id: int = 1,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: Callable[..., Any] = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
**kwargs):
super().__init__(**kwargs)
# Longformer args
self._attention_window = attention_window
self._global_attention_size = global_attention_size
self._pad_token_id = pad_token_id
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection')
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
for i in range(num_layers):
layer = LongformerEncoderBlock(
global_attention_size=global_attention_size,
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
attention_window=attention_window[i],
layer_id=i,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=output_range if i == num_layers - 1 else None,
kernel_initializer=initializer,
name=f'transformer/layer_{i}')
self._transformer_layers.append(layer)
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform')
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'attention_window': attention_window,
'global_attention_size': global_attention_size,
'pad_token_id': pad_token_id,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids') # input_ids
mask = inputs.get('input_mask') # attention_mask
type_ids = inputs.get('input_type_ids') # token_type_ids
word_embeddings = inputs.get('input_word_embeddings',
None) # input_embeds
else:
raise ValueError(f'Unexpected inputs type to {self.__class__}.')
(
padding_len,
word_ids,
mask,
type_ids,
word_embeddings,
) = self._pad_to_window_size(
word_ids=word_ids,
mask=mask,
type_ids=type_ids,
word_embeddings=word_embeddings,
pad_token_id=self._pad_token_id)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = word_embeddings + position_embeddings + type_embeddings
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
batch_size, seq_len = get_shape_list(mask)
# create masks with fixed len global_attention_size
mask = tf.transpose(
tf.concat(
values=[
tf.ones(
(self._global_attention_size, batch_size), tf.int32) * 2,
tf.transpose(mask)[self._global_attention_size:]
],
axis=0))
is_index_masked = tf.math.less(mask, 1)
is_index_global_attn = tf.transpose(
tf.concat(
values=[
tf.ones((self._global_attention_size, batch_size), tf.bool),
tf.zeros((seq_len - self._global_attention_size, batch_size),
tf.bool)
],
axis=0))
# Longformer
attention_mask = mask
extended_attention_mask = tf.reshape(
attention_mask, (tf.shape(mask)[0], tf.shape(mask)[1], 1, 1))
attention_mask = tf.cast(
tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0
encoder_outputs = []
x = embeddings
# TFLongformerEncoder
for layer in self._transformer_layers:
x = layer([x, attention_mask, is_index_masked, is_index_global_attn])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
if padding_len > 0:
last_encoder_output = last_encoder_output[:, :-padding_len]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
return dict(
sequence_output=last_encoder_output,
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _pad_to_window_size(
self,
word_ids,
mask,
type_ids,
word_embeddings,
pad_token_id,
):
# padding
attention_window = max(self._attention_window)
assert (attention_window %
2 == 0), ('`attention_window` should be an even value.'
f'Given {attention_window}')
input_shape = get_shape_list(
word_ids) if word_ids is not None else get_shape_list(word_embeddings)
batch_size, seq_len = input_shape[:2]
if seq_len is not None:
padding_len = (attention_window -
seq_len % attention_window) % attention_window
else:
padding_len = 0
paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]])
if word_ids is not None:
word_ids = tf.pad(word_ids, paddings, constant_values=pad_token_id)
if word_embeddings is not None:
def pad_embeddings():
        word_ids_padding = tf.fill((batch_size, padding_len), pad_token_id)
word_embeddings_padding = self._embedding_layer(word_ids_padding)
return tf.concat([word_embeddings, word_embeddings_padding], axis=-2)
word_embeddings = tf.cond(
tf.math.greater(padding_len, 0), pad_embeddings,
lambda: word_embeddings)
mask = tf.pad(
mask, paddings,
constant_values=False) # no attention on the padding tokens
token_type_ids = tf.pad(
type_ids, paddings, constant_values=0) # pad with token_type_id = 0
return (
padding_len,
word_ids,
mask,
token_type_ids,
word_embeddings,
)
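# Padding-arithmetic sketch for `_pad_to_window_size`: sequences are padded up
# to the next multiple of the (largest) attention window. The numbers below
# are illustrative.
def _example_window_padding(seq_len=600, attention_window=512):
  padding_len = (attention_window -
                 seq_len % attention_window) % attention_window
  return padding_len  # 424, i.e. the batch is padded from 600 to 1024 tokens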
| 13,559 | 36.04918 | 88 | py |
models | models-master/official/projects/longformer/longformer_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer attention block. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
import math
import string
import numpy as np
import tensorflow as tf
from official.modeling.tf_utils import get_shape_list
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`, that attention will be applied
to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = f"{source_notation},{target_notation}->{product_notation}"
attn_scores_rank = len(product_notation)
combine_equation = f"{product_notation},{source_notation}->{target_notation}"
return dot_product_equation, combine_equation, attn_scores_rank
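# Worked example for the helper above: with rank-4 projected tensors of shape
# (batch, seq, num_heads, head_dim) and attention applied over the sequence
# axis, it yields "aecd,abcd->acbe" for the query-key scores and
# "acbe,aecd->abcd" for the combination with values.
def _example_attention_equation():
  return _build_attention_equation(rank=4, attn_axes=(1,))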
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = f"{input_str},{kernel_str}->{output_str}"
return equation, bias_axes, len(output_str)
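# Worked example for the helper above: projecting a rank-3 input
# (batch, seq, hidden) onto (num_heads, head_dim) produces the equation
# "abc,cde->abde" with bias axes "de" and an output rank of 4.
def _example_proj_equation():
  return _build_proj_equation(free_dims=2, bound_dims=1, output_dims=2)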
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
@tf.keras.utils.register_keras_serializable(package="Text")
class LongformerAttention(tf.keras.layers.MultiHeadAttention):
"""LongformerAttention.
Args:
attention_window: int representing the window size for attention.
layer_id: int of the id of the layer.
global_attention_size: the size of global attention used for each token.
"""
def __init__(self, attention_window, layer_id, global_attention_size,
**kwargs):
super().__init__(**kwargs)
self._layer_id = layer_id
self._attention_window = attention_window
    assert (self._attention_window % 2 == 0), (
        f"`attention_window` for layer {self._layer_id} has to be an even "
        f"value. Given {self._attention_window}")
    assert (self._attention_window > 0), (
        f"`attention_window` for layer {self._layer_id} has to be positive. "
        f"Given {self._attention_window}")
self._one_sided_attn_window_size = self._attention_window // 2
self.global_attention_size = global_attention_size
def _build_from_signature(self, query, value, key=None):
"""Builds layers and variables.
Once the method is called, self._built_from_signature will be set to True.
Args:
query: Query tensor or TensorShape.
value: Value tensor or TensorShape.
key: Key tensor or TensorShape.
"""
self._built_from_signature = True
if hasattr(query, "shape"):
self._query_shape = tf.TensorShape(query.shape)
else:
self._query_shape = tf.TensorShape(query)
if hasattr(value, "shape"):
self._value_shape = tf.TensorShape(value.shape)
else:
self._value_shape = tf.TensorShape(value)
if key is None:
self._key_shape = self._value_shape
elif hasattr(key, "shape"):
self._key_shape = tf.TensorShape(key.shape)
else:
self._key_shape = tf.TensorShape(key)
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
# with tf_utils.maybe_init_scope(self):
# TODO(crickwu): check whether tf_utils.maybe_init_scope(self) (keras)
# is needed.
free_dims = self._query_shape.rank - 1
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self._query_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="query",
**common_kwargs)
self._global_query_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="global_query",
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._key_shape.rank - 1, bound_dims=1, output_dims=2)
self._key_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="key",
**common_kwargs)
self._global_key_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="global_key",
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._value_shape.rank - 1, bound_dims=1, output_dims=2)
self._value_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value",
**common_kwargs)
self._global_value_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="global_value",
**common_kwargs)
# Builds the attention computations for multi-head dot product attention.
    # These computations could be wrapped into the keras attention layer once
    # it supports multi-head einsum computations.
self._build_attention(output_rank)
self._global_dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
# self._output_dense = self._make_output_dense(
# free_dims, common_kwargs, "attention_output")
self._output_dense = tf.keras.layers.Dense(
units=self._num_heads * self._key_dim, name="dense", **common_kwargs)
def call(self,
hidden_states,
attention_mask=None,
is_index_masked=None,
is_index_global_attn=None,
training=None):
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
hidden_states: inputs for generating query, key and value tensors.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions.
is_index_masked: boolean indicating whether the index is masked.
is_index_global_attn: boolean indicating whether the index is global
attention.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
"""
if not self._built_from_signature:
self._build_from_signature(
query=hidden_states, value=hidden_states, key=hidden_states)
# N = `num_attention_heads`
# H = `size_per_head`
# `query` = [B, T, N ,H]
query = self._query_dense(hidden_states)
# `key` = [B, S, N, H]
key = self._key_dense(hidden_states)
# `value` = [B, S, N, H]
value = self._value_dense(hidden_states)
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
batch_size, seq_len, num_heads, head_dim = get_shape_list(query)
# attn_probs = (batch_size, seq_len, num_heads, window*2+1)
attn_scores = self._sliding_chunks_query_key_matmul(
query, key, self._one_sided_attn_window_size)
# diagonal mask with zeros everywhere and -inf inplace of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
tf.ones(get_shape_list(attention_mask)),
attention_mask,
self._one_sided_attn_window_size,
)
# pad local attention probs
attn_scores += diagonal_mask
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(attn_scores),
[
batch_size, seq_len, self._num_heads,
self._one_sided_attn_window_size * 2 + 1
],
message=f"attn_probs should be of size "
f"({batch_size}, {seq_len}, {num_heads}, "
f"{self._one_sided_attn_window_size * 2 + 1}),"
f" but is of size {get_shape_list(attn_scores)}",
)
# compute global attn indices required through out forward fn
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn,
self.global_attention_size)
# this function is only relevant for global attention
if self.global_attention_size > 0:
attn_scores = self._concat_with_global_key_attn_probs(
attn_scores=attn_scores,
query_vectors=query,
key_vectors=key,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
else:
pass
attn_probs = tf.nn.softmax(attn_scores, axis=-1)
# softmax sometimes inserts NaN if all positions are masked,
# replace them with 0
# Make sure to create a mask with the proper shape:
# if is_global_attn==True => [batch_size, seq_len, self.num_heads,
# self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1]
# if is_global_attn==False => [batch_size, seq_len, self.num_heads,
# self.one_sided_attn_window_size * 2 + 1]
if self.global_attention_size > 0:
masked_index = tf.tile(
is_index_masked[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 +
max_num_global_attn_indices + 1),
)
else:
masked_index = tf.tile(
is_index_masked[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 + 1),
)
attn_probs = tf.where(
masked_index,
tf.zeros(get_shape_list(masked_index), dtype=attn_probs.dtype),
attn_probs,
)
layer_head_mask = None
if layer_head_mask is not None:
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(layer_head_mask),
[self._num_heads],
message=f"Head mask for a single layer should be of size "
f"{(self._num_heads)}, but is "
f"{get_shape_list(layer_head_mask)}",
)
attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs
# apply dropout
attn_probs = self._dropout_layer(attn_probs, training=training)
value_vectors = tf.reshape(
value, (batch_size, seq_len, self._num_heads, self._key_dim))
# if global attention, compute sum of global and local attn
if self.global_attention_size > 0:
attn_output = self._compute_attn_output_with_global_indices(
value_vectors=value_vectors,
attn_probs=attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
else:
attn_output = self._sliding_chunks_matmul_attn_probs_value(
attn_probs, value_vectors, self._one_sided_attn_window_size)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(attn_output),
[batch_size, seq_len, self._num_heads, head_dim],
message="Unexpected size",
)
attn_output = tf.reshape(
attn_output,
(batch_size, seq_len, self._num_heads * self._key_dim)) # FIXME
# compute value for global attention and overwrite to attention output
# TODO(crickwu): remove the redundant computation
if self.global_attention_size > 0:
attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( # pylint: disable=unused-variable
attn_output=attn_output,
hidden_states=hidden_states,
max_num_global_attn_indices=max_num_global_attn_indices,
layer_head_mask=layer_head_mask,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=is_index_masked,
training=training,
)
else:
global_attn_probs = tf.zeros(
(batch_size, self._num_heads, max_num_global_attn_indices, seq_len))
# make sure that local attention probabilities are set to 0 for indices of
# global attn
if self.global_attention_size > 0:
masked_global_attn_index = tf.tile(
is_index_global_attn[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 +
max_num_global_attn_indices + 1),
)
else:
masked_global_attn_index = tf.tile(
is_index_global_attn[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 + 1),
)
attn_probs = tf.where(
masked_global_attn_index,
tf.zeros(
get_shape_list(masked_global_attn_index), dtype=attn_probs.dtype),
attn_probs,
)
# we can return extra information here
# (attn_output, attn_probs, global_attn_probs)
return attn_output
def get_config(self):
config = {
"layer_id": self._layer_id,
"attention_window": self._one_sided_attn_window_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def _sliding_chunks_query_key_matmul(self, query, key, window_overlap):
"""Matrix multiplication of query and key tensors.
This multiplication uses a sliding window attention pattern.
This implementation splits the input into overlapping chunks of size
2w (e.g. 512 for pretrained Longformer) with an overlap of size
window_overlap.
Args:
query: query tensor.
key: key tensor.
window_overlap: int.
Returns:
diagonal_attention_scores: tensor.
"""
batch_size, seq_len, num_heads, head_dim = get_shape_list(query)
if tf.executing_eagerly():
tf.debugging.assert_equal(
seq_len % (window_overlap * 2),
0,
message=f"Sequence length should be multiple of {window_overlap * 2}. "
f"Given {seq_len}",
)
tf.debugging.assert_equal(
get_shape_list(query),
get_shape_list(key),
message=f"Shape of query and key should be equal, but got query: "
f"{get_shape_list(query)} and key: {get_shape_list(key)}",
)
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one,
# then chunk seq_len into chunks of size window_overlap * 2
query = tf.reshape(
tf.transpose(query, (0, 2, 1, 3)),
(batch_size * num_heads, seq_len, head_dim),
)
key = tf.reshape(
tf.transpose(key, (0, 2, 1, 3)),
(batch_size * num_heads, seq_len, head_dim))
chunked_query = self._chunk(query, window_overlap)
chunked_key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
chunked_query = tf.cast(chunked_query, dtype=chunked_key.dtype)
chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query,
chunked_key) # multiply
# convert diagonals into columns
paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]])
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
chunked_attention_scores, paddings)
# allocate space for the overall attention matrix where the chunks are
# combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns
# are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score
# from each word to itself, then
# followed by window_overlap columns for the upper triangle.
# copy parts from diagonal_chunked_attention_scores into the combined matrix
# of attentions - copying the main diagonal and the upper triangle
# TODO(crickwu): This code is most likely not very efficient and should be
# improved.
diagonal_attn_scores_up_triang = tf.concat(
[
diagonal_chunked_attention_scores[:, :, :window_overlap, :
window_overlap + 1],
diagonal_chunked_attention_scores[:, -1:,
window_overlap:, :window_overlap +
1],
],
axis=1,
)
# - copying the lower triangle
diagonal_attn_scores_low_triang = tf.concat(
[
tf.zeros(
(batch_size * num_heads, 1, window_overlap, window_overlap),
dtype=diagonal_chunked_attention_scores.dtype,
),
diagonal_chunked_attention_scores[:, :, -(window_overlap + 1):-1,
window_overlap + 1:],
],
axis=1,
)
diagonal_attn_scores_first_chunk = tf.concat(
[
tf.roll(
diagonal_chunked_attention_scores,
shift=[1, window_overlap],
axis=[2, 3],
)[:, :, :window_overlap, :window_overlap],
tf.zeros(
(batch_size * num_heads, 1, window_overlap, window_overlap),
dtype=diagonal_chunked_attention_scores.dtype,
),
],
axis=1,
)
first_chunk_mask = (
tf.tile(
tf.range(chunks_count + 1)[None, :, None, None],
(batch_size * num_heads, 1, window_overlap, window_overlap),
) < 1)
diagonal_attn_scores_low_triang = tf.where(
first_chunk_mask,
diagonal_attn_scores_first_chunk,
diagonal_attn_scores_low_triang,
)
# merging upper and lower triangle
diagonal_attention_scores = tf.concat(
[diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang],
axis=-1)
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = tf.transpose(
tf.reshape(
diagonal_attention_scores,
(batch_size, num_heads, seq_len, 2 * window_overlap + 1),
),
(0, 2, 1, 3),
)
diagonal_attention_scores = self._mask_invalid_locations(
diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
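  # Worked shape example: with batch_size=1, num_heads=12, seq_len=1024 and
  # window_overlap=256, the method above processes queries/keys as three
  # overlapping 512-token chunks and returns diagonal_attention_scores of
  # shape (1, 1024, 12, 513): one score per token for itself plus 256
  # neighbours on each side.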
@staticmethod
def _mask_invalid_locations(input_tensor, window_overlap):
# create correct upper triangle bool mask
mask_2d_upper = tf.reverse(
tf.linalg.band_part(
tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0),
axis=[0],
)
# pad to full matrix
padding = tf.convert_to_tensor(
[[0, get_shape_list(input_tensor)[1] - window_overlap],
[0, get_shape_list(input_tensor)[3] - window_overlap - 1]])
# create lower mask
mask_2d = tf.pad(mask_2d_upper, padding)
# combine with upper mask
mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1])
# broadcast to full matrix
mask_4d = tf.tile(mask_2d[None, :, None, :],
(get_shape_list(input_tensor)[0], 1, 1, 1))
# inf tensor used for masking
inf_tensor = -float("inf") * tf.ones_like(input_tensor)
# mask
input_tensor = tf.where(
tf.math.greater(mask_4d, 0), inf_tensor, input_tensor)
return input_tensor
def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value,
window_overlap):
"""Same as _sliding_chunks_query_key_matmul but for attn_probs and value."""
batch_size, seq_len, num_heads, head_dim = get_shape_list(value)
if tf.executing_eagerly():
tf.debugging.assert_equal(
seq_len % (window_overlap * 2),
0,
message="Seq_len has to be multiple of 2 * window_overlap",
)
tf.debugging.assert_equal(
get_shape_list(attn_probs)[:3],
get_shape_list(value)[:3],
message="value and attn_probs must have same dims (except head_dim)",
)
tf.debugging.assert_equal(
get_shape_list(attn_probs)[3],
2 * window_overlap + 1,
message="attn_probs last dim has to be 2 * window_overlap + 1",
)
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len
# into chunks of size 2 window overlap
chunked_attn_probs = tf.reshape(
tf.transpose(attn_probs, (0, 2, 1, 3)),
(
batch_size * num_heads,
seq_len // window_overlap,
window_overlap,
2 * window_overlap + 1,
),
)
# group batch_size and num_heads dimensions into one
value = tf.reshape(
tf.transpose(value, (0, 2, 1, 3)),
(batch_size * num_heads, seq_len, head_dim),
)
# pad seq_len with w at the beginning of the sequence and another window
# overlap at the end
paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap],
[0, 0]])
padded_value = tf.pad(value, paddings, constant_values=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of
# size window overlap
frame_size = 3 * window_overlap * head_dim
frame_hop_size = (get_shape_list(padded_value)[1] * head_dim -
frame_size) // chunks_count
chunked_value = tf.signal.frame(
tf.reshape(padded_value, (batch_size * num_heads, -1)),
frame_size,
frame_hop_size,
)
chunked_value = tf.reshape(
chunked_value,
(batch_size * num_heads, chunks_count + 1, 3 * window_overlap,
head_dim),
)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(chunked_value),
[
batch_size * num_heads, chunks_count + 1, 3 * window_overlap,
head_dim
],
message="Chunked value has the wrong shape",
)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value)
context = tf.transpose(
tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)),
(0, 2, 1, 3),
)
return context
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings):
"""Pads rows and then flips rows and columns."""
hidden_states_padded = tf.pad(
hidden_states_padded, paddings
) # padding value is not important because it will be overwritten
batch_size, chunk_size, seq_length, hidden_dim = get_shape_list(
hidden_states_padded)
hidden_states_padded = tf.reshape(
hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length))
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""Shifts every row 1 step right, converting columns into diagonals.
Example::
chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492,
-1.8348, 0.7672, 0.2986, 0.0285,
-0.7584, 0.4206, -0.0405, 0.1599,
2.0514, -1.1600, 0.5372, 0.2629 ]
window_overlap = num_rows = 4
(pad & diagonalize) =>
[ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
Args:
chunked_hidden_states: tensor.
Returns:
padded_hidden_stategs: tensor.
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = get_shape_list(
chunked_hidden_states)
paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0],
[0, window_overlap + 1]])
chunked_hidden_states = tf.pad(chunked_hidden_states, paddings)
chunked_hidden_states = tf.reshape(chunked_hidden_states,
(total_num_heads, num_chunks, -1))
chunked_hidden_states = chunked_hidden_states[:, :, :-window_overlap]
chunked_hidden_states = tf.reshape(
chunked_hidden_states,
(total_num_heads, num_chunks, window_overlap,
window_overlap + hidden_dim),
)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w."""
batch_size, seq_length, hidden_dim = get_shape_list(hidden_states)
num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1
# define frame size and frame stride (similar to convolution)
frame_hop_size = window_overlap * hidden_dim
frame_size = 2 * frame_hop_size
hidden_states = tf.reshape(hidden_states,
(batch_size, seq_length * hidden_dim))
# chunk with overlap
chunked_hidden_states = tf.signal.frame(hidden_states, frame_size,
frame_hop_size)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(chunked_hidden_states),
[batch_size, num_output_chunks, frame_size],
message=f"Make sure chunking is correctly applied. `Chunked hidden "
f"states should have output dimension"
f" {[batch_size, frame_size, num_output_chunks]}, but got "
f"{get_shape_list(chunked_hidden_states)}.",
)
chunked_hidden_states = tf.reshape(
chunked_hidden_states,
(batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),
)
return chunked_hidden_states
@staticmethod
def _get_global_attn_indices(is_index_global_attn, global_attention_size):
"""Computes global attn indices required throughout forward pass."""
# All global attention size are fixed through global_attention_size
batch_size, _ = get_shape_list(is_index_global_attn)
max_num_global_attn_indices = global_attention_size
row_indices = tf.range(batch_size)
row_indices = tf.repeat(
tf.expand_dims(row_indices, axis=0),
repeats=[global_attention_size],
axis=0)
row_indices = tf.reshape(row_indices,
(batch_size * global_attention_size, 1))
col_indices = tf.range(global_attention_size)
col_indices = tf.repeat(
tf.expand_dims(col_indices, axis=1), repeats=[batch_size], axis=0)
is_index_global_attn_nonzero = tf.concat((row_indices, col_indices), axis=1)
# this is actually same as `is_index_global_attn_nonzero`,
# since we assume all global attention are the same size
is_local_index_global_attn_nonzero = tf.concat((row_indices, col_indices),
axis=1)
# empty tensor
is_local_index_no_global_attn_nonzero = tf.reshape(
tf.expand_dims(tf.range(0), axis=1), (0, 2))
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
def _concat_with_global_key_attn_probs(
self,
attn_scores,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = get_shape_list(key_vectors)[0]
# select global key vectors
global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero)
# create only global key vectors
key_vectors_only_global = tf.scatter_nd(
is_local_index_global_attn_nonzero,
global_key_vectors,
shape=(
batch_size,
max_num_global_attn_indices,
self._num_heads,
self._key_dim,
),
)
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = tf.einsum("blhd,bshd->blhs", query_vectors,
key_vectors_only_global)
# (batch_size, max_num_global_attn_indices, seq_len, num_heads)
attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key,
(0, 3, 1, 2))
mask_shape = (
get_shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
get_shape_list(attn_probs_from_global_key_trans)[-2:])
mask = tf.ones(mask_shape) * -10000.0
mask = tf.cast(mask, dtype=attn_probs_from_global_key_trans.dtype)
# scatter mask
attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update(
attn_probs_from_global_key_trans,
is_local_index_no_global_attn_nonzero,
mask,
)
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans,
(0, 2, 3, 1))
# concat to attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1)
return attn_scores
def _compute_attn_output_with_global_indices(
self,
value_vectors,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
):
batch_size = get_shape_list(attn_probs)[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices]
# select global value vectors
global_value_vectors = tf.gather_nd(value_vectors,
is_index_global_attn_nonzero)
# create only global value vectors
value_vectors_only_global = tf.scatter_nd(
is_local_index_global_attn_nonzero,
global_value_vectors,
shape=(
batch_size,
max_num_global_attn_indices,
self._num_heads,
self._key_dim,
),
)
# compute attn output only global
attn_output_only_global = tf.einsum("blhs,bshd->blhd",
attn_probs_only_global,
value_vectors_only_global)
# reshape attn probs
attn_probs_without_global = attn_probs[:, :, :,
max_num_global_attn_indices:]
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
attn_probs_without_global, value_vectors,
self._one_sided_attn_window_size)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
attn_output,
hidden_states,
max_num_global_attn_indices,
layer_head_mask,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
training,
):
batch_size, seq_len = get_shape_list(hidden_states)[:2]
# prepare global hidden states
global_attn_hidden_states = tf.gather_nd(hidden_states,
is_index_global_attn_nonzero)
global_attn_hidden_states = tf.scatter_nd(
is_local_index_global_attn_nonzero,
global_attn_hidden_states,
shape=(batch_size, max_num_global_attn_indices,
self._num_heads * self._key_dim),
)
# global key, query, value
global_query_vectors_only_global = self._global_query_dense(
global_attn_hidden_states)
global_key_vectors = self._global_key_dense(hidden_states)
global_value_vectors = self._global_value_dense(hidden_states)
# normalize
global_query_vectors_only_global /= tf.math.sqrt(
tf.cast(self._key_dim, dtype=global_query_vectors_only_global.dtype))
global_query_vectors_only_global = self.reshape_and_transpose(
global_query_vectors_only_global, batch_size)
global_key_vectors = self.reshape_and_transpose(global_key_vectors,
batch_size)
global_value_vectors = self.reshape_and_transpose(global_value_vectors,
batch_size)
# compute attn scores
global_attn_scores = tf.matmul(
global_query_vectors_only_global, global_key_vectors, transpose_b=True)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(global_attn_scores),
[batch_size * self._num_heads, max_num_global_attn_indices, seq_len],
message=f"global_attn_scores have the wrong size. Size should be"
f"{(batch_size * self._num_heads, max_num_global_attn_indices, seq_len)}, "
f"but is {get_shape_list(global_attn_scores)}.",
)
global_attn_scores = tf.reshape(
global_attn_scores,
(batch_size, self._num_heads, max_num_global_attn_indices, seq_len),
)
global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3))
mask_shape = (get_shape_list(is_local_index_no_global_attn_nonzero)[0],
) + tuple(get_shape_list(global_attn_scores_trans)[-2:])
global_attn_mask = tf.ones(mask_shape) * -10000.0
global_attn_mask = tf.cast(
global_attn_mask, dtype=global_attn_scores_trans.dtype)
# scatter mask
global_attn_scores_trans = tf.tensor_scatter_nd_update(
global_attn_scores_trans,
is_local_index_no_global_attn_nonzero,
global_attn_mask,
)
global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3))
# mask global attn scores
attn_mask = tf.tile(is_index_masked[:, None, None, :],
(1, get_shape_list(global_attn_scores)[1], 1, 1))
global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores)
global_attn_scores = tf.reshape(
global_attn_scores,
(batch_size * self._num_heads, max_num_global_attn_indices, seq_len),
)
# compute global attn probs
global_attn_probs_float = tf.nn.softmax(global_attn_scores, axis=-1)
# apply layer head masking
if layer_head_mask is not None:
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(layer_head_mask),
[self._num_heads],
message=f"Head mask for a single layer should be of size "
f"{(self._num_heads)}, but is {get_shape_list(layer_head_mask)}",
)
global_attn_probs_float = tf.reshape(
layer_head_mask,
(1, -1, 1, 1)) * tf.reshape(global_attn_probs_float,
(batch_size, self._num_heads,
max_num_global_attn_indices, seq_len))
global_attn_probs_float = tf.reshape(
global_attn_probs_float,
(batch_size * self._num_heads, max_num_global_attn_indices, seq_len))
# dropout
global_attn_probs = self._global_dropout_layer(
global_attn_probs_float, training=training)
# global attn output
global_attn_output = tf.matmul(global_attn_probs, global_value_vectors)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(global_attn_output),
[
batch_size * self._num_heads, max_num_global_attn_indices,
self._key_dim
],
message=f"global_attn_output tensor has the wrong size. Size should be "
f"{(batch_size * self._num_heads, max_num_global_attn_indices, self._key_dim)}, "
f"but is {get_shape_list(global_attn_output)}.",
)
global_attn_output = tf.reshape(
global_attn_output,
(batch_size, self._num_heads, max_num_global_attn_indices,
self._key_dim),
)
# get only non zero global attn output
nonzero_global_attn_output = tf.gather_nd(
tf.transpose(global_attn_output, (0, 2, 1, 3)),
is_local_index_global_attn_nonzero,
)
nonzero_global_attn_output = tf.reshape(
nonzero_global_attn_output,
(get_shape_list(is_local_index_global_attn_nonzero)[0], -1),
)
# overwrite values with global attention
attn_output = tf.tensor_scatter_nd_update(attn_output,
is_index_global_attn_nonzero,
nonzero_global_attn_output)
global_attn_probs = tf.reshape(
global_attn_probs,
(batch_size, self._num_heads, max_num_global_attn_indices, seq_len))
attn_output = self._output_dense(attn_output)
return attn_output, global_attn_probs
def reshape_and_transpose(self, vector, batch_size):
return tf.reshape(
tf.transpose(
tf.reshape(vector,
(batch_size, -1, self._num_heads, self._key_dim)),
(0, 2, 1, 3),
),
(batch_size * self._num_heads, -1, self._key_dim),
)
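# Illustrative sketch (not part of the original file): mirrors the
# reshape_and_transpose transform above with assumed toy sizes so the shape
# flow is explicit: (batch, seq, heads * dim) becomes (batch * heads, seq, dim).
def _reshape_and_transpose_shape_example():
  batch_size, seq_len, num_heads, key_dim = 2, 8, 4, 16
  vector = tf.zeros((batch_size, seq_len, num_heads * key_dim))
  out = tf.reshape(
      tf.transpose(
          tf.reshape(vector, (batch_size, -1, num_heads, key_dim)),
          (0, 2, 1, 3)),
      (batch_size * num_heads, -1, key_dim))
  assert out.shape == (batch_size * num_heads, seq_len, key_dim)
  return out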
| 41,493 | 37.313943 | 119 | py |
models | models-master/official/projects/longformer/longformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer attention layer. Modified From huggingface/transformers."""
import tensorflow as tf
from official.projects.longformer.longformer_attention import LongformerAttention
@tf.keras.utils.register_keras_serializable(package="Text")
class LongformerEncoderBlock(tf.keras.layers.Layer):
"""LongformerEncoderBlock.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
    **kwargs: keyword arguments.
"""
def __init__(
self,
global_attention_size,
num_attention_heads,
inner_dim,
inner_activation,
# Longformer
attention_window,
layer_id=0,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
super().__init__(**kwargs)
self.global_attention_size = global_attention_size
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
# Longformer
self._attention_window = attention_window
self._layer_id = layer_id
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = self._kernel_initializer
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
f"The type of input shape argument is not supported, got: "
f"{type(input_shape)}")
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
f"The input size ({hidden_size}) is not a multiple of the number of attention "
f"heads ({self._num_heads})")
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# TFLongformerSelfAttention + TFLongformerSelfOutput.dense
self._attention_layer = LongformerAttention(
# Longformer
layer_id=self._layer_id,
global_attention_size=self.global_attention_size,
attention_window=self._attention_window,
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
# TFLongformerSelfOutput.dropout
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
# TFLongformerSelfOutput.Layernorm
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
# TFLongformerIntermediate
# TFLongformerIntermediate.dense
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=self._kernel_initializer,
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
# TFLongformerIntermediate.intermediate_act_fn
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
# TFLongformerOutput
# TFLongformerOutput.dense
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=self._kernel_initializer,
**common_kwargs)
# TFLongformerOutput.dropout
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# TFLongformerOutput.layernorm
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes":
self._attention_axes,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
      inputs: a single tensor or a list of tensors. `input tensor` as the single
        sequence of embeddings. [`input tensor`, `attention mask`,
        `is_index_masked`, `is_index_global_attn`] to provide the additional
        attention mask together with the boolean masks marking padded tokens
        and tokens that attend globally.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 4:
(
input_tensor,
attention_mask,
is_index_masked,
is_index_global_attn,
) = inputs
key_value = None
elif len(inputs) == 5:
assert False # No key_value
else:
raise ValueError(
f"Unexpected inputs to {self.__class__} with length at {len(inputs)}"
)
else:
input_tensor = inputs
attention_mask = None
is_index_masked = None
is_index_global_attn = None
key_value = None
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
if is_index_masked is not None:
is_index_masked = is_index_masked[:, 0:self._output_range]
if is_index_global_attn is not None:
is_index_global_attn = is_index_global_attn[:, 0:self._output_range]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
hidden_states=target_tensor,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
)
# TFLongformerAttention.TFLongformerSelfOutput.* - {.dense}
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
# TFLongformerIntermediate
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
# TFLongformerOutput
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
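# Illustrative sketch (not part of the original file): constructing the block
# directly. The hyperparameters below are assumptions chosen for demonstration;
# in practice the Longformer encoder builds these blocks and calls them with
# [inputs, attention_mask, is_index_masked, is_index_global_attn].
def _example_longformer_encoder_block():
  return LongformerEncoderBlock(
      global_attention_size=1,
      num_attention_heads=4,
      inner_dim=512,
      inner_activation='gelu',
      attention_window=32,
      layer_id=0)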
| 13,814 | 39.513196 | 89 | py |
models | models-master/official/projects/longformer/longformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer model configurations and instantiation methods."""
import dataclasses
from typing import List
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.longformer.longformer_encoder import LongformerEncoder
@dataclasses.dataclass
class LongformerEncoderConfig(encoders.BertEncoderConfig):
"""Extra paramerters for Longformer configs.
Attributes:
attention_window: list of ints representing the window size for each layer.
global_attention_size: the size of global attention used for each token.
pad_token_id: the token id for the pad token
"""
attention_window: List[int] = dataclasses.field(default_factory=list)
global_attention_size: int = 0
pad_token_id: int = 1
@base_config.bind(LongformerEncoderConfig)
def get_encoder(encoder_cfg: LongformerEncoderConfig):
"""Gets a 'LongformerEncoder' object.
Args:
encoder_cfg: A 'LongformerEncoderConfig'.
Returns:
    An encoder object.
"""
encoder = LongformerEncoder(
attention_window=encoder_cfg.attention_window,
global_attention_size=encoder_cfg.global_attention_size,
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first)
return encoder
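# Illustrative sketch (not part of the original file): building a config and
# the corresponding encoder. The window size and global attention size are
# assumptions for demonstration; the remaining fields keep their
# BertEncoderConfig defaults.
def _example_longformer_encoder():
  encoder_cfg = LongformerEncoderConfig(
      attention_window=[128] * 12,
      global_attention_size=1)
  return get_encoder(encoder_cfg)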
| 2,615 | 36.371429 | 79 | py |
models | models-master/official/projects/longformer/utils/convert_pretrained_pytorch_checkpoint_to_tf.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts pre-trained pytorch checkpoint into a tf encoder checkpoint."""
import os
from absl import app
import numpy as np
import tensorflow as tf
import transformers
from official.modeling import tf_utils
from official.projects.longformer.longformer import LongformerEncoderConfig
from official.projects.longformer.longformer_encoder import LongformerEncoder
def _get_pytorch_longformer_model():
pretrained_lm = "allenai/longformer-base-4096"
model = transformers.AutoModel.from_pretrained(pretrained_lm)
return {n: p.data.numpy() for n, p in model.named_parameters()}
def _create_longformer_model():
"""Creates a Longformer model."""
encoder_cfg = LongformerEncoderConfig
encoder_cfg.vocab_size = 50265
encoder_cfg.max_position_embeddings = 4098
encoder_cfg.attention_window = [2] * encoder_cfg.num_layers
encoder_cfg.global_attention_size = 1
encoder = LongformerEncoder(
attention_window=encoder_cfg.attention_window,
global_attention_size=encoder_cfg.global_attention_size,
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first)
return encoder
# pylint: disable=protected-access
def convert(encoder, allenai_model):
"""Convert AllenAI Longformer to the one in the codebase."""
num_layers = encoder._config["num_layers"]
num_attention_heads = encoder._config["num_attention_heads"]
hidden_size = encoder._config["hidden_size"]
head_size = hidden_size // num_attention_heads
assert head_size * num_attention_heads == hidden_size
encoder._embedding_layer.set_weights(
[allenai_model["embeddings.word_embeddings.weight"]])
encoder._embedding_norm_layer.set_weights([
allenai_model["embeddings.LayerNorm.weight"],
allenai_model["embeddings.LayerNorm.bias"]
])
encoder._type_embedding_layer.set_weights([
np.repeat(
allenai_model["embeddings.token_type_embeddings.weight"], 2, axis=0)
])
encoder._position_embedding_layer.set_weights(
[allenai_model["embeddings.position_embeddings.weight"]])
encoder._pooler_layer.set_weights([
allenai_model["pooler.dense.weight"], allenai_model["pooler.dense.bias"]
])
for layer_num in range(num_layers):
encoder._transformer_layers[
layer_num]._attention_layer._global_key_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.key_global.weight"].T
.reshape(
(hidden_size, num_attention_heads, head_size)), allenai_model[
f"encoder.layer.{layer_num}.attention.self.key_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._global_query_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query_global.weight"]
.T.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._global_value_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value_global.weight"]
.T.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._key_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.key.weight"].T
.reshape(
(hidden_size, num_attention_heads, head_size)), allenai_model[
f"encoder.layer.{layer_num}.attention.self.key_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._query_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query.weight"].T
.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query.bias"].reshape(
(num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._value_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value.weight"].T
.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value.bias"].reshape(
(num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._output_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.output.dense.weight"].T,
allenai_model[
f"encoder.layer.{layer_num}.attention.output.dense.bias"]
])
encoder._transformer_layers[layer_num]._attention_layer_norm.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.output.LayerNorm.weight"],
allenai_model[
f"encoder.layer.{layer_num}.attention.output.LayerNorm.bias"]
])
encoder._transformer_layers[layer_num]._intermediate_dense.set_weights([
allenai_model[f"encoder.layer.{layer_num}.intermediate.dense.weight"].T,
allenai_model[f"encoder.layer.{layer_num}.intermediate.dense.bias"]
])
encoder._transformer_layers[layer_num]._output_dense.set_weights([
allenai_model[f"encoder.layer.{layer_num}.output.dense.weight"].T,
allenai_model[f"encoder.layer.{layer_num}.output.dense.bias"]
])
encoder._transformer_layers[layer_num]._output_layer_norm.set_weights([
allenai_model[f"encoder.layer.{layer_num}.output.LayerNorm.weight"],
allenai_model[f"encoder.layer.{layer_num}.output.LayerNorm.bias"]
])
def convert_checkpoint(output_path):
"""Converts and save the checkpoint."""
output_dir, _ = os.path.split(output_path)
tf.io.gfile.makedirs(output_dir)
encoder = _create_longformer_model()
allenai_model = _get_pytorch_longformer_model()
sequence_length = 128
batch_size = 2
word_id_data = np.random.randint(
10, size=(batch_size, sequence_length), dtype=np.int32)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
type_id_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
inputs = {
"input_word_ids": word_id_data,
"input_mask": mask_data,
"input_type_ids": type_id_data,
}
encoder(inputs)
convert(encoder, allenai_model)
tf.train.Checkpoint(encoder=encoder).write(output_path)
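# Illustrative sketch (not part of the original file): reading the checkpoint
# written by `convert_checkpoint` back into a freshly built encoder. The path
# is an assumption for demonstration.
def example_restore_checkpoint(checkpoint_path="longformer-4096/longformer"):
  encoder = _create_longformer_model()
  status = tf.train.Checkpoint(encoder=encoder).read(checkpoint_path)
  status.expect_partial()
  return encoder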
def main(_):
convert_checkpoint("longformer-4096/longformer")
if __name__ == "__main__":
app.run(main)
| 8,403 | 40.810945 | 80 | py |
models | models-master/official/projects/qat/nlp/quantization/schemes.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization schemes."""
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.nlp.modeling.layers import mobile_bert_layers
from official.projects.qat.nlp.modeling.layers import transformer_encoder_block
from official.projects.qat.nlp.quantization import configs
keras = tf.keras
default_8bit_transforms = tfmot.quantization.keras.default_8bit.default_8bit_transforms
LayerNode = tfmot.quantization.keras.graph_transformations.transforms.LayerNode
LayerPattern = tfmot.quantization.keras.graph_transformations.transforms.LayerPattern
class TransformerEncoderBlockQuantize(
tfmot.quantization.keras.graph_transformations.transforms.Transform):
"""Add QAT support for Keras Custom layer."""
_QUANTIZATION_AWARE_TRAINING_WEIGHT_NAMES = frozenset({
'optimizer_step',
'output_max', 'output_min',
'kernel_min', 'kernel_max',
'depthwise_kernel_min', 'depthwise_kernel_max',
'query_min', 'query_max',
'attention_scores_min', 'attention_scores_max',
'attention_output_min', 'attention_output_max',
'masked_softmax_attention_mask_min',
'masked_softmax_attention_mask_max',
'masked_softmax_sub1_min', 'masked_softmax_sub1_max',
'masked_softmax_mask1_min', 'masked_softmax_mask1_max',
'masked_softmax_sub2_min', 'masked_softmax_sub2_max',
'masked_softmax_clamp_min', 'masked_softmax_clamp_max',
'masked_softmax_mask2_min', 'masked_softmax_mask2_max',
'masked_softmax_adder_sub_min', 'masked_softmax_adder_sub_max',
'masked_softmax_adder_mul_min', 'masked_softmax_adder_mul_max',
'masked_softmax_add_min', 'masked_softmax_add_max',
'post_activation_min', 'post_activation_max',
'word_embedding_out_min', 'word_embedding_out_max',
'pos_embedding_out_min', 'pos_embedding_out_max',
'type_embedding_out_min', 'type_embedding_out_max',
'bias_min', 'bias_max'
})
  _SUPPORTED_MODEL_WEIGHT_NAMES = frozenset({
'kernel', 'depthwise_kernel', 'bias',
'gamma', 'beta', 'moving_mean', 'moving_variance',
'embeddings'
})
def __init__(self):
super().__init__()
self._original_layer_pattern = 'modeling>TransformerEncoderBlock'
self._quantized_layer_class = transformer_encoder_block.TransformerEncoderBlockQuantized
def pattern(self) -> LayerPattern:
"""See base class."""
return LayerPattern(self._original_layer_pattern)
def _is_quantization_weight_name(self, name):
simple_name = name.split('/')[-1].split(':')[0]
if simple_name in self._QUANTIZATION_AWARE_TRAINING_WEIGHT_NAMES:
return True
    if simple_name in self._SUPPORTED_MODEL_WEIGHT_NAMES:
return False
raise ValueError('Variable name {} is not supported on '
'CustomLayerQuantize({}) transform.'.format(
simple_name,
self._original_layer_pattern))
def replacement(self, match_layer: LayerNode) -> LayerNode:
"""See base class."""
bottleneck_layer = match_layer.layer
bottleneck_config = bottleneck_layer['config']
bottleneck_names_and_weights = list(match_layer.names_and_weights)
quantized_layer = self._quantized_layer_class(
**bottleneck_config)
quantized_layer_config = quantized_layer.get_config()
if 'hidden_size' in quantized_layer_config:
dummy_input_shape = [
1, 1, quantized_layer_config['hidden_size']]
quantized_layer.compute_output_shape(dummy_input_shape)
elif 'num_attention_heads' in quantized_layer_config:
dummy_input_shape = [
1, 1, quantized_layer_config['num_attention_heads']]
quantized_layer.compute_output_shape(dummy_input_shape)
else:
dummy_input_shape = [1, 1]
quantized_layer(np.zeros(shape=dummy_input_shape, dtype=np.int32),
np.zeros(shape=dummy_input_shape, dtype=np.int32),
training=False)
quantized_names_and_weights = zip(
[weight.name for weight in quantized_layer.weights],
quantized_layer.get_weights())
match_idx = 0
names_and_weights = []
for name_and_weight in quantized_names_and_weights:
if not self._is_quantization_weight_name(name=name_and_weight[0]):
name_and_weight = bottleneck_names_and_weights[match_idx]
match_idx = match_idx + 1
names_and_weights.append(name_and_weight)
if match_idx != len(bottleneck_names_and_weights):
      raise ValueError('{}/{} of Bottleneck weights were transformed.'.format(
match_idx, len(bottleneck_names_and_weights)))
quantized_layer_config = tf_utils.serialize_layer(
quantized_layer, use_legacy_format=True
)
quantized_layer_config['name'] = quantized_layer_config['config']['name']
layer_metadata = {
'quantize_config':
configs.NoQuantizeConfig()}
return LayerNode(
quantized_layer_config,
metadata=layer_metadata,
names_and_weights=names_and_weights)
class MobileBertTransformerQuantize(TransformerEncoderBlockQuantize):
def __init__(self):
super().__init__()
self._original_layer_pattern = 'Text>MobileBertTransformer'
self._quantized_layer_class = mobile_bert_layers.MobileBertTransformerQuantized
class MobileBertEmbeddingQuantize(TransformerEncoderBlockQuantize):
def __init__(self):
super().__init__()
self._original_layer_pattern = 'Text>MobileBertEmbedding'
self._quantized_layer_class = mobile_bert_layers.MobileBertEmbeddingQuantized
class QuantizeLayoutTransform(
tfmot.quantization.keras.QuantizeLayoutTransform):
"""Default model transformations."""
def apply(self, model, layer_quantize_map):
"""Implement default 8-bit transforms.
Currently this means the following.
    1. Pull activations into layers, and fuse activations. (TODO)
2. Modify range in incoming layers for Concat. (TODO)
3. Fuse Conv2D/DepthwiseConv2D + BN into single layer.
Args:
model: Keras model to be quantized.
layer_quantize_map: Map with keys as layer names, and values as dicts
containing custom `QuantizeConfig`s which may have been passed with
layers.
Returns:
(Transformed Keras model to better match TensorFlow Lite backend, updated
layer quantize map.)
"""
transforms = [
default_8bit_transforms.SeparableConv1DQuantize(),
default_8bit_transforms.SeparableConvQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormReLUQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormActivationQuantize(),
default_8bit_transforms.Conv2DBatchNormReLUQuantize(),
default_8bit_transforms.Conv2DBatchNormActivationQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormQuantize(),
default_8bit_transforms.Conv2DBatchNormQuantize(),
default_8bit_transforms.ConcatTransform6Inputs(),
default_8bit_transforms.ConcatTransform5Inputs(),
default_8bit_transforms.ConcatTransform4Inputs(),
default_8bit_transforms.ConcatTransform3Inputs(),
default_8bit_transforms.ConcatTransform(),
default_8bit_transforms.LayerReLUQuantize(),
default_8bit_transforms.LayerReluActivationQuantize(),
TransformerEncoderBlockQuantize(),
MobileBertTransformerQuantize(),
MobileBertEmbeddingQuantize(),
]
return tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer(
model, transforms,
set(layer_quantize_map.keys()), layer_quantize_map).transform()
class Default8BitQuantizeScheme(
tfmot.quantization.keras.default_8bit.Default8BitQuantizeScheme):
def get_layout_transformer(self):
return QuantizeLayoutTransform()
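# Illustrative sketch (not part of the original file): applying this scheme to
# a model that was annotated with tfmot's quantize_annotate_* APIs.
# `annotated_model` is an assumed placeholder for such a model.
def _example_quantize_apply(annotated_model):
  with tfmot.quantization.keras.quantize_scope():
    return tfmot.quantization.keras.quantize_apply(
        annotated_model, scheme=Default8BitQuantizeScheme())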
| 8,465 | 39.507177 | 93 | py |
models | models-master/official/projects/qat/nlp/quantization/helper.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization helpers."""
import tensorflow_model_optimization as tfmot
class LayerQuantizerHelper(object):
"""Helper class that handles quantizers."""
def __init__(self, *args, **kwargs):
self._quantizers = {}
self._quantizer_vars = {}
super().__init__(*args, **kwargs)
def _all_value_quantizer(self):
return tfmot.quantization.keras.quantizers.AllValuesQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def _moving_average_quantizer(self):
return tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def _add_quantizer(self, name, all_value_quantizer=False):
if all_value_quantizer:
self._quantizers[name] = self._all_value_quantizer()
else:
self._quantizers[name] = self._moving_average_quantizer()
def _apply_quantizer(self, name, inputs, training, **kwargs):
return self._quantizers[name](
inputs, training, self._quantizer_vars[name], **kwargs)
def _build_quantizer_vars(self):
for name in self._quantizers:
self._quantizer_vars[name] = self._quantizers[name].build(
tensor_shape=None, name=name, layer=self)
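# Illustrative sketch (not part of the original file): a minimal Keras layer
# that uses the helper mixin to fake-quantize its output. The tensorflow
# import and the layer itself are assumptions added only for this example.
import tensorflow as tf
class _ExampleQuantizedIdentity(LayerQuantizerHelper, tf.keras.layers.Layer):
  """Identity layer whose output is fake-quantized through the helper."""
  def build(self, input_shape):
    self._add_quantizer('output', all_value_quantizer=False)
    self._build_quantizer_vars()
    super().build(input_shape)
  def call(self, inputs, training=False):
    return self._apply_quantizer('output', inputs, training)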
| 1,835 | 35.72 | 74 | py |
models | models-master/official/projects/qat/nlp/quantization/wrappers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization Wrappers."""
import tensorflow_model_optimization as tfmot
class MultiHeadAttentionQuantizeWrapper(
tfmot.quantization.keras.QuantizeWrapperV2):
"""Custom quantize wrapper for the MultiHeadAttention layer."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._first_call_built = False
def build(self, input_shape):
self.layer.build(input_shape)
def call(self,
query,
value,
key=None,
attention_mask=None,
return_attention_scores=False,
training=None):
if not self._first_call_built:
# pylint: disable=protected-access
self.layer._build_from_signature(query=query, value=value, key=key)
# pylint: enable=protected-access
self.layer.call(
query, value, key=key, attention_mask=attention_mask,
return_attention_scores=return_attention_scores,
training=training)
super().build(input_shape=None)
self._first_call_built = True
return super().call(
query, value=value, key=key, attention_mask=attention_mask,
return_attention_scores=return_attention_scores,
training=training
)
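# Illustrative sketch (not part of the original file): wrapping a stock Keras
# MultiHeadAttention layer. The sizes, the local imports, and the choice of
# quantize config are assumptions added for demonstration.
def _example_wrapped_attention():
  import tensorflow as tf
  from official.projects.qat.nlp.quantization import configs
  return MultiHeadAttentionQuantizeWrapper(
      tf.keras.layers.MultiHeadAttention(num_heads=2, key_dim=8),
      quantize_config=configs.DefaultMultiHeadAttentionQuantizeConfig())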
| 1,820 | 33.358491 | 74 | py |
models | models-master/official/projects/qat/nlp/quantization/configs_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for configs.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.nlp.quantization import configs
class _TestHelper(object):
def _convert_list(self, list_of_tuples):
"""Transforms a list of 2-tuples to a tuple of 2 lists.
`QuantizeConfig` methods return a list of 2-tuples in the form
[(weight1, quantizer1), (weight2, quantizer2)]. This function converts
    it into a 2-tuple of lists: ([weight1, weight2], [quantizer1, quantizer2]).
Args:
list_of_tuples: List of 2-tuples.
Returns:
2-tuple of lists.
"""
list1 = []
list2 = []
for a, b in list_of_tuples:
list1.append(a)
list2.append(b)
return list1, list2
# TODO(pulkitb): Consider asserting on full equality for quantizers.
def _assert_weight_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.LastValueQuantizer)
def _assert_activation_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.MovingAverageQuantizer)
def _assert_kernel_equality(self, a, b):
self.assertAllEqual(a.numpy(), b.numpy())
class Default8BitQuantizeConfigTest(tf.test.TestCase, _TestHelper):
def _simple_dense_layer(self):
layer = tf.keras.layers.Dense(2)
layer.build(input_shape=(3,))
return layer
def testGetsQuantizeWeightsAndQuantizers(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
(weights, weight_quantizers) = self._convert_list(
quantize_config.get_weights_and_quantizers(layer))
self._assert_weight_quantizers(weight_quantizers)
self.assertEqual([layer.kernel], weights)
def testGetsQuantizeActivationsAndQuantizers(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
(activations, activation_quantizers) = self._convert_list(
quantize_config.get_activations_and_quantizers(layer))
self._assert_activation_quantizers(activation_quantizers)
self.assertEqual([layer.activation], activations)
def testSetsQuantizeWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
quantize_config.set_quantize_weights(layer, [quantize_kernel])
self._assert_kernel_equality(layer.kernel, quantize_kernel)
def testSetsQuantizeActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
quantize_config.set_quantize_activations(layer, [quantize_activation])
self.assertEqual(layer.activation, quantize_activation)
def testSetsQuantizeWeights_ErrorOnWrongNumberOfWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer,
[quantize_kernel, quantize_kernel])
def testSetsQuantizeWeights_ErrorOnWrongShapeOfWeight(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(np.ones([1, 2]))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [quantize_kernel])
def testSetsQuantizeActivations_ErrorOnWrongNumberOfActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(
layer, [quantize_activation, quantize_activation])
def testGetsResultQuantizers_ReturnsQuantizer(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
[], [], True)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertLen(output_quantizers, 1)
self._assert_activation_quantizers(output_quantizers)
def testGetsResultQuantizers_EmptyWhenFalse(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
[], [], False)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertEqual([], output_quantizers)
def testSerialization(self):
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
expected_config = {
'class_name': 'Default8BitQuantizeConfig',
'config': {
'weight_attrs': ['kernel'],
'activation_attrs': ['activation'],
'quantize_output': False
}
}
serialized_quantize_config = tf_utils.serialize_keras_object(
quantize_config
)
self.assertEqual(expected_config, serialized_quantize_config)
quantize_config_from_config = (
tf_utils.deserialize_keras_object(
serialized_quantize_config,
module_objects=globals(),
custom_objects=configs._types_dict(),
)
)
self.assertEqual(quantize_config, quantize_config_from_config)
@parameterized.parameters(
configs.LastValueQuantizer,
configs.MovingAverageQuantizer,
configs.NoQuantizer)
class QuantizersTest(tf.test.TestCase, parameterized.TestCase):
def _simple_dense_layer(self):
layer = tf.keras.layers.Dense(2)
layer.build(input_shape=(3,))
return layer
def _get_quant_params(self, quantizer_type):
if quantizer_type == configs.NoQuantizer:
return {}
return {
'num_bits': 8,
'per_axis': False,
'symmetric': False,
'narrow_range': False
}
def _test_quantizer(self, quantizer):
inputs = tf.Variable(
np.array([[-1.0, 0.5], [0.0, 1.0]]),
name='inputs',
dtype=tf.dtypes.float32)
min_var = tf.Variable(0.0)
max_var = tf.Variable(0.0)
weights = {'min_var': min_var, 'max_var': max_var}
quant_tensor = quantizer(inputs, training=True, weights=weights)
results = self.evaluate(quant_tensor)
min_max_values = self.evaluate([min_var, max_var])
# TODO(pulkitb): Assert on expected values for testing.
# Since the underlying code is already tested in quant_ops_test.py, this
# just ensures the Quantizers code is wired properly.
print('Result: ', results)
print('min_var: ', min_max_values[0])
print('max_var: ', min_max_values[1])
layer = self._simple_dense_layer()
weights = quantizer.build(tf.TensorShape([1, 1, 1]), 'test', layer)
if isinstance(quantizer, (
configs.LastValueQuantizer, configs.MovingAverageQuantizer)):
self.assertLen(weights, 2)
self.assertFalse(weights['min_var'].trainable)
self.assertFalse(weights['max_var'].trainable)
elif isinstance(quantizer, configs.NoQuantizer):
self.assertEmpty(weights)
def testQuantizer(self, quantizer_type):
quantizer = quantizer_type(**self._get_quant_params(quantizer_type))
self._test_quantizer(quantizer)
def testSerialization(self, quantizer_type):
quantizer = quantizer_type(**self._get_quant_params(quantizer_type))
expected_config = {
'class_name': quantizer_type.__name__,
'config': self._get_quant_params(quantizer_type),
}
serialized_quantizer = tf_utils.serialize_keras_object(
quantizer
)
self.assertEqual(expected_config, serialized_quantizer)
quantizer_from_config = tf_utils.deserialize_keras_object(
serialized_quantizer,
module_objects=globals(),
custom_objects=configs._types_dict(),
)
self.assertEqual(quantizer, quantizer_from_config)
if __name__ == '__main__':
tf.test.main()
| 9,371 | 31.541667 | 79 | py |
models | models-master/official/projects/qat/nlp/quantization/configs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom quantize configs."""
from typing import Sequence, Callable, Tuple, Any, Dict
import tensorflow as tf
import tensorflow_model_optimization as tfmot
Quantizer = tfmot.quantization.keras.quantizers.Quantizer
Layer = tf.keras.layers.Layer
Activation = Callable[[tf.Tensor], tf.Tensor]
WeightAndQuantizer = Tuple[tf.Variable, Quantizer]
ActivationAndQuantizer = Tuple[Activation, Quantizer]
class _QuantizeHelper(object):
"""Mixin with helper functions for quantizers."""
def _add_range_weights(self, layer, name, per_axis=False, tensor_shape=None):
"""Add min and max vars to layer."""
    # Add a naming index to avoid duplicated weight names.
if hasattr(layer, 'quantize_helper_weight_idx'):
layer.quantize_helper_weight_idx += 1
name = '{}/{}'.format(layer.quantize_helper_weight_idx, name)
else:
layer.quantize_helper_weight_idx = 0
shape = None
if per_axis and tensor_shape is not None:
shape = (tensor_shape[-1])
min_weight = layer.add_weight(
name + '_min',
initializer=tf.keras.initializers.Constant(-6.0),
trainable=False,
shape=shape)
max_weight = layer.add_weight(
name + '_max',
initializer=tf.keras.initializers.Constant(6.0),
trainable=False,
shape=shape)
return {'min_var': min_weight, 'max_var': max_weight}
class LastValueQuantizer(
_QuantizeHelper,
tfmot.quantization.keras.quantizers.LastValueQuantizer):
pass
class MovingAverageQuantizer(
_QuantizeHelper,
tfmot.quantization.keras.quantizers.MovingAverageQuantizer):
pass
class NoQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
"""Dummy quantizer do nothing."""
def __call__(self, inputs, training, weights, **kwargs):
return tf.identity(inputs)
def get_config(self):
return {}
def build(self, tensor_shape, name, layer):
return {}
def __eq__(self, other):
if not isinstance(other, NoQuantizer):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class DefaultEinsumDenseQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for EinsumDense layer."""
# Configure how to quantize weights.
def get_weights_and_quantizers(self, layer):
return [(layer.kernel, LastValueQuantizer(
num_bits=8, symmetric=True, narrow_range=False, per_axis=False))]
# Configure how to quantize activations.
def get_activations_and_quantizers(self, layer):
return [(layer.activation, MovingAverageQuantizer(
num_bits=8, symmetric=False, narrow_range=False, per_axis=False))]
def set_quantize_weights(self, layer, quantize_weights):
    # Add this line for each item returned in `get_weights_and_quantizers`,
    # in the same order.
layer.kernel = quantize_weights[0]
def set_quantize_activations(self, layer, quantize_activations):
    # Add this line for each item returned in `get_activations_and_quantizers`,
    # in the same order.
layer.activation = quantize_activations[0]
# Configure how to quantize outputs (may be equivalent to activations).
def get_output_quantizers(self, layer):
return []
def get_config(self):
return {}
# pylint: disable=protected-access
class DefaultMultiHeadAttentionQuantizeConfig(
tfmot.quantization.keras.QuantizeConfig):
"""Default quantize config for MultiHeadAttention layer.
  It only quantizes child EinsumDense layers. It should be applied to the
  MultiHeadAttentionQuantized layer.
"""
def __init__(self):
self.einsum_dense_config = DefaultEinsumDenseQuantizeConfig()
self.num_weight_per_einsum_dense = 1
self.num_activation_per_einsum_dense = 1
def _get_einsum_dense_layers(self, layer):
return [
layer._query_dense,
layer._key_dense,
layer._value_dense,
layer._output_dense]
def get_weights_and_quantizers(self, layer):
ret = []
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
ret += self.einsum_dense_config.get_weights_and_quantizers(
einsum_dense_layer)
return ret
def get_activations_and_quantizers(self, layer):
ret = []
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
ret += self.einsum_dense_config.get_activations_and_quantizers(
einsum_dense_layer)
return ret
def set_quantize_weights(self, layer, quantize_weights):
idx = 0
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
self.einsum_dense_config.set_quantize_weights(
einsum_dense_layer,
quantize_weights[idx:idx+self.num_weight_per_einsum_dense])
idx += self.num_weight_per_einsum_dense
def set_quantize_activations(self, layer, quantize_activations):
idx = 0
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
self.einsum_dense_config.set_quantize_activations(
einsum_dense_layer,
quantize_activations[idx:idx+self.num_activation_per_einsum_dense])
idx += self.num_activation_per_einsum_dense
def get_output_quantizers(self, layer):
return []
def get_config(self):
return {}
# pylint: enable=protected-access
class Default8BitOutputQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig which only quantizes the output from a layer."""
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
return []
def set_quantize_weights(self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
pass
def set_quantize_activations(self,
layer: Layer,
quantize_activations: Sequence[Activation]):
pass
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
return [
MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
]
def get_config(self) -> Dict[str, Any]:
return {}
class Default8BitActivationQuantizeConfig(
tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for keras.layers.Activation.
`keras.layers.Activation` needs a separate `QuantizeConfig` since the
decision to quantize depends on the specific activation type.
"""
def _assert_activation_layer(self, layer: Layer):
if not isinstance(layer, tf.keras.layers.Activation):
raise RuntimeError(
'Default8BitActivationQuantizeConfig can only be used with '
'`keras.layers.Activation`.')
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
"""See base class."""
self._assert_activation_layer(layer)
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
"""See base class."""
self._assert_activation_layer(layer)
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
"""See base class."""
self._assert_activation_layer(layer)
if not hasattr(layer.activation, '__name__'):
raise ValueError('Activation {} not supported by '
'Default8BitActivationQuantizeConfig.'.format(
layer.activation))
# This code is copied from TFMOT repo, but added relu6 to support mobilenet.
if layer.activation.__name__ in ['relu', 'relu6']:
# 'relu' should generally get fused into the previous layer.
return [MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]
elif layer.activation.__name__ in ['linear', 'softmax', 'sigmoid']:
return []
raise ValueError('Activation {} not supported by '
'Default8BitActivationQuantizeConfig.'.format(
layer.activation))
def get_config(self) -> Dict[str, Any]:
"""Get a config for this quantizer config."""
return {}
class NoQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""Empty quantize config."""
# Configure how to quantize weights.
def get_weights_and_quantizers(self, layer):
return []
# Configure how to quantize activations.
def get_activations_and_quantizers(self, layer):
return []
def set_quantize_weights(self, layer, quantize_weights):
    # This config quantizes nothing, so there are no weights to set.
pass
def set_quantize_activations(self, layer, quantize_activations):
    # This config quantizes nothing, so there are no activations to set.
pass
# Configure how to quantize outputs (may be equivalent to activations).
def get_output_quantizers(self, layer):
return []
def get_config(self):
return {}
class Default8BitQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for non recurrent Keras layers."""
def __init__(self, weight_attrs, activation_attrs, quantize_output):
self.weight_attrs = weight_attrs
self.activation_attrs = activation_attrs
self.quantize_output = quantize_output
# TODO(pulkitb): For some layers such as Conv2D, per_axis should be True.
# Add mapping for which layers support per_axis.
self.weight_quantizer = LastValueQuantizer(
num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
self.activation_quantizer = MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def get_weights_and_quantizers(self, layer):
return [(getattr(layer, weight_attr), self.weight_quantizer)
for weight_attr in self.weight_attrs]
def get_activations_and_quantizers(self, layer):
return [(getattr(layer, activation_attr), self.activation_quantizer)
for activation_attr in self.activation_attrs]
def set_quantize_weights(self, layer, quantize_weights):
if len(self.weight_attrs) != len(quantize_weights):
raise ValueError(
'`set_quantize_weights` called on layer {} with {} '
'weight parameters, but layer expects {} values.'.format(
layer.name, len(quantize_weights), len(self.weight_attrs)))
for weight_attr, weight in zip(self.weight_attrs, quantize_weights):
current_weight = getattr(layer, weight_attr)
if current_weight.shape != weight.shape:
        raise ValueError('Existing layer weight shape {} is incompatible with '
'provided weight shape {}'.format(
current_weight.shape, weight.shape))
setattr(layer, weight_attr, weight)
def set_quantize_activations(self, layer, quantize_activations):
if len(self.activation_attrs) != len(quantize_activations):
raise ValueError(
'`set_quantize_activations` called on layer {} with {} '
'activation parameters, but layer expects {} values.'.format(
layer.name, len(quantize_activations),
len(self.activation_attrs)))
for activation_attr, activation in zip(
self.activation_attrs, quantize_activations):
setattr(layer, activation_attr, activation)
def get_output_quantizers(self, layer):
if self.quantize_output:
return [self.activation_quantizer]
return []
@classmethod
def from_config(cls, config):
"""Instantiates a `Default8BitQuantizeConfig` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `Default8BitQuantizeConfig` instance.
"""
return cls(**config)
def get_config(self):
# TODO(pulkitb): Add weight and activation quantizer to config.
# Currently it's created internally, but ideally the quantizers should be
# part of the constructor and passed in from the registry.
return {
'weight_attrs': self.weight_attrs,
'activation_attrs': self.activation_attrs,
'quantize_output': self.quantize_output
}
def __eq__(self, other):
if not isinstance(other, Default8BitQuantizeConfig):
return False
return (self.weight_attrs == other.weight_attrs and
            self.activation_attrs == other.activation_attrs and
self.weight_quantizer == other.weight_quantizer and
self.activation_quantizer == other.activation_quantizer and
self.quantize_output == other.quantize_output)
def __ne__(self, other):
return not self.__eq__(other)
def _types_dict():
return {
'NoQuantizer':
NoQuantizer,
'LastValueQuantizer':
LastValueQuantizer,
'MovingAverageQuantizer':
MovingAverageQuantizer,
'DefaultEinsumDenseQuantizeConfig':
DefaultEinsumDenseQuantizeConfig,
'DefaultMultiHeadAttentionQuantizeConfig':
DefaultMultiHeadAttentionQuantizeConfig,
'Default8BitOutputQuantizeConfig':
Default8BitOutputQuantizeConfig,
'Default8BitActivationQuantizeConfig':
Default8BitActivationQuantizeConfig,
'NoQuantizeConfig':
NoQuantizeConfig,
'Default8BitQuantizeConfig':
Default8BitQuantizeConfig,
}
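# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of wiring `Default8BitQuantizeConfig` into the TF-MOT
# annotate/apply workflow for a plain Dense layer. It assumes the `tf` and
# `tfmot` imports at the top of this module; 'kernel' and 'activation' are the
# standard Dense-layer attribute names. Treat this as an example under those
# assumptions, not as this project's canonical quantization entry point.
def _example_quantize_dense_layer():
  dense = tfmot.quantization.keras.quantize_annotate_layer(
      tf.keras.layers.Dense(8, activation='relu'),
      Default8BitQuantizeConfig(['kernel'], ['activation'], False))
  model = tf.keras.Sequential(
      [tf.keras.layers.InputLayer(input_shape=(4,)), dense])
  with tfmot.quantization.keras.quantize_scope(_types_dict()):
    return tfmot.quantization.keras.quantize_apply(model)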
| 14,177 | 32.597156 | 80 | py |
models | models-master/official/projects/qat/nlp/modeling/networks/span_labeling.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Span labeling network."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.nlp.quantization import configs
def _apply_paragraph_mask(logits, paragraph_mask):
"""Applies a position mask to calculated logits."""
masked_logits = logits * (paragraph_mask) - 1e30 * (1 - paragraph_mask)
return tf.nn.log_softmax(masked_logits, -1), masked_logits
@tf.keras.utils.register_keras_serializable(package='Text')
class SpanLabelingQuantized(tf.keras.Model):
"""Span labeling network head for BERT modeling.
This network implements a simple single-span labeler based on a dense layer.
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
input_width: The innermost dimension of the input tensor to this network.
activation: The activation, if any, for the dense layer in this network.
initializer: The initializer for the dense layer in this network. Defaults
to a Glorot uniform initializer.
output: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
input_width,
activation=None,
initializer='glorot_uniform',
output='logits',
**kwargs):
sequence_data = tf.keras.layers.Input(
shape=(None, input_width), name='sequence_data', dtype=tf.float32)
logits_layer = tf.keras.layers.Dense(
2, # This layer predicts start location and end location.
activation=activation,
kernel_initializer=initializer,
name='predictions/transform/logits')
logits_layer = tfmot.quantization.keras.QuantizeWrapperV2(
logits_layer,
configs.Default8BitQuantizeConfig(['kernel'], ['activation'], False))
intermediate_logits = logits_layer(sequence_data)
start_logits, end_logits = self._split_output_tensor(intermediate_logits)
start_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(
start_logits)
end_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(end_logits)
if output == 'logits':
output_tensors = [start_logits, end_logits]
elif output == 'predictions':
output_tensors = [start_predictions, end_predictions]
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super().__init__(
inputs=[sequence_data], outputs=output_tensors, **kwargs)
config_dict = {
'input_width': input_width,
'activation': activation,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.start_logits = start_logits
self.end_logits = end_logits
def _split_output_tensor(self, tensor):
transposed_tensor = tf.transpose(tensor, [2, 0, 1])
return tf.unstack(transposed_tensor)
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
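# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming encoder outputs of hidden width 16 for a batch of
# two sequences of length eight; the head emits per-position start/end logits
# of shape (batch, sequence), i.e. (2, 8) here.
def _example_span_labeling_head():
  head = SpanLabelingQuantized(input_width=16)
  sequence_output = tf.random.uniform(shape=(2, 8, 16))
  start_logits, end_logits = head(sequence_output)
  return start_logits, end_logits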
| 4,622 | 38.512821 | 80 | py |
models | models-master/official/projects/qat/nlp/modeling/models/bert_span_labeler.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Question Answering model."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.projects.qat.nlp.modeling.networks import span_labeling
@tf.keras.utils.register_keras_serializable(package='Text')
class BertSpanLabelerQuantized(tf.keras.Model):
"""Span labeler model based on a BERT-style transformer-based encoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertSpanLabeler allows a user to pass in a transformer encoder, and
instantiates a span labeling network based on a single dense layer.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a `get_embedding_table` method.
initializer: The initializer (if any) to use in the span labeling network.
Defaults to a Glorot uniform initializer.
    output: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
network,
initializer='glorot_uniform',
output='logits',
**kwargs):
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a handle to the network inputs for use
# when we construct the Model object at the end of init.
inputs = network.inputs
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
outputs = network(inputs)
if isinstance(outputs, list):
sequence_output = outputs[0]
else:
sequence_output = outputs['sequence_output']
# The input network (typically a transformer model) may get outputs from all
# layers. When this case happens, we retrieve the last layer output.
if isinstance(sequence_output, list):
sequence_output = sequence_output[-1]
# This is an instance variable for ease of access to the underlying task
# network.
span_labeling_quantized = span_labeling.SpanLabelingQuantized(
input_width=sequence_output.shape[-1],
initializer=initializer,
output=output,
name='span_labeling')
start_logits, end_logits = span_labeling_quantized(sequence_output)
# Use identity layers wrapped in lambdas to explicitly name the output
# tensors. This allows us to use string-keyed dicts in Keras fit/predict/
# evaluate calls.
start_logits = tf.keras.layers.Lambda(
tf.identity, name='start_positions')(
start_logits)
end_logits = tf.keras.layers.Lambda(
tf.identity, name='end_positions')(
end_logits)
logits = [start_logits, end_logits]
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super().__init__(
inputs=inputs, outputs=logits, **kwargs)
self._network = network
config_dict = {
'network': network,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.span_labeling = span_labeling_quantized
@property
def checkpoint_items(self):
return dict(encoder=self._network)
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
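# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming `official.nlp.modeling.networks.BertEncoder` (or
# any encoder exposing `inputs` and a sequence output) as the backbone; the
# local import and the toy sizes below are assumptions made for this example.
def _example_bert_span_labeler():
  from official.nlp.modeling import networks
  encoder = networks.BertEncoder(
      vocab_size=100, hidden_size=32, num_attention_heads=4, num_layers=2)
  return BertSpanLabelerQuantized(network=encoder)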
| 5,056 | 39.134921 | 80 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/transformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based TransformerEncoder block layer."""
from typing import Optional
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.nlp.modeling.layers.multi_head_attention import MultiHeadAttentionQuantized
from official.projects.qat.nlp.quantization import configs
from official.projects.qat.nlp.quantization import wrappers
def _quantized_multi_head_attention(*args, **kwargs):
layer = MultiHeadAttentionQuantized(*args, **kwargs)
return wrappers.MultiHeadAttentionQuantizeWrapper(
layer, configs.DefaultMultiHeadAttentionQuantizeConfig())
def _quantized_einsum_dense(*args, **kwargs):
layer = tf.keras.layers.EinsumDense(*args, **kwargs)
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.DefaultEinsumDenseQuantizeConfig())
def _output_quantize(layer):
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.Default8BitOutputQuantizeConfig())
class TransformerEncoderBlockQuantized(tf.keras.layers.Layer):
"""TransformerEncoderBlock layer.
This layer implements the Transformer Encoder from
"Attention Is All You Need". (https://arxiv.org/abs/1706.03762),
which combines a `tf.keras.layers.MultiHeadAttention` layer with a
two-layer feedforward network.
References:
[Attention Is All You Need](https://arxiv.org/abs/1706.03762)
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805)
"""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
"""Initializes `TransformerEncoderBlock`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
      bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
      **kwargs: keyword arguments.
"""
super().__init__(**kwargs)
if output_range is not None:
logging.warning("`output_range` is available as an argument for `call()`."
"The `output_range` as __init__ argument is deprecated.")
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = self._kernel_initializer
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
if len(input_tensor_shape.as_list()) != 3:
raise ValueError("TransformerEncoderBlock expects a three-dimensional "
"input of shape [batch, sequence, width].")
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = _quantized_multi_head_attention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = _output_quantize(
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = _quantized_einsum_dense(
"abc,cd->abd",
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=self._kernel_initializer,
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = _output_quantize(
tf.keras.layers.Activation(
self._inner_activation, dtype=policy))
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = _quantized_einsum_dense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=self._kernel_initializer,
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = _output_quantize(
tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._add = _output_quantize(tf.keras.layers.Add())
self._output_add = tf.keras.layers.Add()
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes": self._attention_axes,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, output_range: Optional[tf.Tensor] = None):
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced. If you
would like to have no change to the model training, it is better to only
set the `output_range` for serving.
Returns:
      An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if output_range is None:
output_range = self._output_range
if output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = self._add([source_tensor, attention_output])
else:
attention_output = self._attention_layer_norm(
self._add([target_tensor, attention_output]))
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return self._output_add([source_attention_output, layer_output])
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(
self._output_add([layer_output, attention_output]))
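# --- Illustrative usage (not part of the original module) ---
# A minimal sketch: one quantized encoder block applied to a random
# (batch, sequence, width) tensor. The width must be divisible by the number
# of attention heads, and the output keeps the input shape.
def _example_encoder_block():
  block = TransformerEncoderBlockQuantized(
      num_attention_heads=4, inner_dim=64, inner_activation="relu")
  features = tf.random.uniform(shape=(2, 8, 32))
  return block(features)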
| 14,705 | 41.626087 | 102 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/mobile_bert_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileBERT embedding and transformer layers."""
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.nlp import modeling
from official.projects.qat.nlp.modeling.layers.multi_head_attention import MultiHeadAttentionQuantized
from official.projects.qat.nlp.quantization import configs
from official.projects.qat.nlp.quantization import helper
from official.projects.qat.nlp.quantization import wrappers
def _quantized_multi_head_attention(*args, **kwargs):
layer = MultiHeadAttentionQuantized(*args, **kwargs)
return wrappers.MultiHeadAttentionQuantizeWrapper(
layer, configs.DefaultMultiHeadAttentionQuantizeConfig())
def _quantized_einsum_dense(*args, **kwargs):
layer = tf.keras.layers.EinsumDense(*args, **kwargs)
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.DefaultEinsumDenseQuantizeConfig())
def _output_quantize(layer):
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.Default8BitOutputQuantizeConfig())
@tf.keras.utils.register_keras_serializable(package='Text')
class NoNormQuantized(tf.keras.layers.Layer):
"""Apply element-wise linear transformation to the last dimension."""
def __init__(self, name=None):
super().__init__(name=name)
def build(self, shape):
    kernel_size = shape[-1]
    self.bias = self.add_weight('beta',
                                shape=[kernel_size],
                                initializer='zeros')
    self.scale = self.add_weight('gamma',
                                 shape=[kernel_size],
                                 initializer='ones')
self.multiply = _output_quantize(
tf.keras.layers.Multiply())
def call(self, feature):
broadcast_shape = tf.shape(feature)
scale = tf.broadcast_to(self.scale, broadcast_shape)
output = self.multiply([feature, scale])
return output + self.bias
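# --- Illustrative note (not part of the original module) ---
# NoNormQuantized replaces layer normalization in the MobileBERT student with a
# per-channel affine map, i.e. output = gamma * feature + beta. A minimal
# sketch on a random (batch, sequence, channels) tensor:
def _example_no_norm():
  layer = NoNormQuantized()
  feature = tf.random.uniform(shape=(2, 8, 16))
  return layer(feature)  # Same shape as `feature`.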
def _get_norm_layer(normalization_type='no_norm', name=None):
"""Get normlization layer.
Args:
normalization_type: String. The type of normalization_type, only `no_norm`
and `layer_norm` are supported.
name: Name for the norm layer.
Returns:
layer norm class.
"""
if normalization_type == 'no_norm':
layer = NoNormQuantized(name=name)
elif normalization_type == 'layer_norm':
layer = tf.keras.layers.LayerNormalization(
name=name,
axis=-1,
epsilon=1e-12,
dtype=tf.float32)
else:
raise NotImplementedError('Only "no_norm" and "layer_norm" are supported.')
return layer
class MobileBertEmbeddingQuantized(helper.LayerQuantizerHelper,
tf.keras.layers.Layer):
"""Performs an embedding lookup for MobileBERT.
This layer includes word embedding, token type embedding, position embedding.
"""
def __init__(self,
word_vocab_size,
word_embed_size,
type_vocab_size,
output_embed_size,
max_sequence_length=512,
normalization_type='no_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
dropout_rate=0.1,
**kwargs):
"""Class initialization.
Args:
word_vocab_size: Number of words in the vocabulary.
word_embed_size: Word embedding size.
type_vocab_size: Number of word types.
output_embed_size: Embedding size for the final embedding output.
max_sequence_length: Maximum length of input sequence.
      normalization_type: String. The type of normalization; only `no_norm`
and `layer_norm` are supported.
initializer: The initializer to use for the embedding weights and linear
projection weights.
dropout_rate: Dropout rate.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
self.word_vocab_size = word_vocab_size
self.word_embed_size = word_embed_size
self.type_vocab_size = type_vocab_size
self.output_embed_size = output_embed_size
self.max_sequence_length = max_sequence_length
self.normalization_type = normalization_type
self.initializer = tf.keras.initializers.get(initializer)
self.dropout_rate = dropout_rate
self.word_embedding = modeling.layers.OnDeviceEmbedding(
self.word_vocab_size,
self.word_embed_size,
initializer=initializer,
name='word_embedding')
self.type_embedding = modeling.layers.OnDeviceEmbedding(
self.type_vocab_size,
self.output_embed_size,
initializer=initializer,
name='type_embedding')
self.pos_embedding = modeling.layers.PositionEmbedding(
max_length=max_sequence_length,
initializer=initializer,
name='position_embedding')
self.word_embedding_proj = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.output_embed_size],
kernel_initializer=initializer,
bias_axes='d',
name='embedding_projection')
self.embedding_out_add_pos = _output_quantize(tf.keras.layers.Add())
self.layer_norm = _output_quantize(
_get_norm_layer(normalization_type, 'embedding_norm'))
self.dropout_layer = tf.keras.layers.Dropout(
self.dropout_rate,
name='embedding_dropout')
self.embedding_out_add_type = _output_quantize(tf.keras.layers.Add())
def build(self, input_shape):
self._add_quantizer('word_embedding_out')
self._add_quantizer('pos_embedding_out')
self._add_quantizer('type_embedding_out')
self._build_quantizer_vars()
def get_config(self):
config = {
'word_vocab_size': self.word_vocab_size,
'word_embed_size': self.word_embed_size,
'type_vocab_size': self.type_vocab_size,
'output_embed_size': self.output_embed_size,
'max_sequence_length': self.max_sequence_length,
'normalization_type': self.normalization_type,
'initializer': tf.keras.initializers.serialize(self.initializer),
'dropout_rate': self.dropout_rate
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, input_ids, token_type_ids=None, training=None):
word_embedding_out = self.word_embedding(input_ids)
word_embedding_out = self._apply_quantizer(
'word_embedding_out', word_embedding_out, training)
word_embedding_out = tf.concat(
[tf.pad(word_embedding_out[:, 1:], ((0, 0), (0, 1), (0, 0))),
word_embedding_out,
tf.pad(word_embedding_out[:, :-1], ((0, 0), (1, 0), (0, 0)))],
axis=2)
word_embedding_out = self.word_embedding_proj(word_embedding_out)
pos_embedding_out = self.pos_embedding(word_embedding_out)
pos_embedding_out = self._apply_quantizer(
'pos_embedding_out', pos_embedding_out, training)
embedding_out = self.embedding_out_add_pos([
word_embedding_out, pos_embedding_out])
if token_type_ids is not None:
type_embedding_out = self.type_embedding(token_type_ids)
type_embedding_out = self._apply_quantizer(
'type_embedding_out', type_embedding_out, training)
embedding_out = self.embedding_out_add_type([
embedding_out, type_embedding_out])
embedding_out = self.layer_norm(embedding_out)
embedding_out = self.dropout_layer(embedding_out)
return embedding_out
class MobileBertTransformerQuantized(tf.keras.layers.Layer):
"""Transformer block for MobileBERT.
An implementation of one layer (block) of Transformer with bottleneck and
  inverted-bottleneck for MobileBERT.
Original paper for MobileBERT:
https://arxiv.org/pdf/2004.02984.pdf
"""
def __init__(self,
hidden_size=512,
num_attention_heads=4,
intermediate_size=512,
intermediate_act_fn='relu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
intra_bottleneck_size=128,
use_bottleneck_attention=False,
key_query_shared_bottleneck=True,
num_feedforward_networks=4,
normalization_type='no_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
**kwargs):
"""Class initialization.
Args:
hidden_size: Hidden size for the Transformer input and output tensor.
num_attention_heads: Number of attention heads in the Transformer.
intermediate_size: The size of the "intermediate" (a.k.a., feed forward)
layer.
intermediate_act_fn: The non-linear activation function to apply to the
output of the intermediate/feed-forward layer.
hidden_dropout_prob: Dropout probability for the hidden layers.
attention_probs_dropout_prob: Dropout probability of the attention
probabilities.
intra_bottleneck_size: Size of bottleneck.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: Whether to share linear transformation for
keys and queries.
num_feedforward_networks: Number of stacked feed-forward networks.
      normalization_type: The type of normalization; only `no_norm` and
`layer_norm` are supported. `no_norm` represents the element-wise linear
transformation for the student model, as suggested by the original
MobileBERT paper. `layer_norm` is used for the teacher model.
initializer: The initializer to use for the embedding weights and linear
projection weights.
**kwargs: keyword arguments.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_act_fn = intermediate_act_fn
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.intra_bottleneck_size = intra_bottleneck_size
self.use_bottleneck_attention = use_bottleneck_attention
self.key_query_shared_bottleneck = key_query_shared_bottleneck
self.num_feedforward_networks = num_feedforward_networks
self.normalization_type = normalization_type
self.initializer = tf.keras.initializers.get(initializer)
if intra_bottleneck_size % num_attention_heads != 0:
raise ValueError(
(f'The bottleneck size {intra_bottleneck_size} is not a multiple '
f'of the number of attention heads {num_attention_heads}.'))
attention_head_size = int(intra_bottleneck_size / num_attention_heads)
self.block_layers = {}
# add input bottleneck
dense_layer_2d = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=initializer,
name='bottleneck_input/dense')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='bottleneck_input/norm'))
self.block_layers['bottleneck_input'] = [dense_layer_2d,
layer_norm]
if self.key_query_shared_bottleneck:
dense_layer_2d = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=initializer,
name='kq_shared_bottleneck/dense')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='kq_shared_bottleneck/norm'))
self.block_layers['kq_shared_bottleneck'] = [dense_layer_2d,
layer_norm]
# add attention layer
attention_layer = _quantized_multi_head_attention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=initializer,
name='attention')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='attention/norm'))
self.block_layers['attention'] = [attention_layer,
layer_norm]
# add stacked feed-forward networks (ffn)
self.block_layers['ffn'] = []
self.ffn_add_layers = []
for ffn_layer_idx in range(self.num_feedforward_networks):
layer_prefix = f'ffn_layer_{ffn_layer_idx}'
layer_name = layer_prefix + '/intermediate_dense'
intermediate_layer = _quantized_einsum_dense(
'abc,cd->abd',
activation=self.intermediate_act_fn,
output_shape=[None, self.intermediate_size],
bias_axes='d',
kernel_initializer=initializer,
name=layer_name)
layer_name = layer_prefix + '/output_dense'
output_layer = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=initializer,
name=layer_name)
layer_name = layer_prefix + '/norm'
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name=layer_name))
self.block_layers['ffn'].append([intermediate_layer,
output_layer,
layer_norm])
self.ffn_add_layers.append(_output_quantize(
tf.keras.layers.Add()))
# add output bottleneck
bottleneck = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.hidden_size],
activation=None,
bias_axes='d',
kernel_initializer=initializer,
name='bottleneck_output/dense')
dropout_layer = tf.keras.layers.Dropout(
self.hidden_dropout_prob,
name='bottleneck_output/dropout')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='bottleneck_output/norm'))
self.block_layers['bottleneck_output'] = [bottleneck,
dropout_layer,
layer_norm]
self.attention_output_add = _output_quantize(
tf.keras.layers.Add())
self.output_add = _output_quantize(
tf.keras.layers.Add())
def get_config(self):
config = {
'hidden_size': self.hidden_size,
'num_attention_heads': self.num_attention_heads,
'intermediate_size': self.intermediate_size,
'intermediate_act_fn': self.intermediate_act_fn,
'hidden_dropout_prob': self.hidden_dropout_prob,
'attention_probs_dropout_prob': self.attention_probs_dropout_prob,
'intra_bottleneck_size': self.intra_bottleneck_size,
'use_bottleneck_attention': self.use_bottleneck_attention,
'key_query_shared_bottleneck': self.key_query_shared_bottleneck,
'num_feedforward_networks': self.num_feedforward_networks,
'normalization_type': self.normalization_type,
'initializer': tf.keras.initializers.serialize(self.initializer),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
input_tensor,
attention_mask=None,
return_attention_scores=False):
"""Implementes the forward pass.
Args:
input_tensor: Float tensor of shape `(batch_size, seq_length,
hidden_size)`.
attention_mask: (optional) int32 tensor of shape `(batch_size, seq_length,
seq_length)`, with 1 for positions that can be attended to and 0 in
positions that should not be.
      return_attention_scores: Whether to also return the attention scores.
Returns:
layer_output: Float tensor of shape
`(batch_size, seq_length, hidden_size)`.
attention_scores (Optional): Only when return_attention_scores is True.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
input_width = input_tensor.shape.as_list()[-1]
if input_width != self.hidden_size:
raise ValueError(
(f'The width of the input tensor {input_width} != '
f'hidden size {self.hidden_size}'))
prev_output = input_tensor
# input bottleneck
dense_layer = self.block_layers['bottleneck_input'][0]
layer_norm = self.block_layers['bottleneck_input'][1]
layer_input = dense_layer(prev_output)
layer_input = layer_norm(layer_input)
if self.use_bottleneck_attention:
key_tensor = layer_input
query_tensor = layer_input
value_tensor = layer_input
elif self.key_query_shared_bottleneck:
dense_layer = self.block_layers['kq_shared_bottleneck'][0]
layer_norm = self.block_layers['kq_shared_bottleneck'][1]
shared_attention_input = dense_layer(prev_output)
shared_attention_input = layer_norm(shared_attention_input)
key_tensor = shared_attention_input
query_tensor = shared_attention_input
value_tensor = prev_output
else:
key_tensor = prev_output
query_tensor = prev_output
value_tensor = prev_output
# attention layer
attention_layer = self.block_layers['attention'][0]
layer_norm = self.block_layers['attention'][1]
attention_output, attention_scores = attention_layer(
query_tensor,
value_tensor,
key_tensor,
attention_mask,
return_attention_scores=True,
)
attention_output = layer_norm(
self.attention_output_add([attention_output, layer_input]))
# stacked feed-forward networks
layer_input = attention_output
for ffn_idx in range(self.num_feedforward_networks):
intermediate_layer = self.block_layers['ffn'][ffn_idx][0]
output_layer = self.block_layers['ffn'][ffn_idx][1]
layer_norm = self.block_layers['ffn'][ffn_idx][2]
intermediate_output = intermediate_layer(layer_input)
layer_output = output_layer(intermediate_output)
layer_output = layer_norm(
self.ffn_add_layers[ffn_idx]([layer_output, layer_input]))
layer_input = layer_output
# output bottleneck
bottleneck = self.block_layers['bottleneck_output'][0]
dropout_layer = self.block_layers['bottleneck_output'][1]
layer_norm = self.block_layers['bottleneck_output'][2]
layer_output = bottleneck(layer_output)
layer_output = dropout_layer(layer_output)
layer_output = layer_norm(self.output_add([layer_output, prev_output]))
if return_attention_scores:
return layer_output, attention_scores
else:
return layer_output
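# --- Illustrative usage (not part of the original module) ---
# A minimal sketch wiring the quantized MobileBERT embedding into a single
# quantized transformer block; all sizes are toy values chosen only for this
# example. Both outputs have width `output_embed_size` / `hidden_size` = 64.
def _example_mobile_bert_layers():
  embedding = MobileBertEmbeddingQuantized(
      word_vocab_size=100, word_embed_size=32,
      type_vocab_size=2, output_embed_size=64)
  transformer = MobileBertTransformerQuantized(
      hidden_size=64, num_attention_heads=4,
      intermediate_size=128, intra_bottleneck_size=32)
  input_ids = tf.zeros((2, 8), dtype=tf.int32)
  embedding_out = embedding(input_ids)  # Shape (2, 8, 64).
  return transformer(embedding_out)  # Shape (2, 8, 64).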
| 19,438 | 38.997942 | 102 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/multi_head_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantized multi head attention layer."""
import math
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from official.projects.qat.nlp.quantization import helper
# -6 for mask adder before softmax on int8 model. (e^-6 < 1/256)
_MASK_CONSTANT_FOR_INT8_QUANTIZATION = 6
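# --- Illustrative note (not part of the original module) ---
# Worked check of the constant above: 8-bit activations resolve probabilities
# at best to 1/256 ~= 0.0039, while exp(-6) ~= 0.0025, so a masked logit offset
# of -6 already quantizes to zero probability after the softmax.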
class MultiHeadAttentionQuantized(helper.LayerQuantizerHelper,
tf.keras.layers.MultiHeadAttention):
"""Quantized multi head attention layer.
  This layer only quantizes the _compute_attention part. EinsumDense child
  layers should be quantized through the QuantizeConfig.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._compute_attention_first_call = True
def _build_from_signature(self, *args, **kwargs):
super()._build_from_signature( # pytype: disable=attribute-error # typed-keras
*args, **kwargs)
self._add_quantizer('query')
self._add_quantizer('attention_scores')
self._add_quantizer('attention_output')
self._add_quantizer('masked_softmax_attention_mask',
all_value_quantizer=True)
self._add_quantizer('masked_softmax_sub1')
self._add_quantizer('masked_softmax_mask1')
self._add_quantizer('masked_softmax_sub2')
self._add_quantizer('masked_softmax_clamp', all_value_quantizer=True)
self._add_quantizer('masked_softmax_mask2', all_value_quantizer=True)
self._add_quantizer('masked_softmax_adder_sub', all_value_quantizer=True)
self._add_quantizer('masked_softmax_adder_mul', all_value_quantizer=True)
self._add_quantizer('masked_softmax_add', all_value_quantizer=True)
def _masked_softmax(
self, attention_scores, attention_mask=None, training=None):
"""Normalize the attention scores to probabilities."""
# `attention_scores` = [B, N, T, S]
if attention_mask is None:
return self._softmax(attention_scores)
# The expand dim happens starting from the `num_heads` dimension,
# (<batch_dims>, num_heads, <query_attention_dims, key_attention_dims>)
mask_expansion_axes = [-len(self._attention_axes) * 2 - 1]
for _ in range(len(attention_scores.shape) - len(attention_mask.shape)):
attention_mask = array_ops.expand_dims(
attention_mask, axis=mask_expansion_axes)
if attention_scores.dtype != attention_mask.dtype:
attention_mask = tf.cast(attention_mask, attention_scores.dtype)
attention_mask = self._apply_quantizer(
'masked_softmax_attention_mask', attention_mask, training)
    # Makes attention_scores >= 0 so that the masked-out positions (zeroed
    # out below) cannot become the maximum value.
attention_scores -= math_ops.reduce_min(
attention_scores, axis=-1, keepdims=True)
attention_scores = self._apply_quantizer(
'masked_softmax_sub1', attention_scores, training)
attention_scores *= attention_mask
attention_scores = self._apply_quantizer(
'masked_softmax_mask1', attention_scores, training)
    # Makes attention_scores <= 0, with the maximum value being exactly 0.
attention_scores -= math_ops.reduce_max(
attention_scores, axis=-1, keepdims=True)
attention_scores = self._apply_quantizer(
'masked_softmax_sub2', attention_scores, training)
# Clip the range of values [-6, 0].
attention_scores = tf.clip_by_value(
attention_scores, clip_value_min=-6, clip_value_max=0)
attention_scores = self._apply_quantizer(
'masked_softmax_clamp', attention_scores, training)
    # We basically hard-code the to-be-masked-out part to -6.
    # The maximum value is 0. This is reasonable for 8-bit quantization
    # because e^(-6) / e^(0) < 1/256.
attention_scores *= attention_mask
attention_scores = self._apply_quantizer(
'masked_softmax_mask2', attention_scores, training)
adder = attention_mask - 1.0
adder = self._apply_quantizer('masked_softmax_adder_sub', adder, training)
adder *= _MASK_CONSTANT_FOR_INT8_QUANTIZATION
adder = self._apply_quantizer('masked_softmax_adder_mul', adder, training)
attention_scores += adder
attention_scores = self._apply_quantizer(
'masked_softmax_add', attention_scores, training)
return self._softmax(attention_scores)
def _compute_attention(self,
query,
key,
value,
attention_mask=None,
training=None):
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
      query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.
      key: Projected key `Tensor` of shape `[B, S, N, key_dim]`.
      value: Projected value `Tensor` of shape `[B, S, N, value_dim]`.
attention_mask: a boolean mask of shape `[B, T, S]`, that prevents
attention to certain positions.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
if self._compute_attention_first_call:
self._build_quantizer_vars()
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = math_ops.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
query = self._apply_quantizer('query', query, training)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = special_math_ops.einsum(self._dot_product_equation, key,
query)
attention_scores = self._apply_quantizer(
'attention_scores', attention_scores, training)
attention_scores = self._masked_softmax(
attention_scores, attention_mask, training)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_scores_dropout = self._dropout_layer(
attention_scores, training=training)
# `context_layer` = [B, T, N, H]
attention_output = special_math_ops.einsum(self._combine_equation,
attention_scores_dropout, value)
attention_output = self._apply_quantizer(
'attention_output', attention_output, training)
self._compute_attention_first_call = False
return attention_output, attention_scores
| 7,445 | 42.8 | 84 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/transformer_encoder_block_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based quantized transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.qat.nlp.modeling.layers.transformer_encoder_block import TransformerEncoderBlockQuantized
@parameterized.named_parameters(
('base', TransformerEncoderBlockQuantized))
class TransformerEncoderBlockQuantizedLayerTest(
tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockQuantizedLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer(input_data, output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer(input_data, output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
def test_separate_qkv(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
if __name__ == '__main__':
tf.test.main()
| 9,288 | 39.920705 | 112 | py |
models | models-master/official/projects/qat/nlp/tasks/question_answering.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Question/Answering configuration definition."""
import dataclasses
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.core import task_factory
from official.nlp import modeling
from official.nlp.tasks import question_answering
from official.projects.qat.nlp.modeling.layers import mobile_bert_layers
from official.projects.qat.nlp.modeling.layers import transformer_encoder_block
from official.projects.qat.nlp.modeling.models import bert_span_labeler
from official.projects.qat.nlp.quantization import configs
from official.projects.qat.nlp.quantization import schemes
@dataclasses.dataclass
class QuantizedModelQAConfig(question_answering.QuestionAnsweringConfig):
pass
@task_factory.register_task_cls(QuantizedModelQAConfig)
class QuantizedModelQATask(question_answering.QuestionAnsweringTask):
"""Task object for question answering with QAT."""
def build_model(self):
model = super(QuantizedModelQATask, self).build_model()
# pylint: disable=protected-access
encoder_network = model._network
# pylint: enable=protected-access
with tfmot.quantization.keras.quantize_scope({
'TruncatedNormal':
tf.keras.initializers.TruncatedNormal,
'MobileBertTransformerQuantized':
mobile_bert_layers.MobileBertTransformerQuantized,
'MobileBertEmbeddingQuantized':
mobile_bert_layers.MobileBertEmbeddingQuantized,
'TransformerEncoderBlockQuantized':
transformer_encoder_block.TransformerEncoderBlockQuantized,
'NoQuantizeConfig':
configs.NoQuantizeConfig,
}):

      def quantize_annotate_layer(layer):
        """Chooses how each layer of the encoder should be annotated."""
        # Layer normalization: quantize only the layer outputs.
        if isinstance(layer, tf.keras.layers.LayerNormalization):
          return tfmot.quantization.keras.quantize_annotate_layer(
              layer, configs.Default8BitOutputQuantizeConfig())
        # Dense and dropout layers: use the default 8-bit quantize config.
        if isinstance(layer, (tf.keras.layers.Dense,
                              tf.keras.layers.Dropout)):
          return tfmot.quantization.keras.quantize_annotate_layer(layer)
        # Composite transformer/MobileBERT blocks: annotate with a no-op
        # config so the custom quantize scheme can substitute their quantized
        # counterparts instead of applying per-layer defaults.
        if isinstance(layer, (modeling.layers.TransformerEncoderBlock,
                              modeling.layers.MobileBertTransformer,
                              modeling.layers.MobileBertEmbedding)):
          return tfmot.quantization.keras.quantize_annotate_layer(
              layer, configs.NoQuantizeConfig())
        return layer

annotated_encoder_network = tf.keras.models.clone_model(
encoder_network,
clone_function=quantize_annotate_layer,
)
quantized_encoder_network = tfmot.quantization.keras.quantize_apply(
annotated_encoder_network, scheme=schemes.Default8BitQuantizeScheme())
encoder_cfg = self.task_config.model.encoder.get()
model = bert_span_labeler.BertSpanLabelerQuantized(
network=quantized_encoder_network,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range))
return model
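

# --- Editorial sketch (not part of the original file) ------------------------
# A minimal, self-contained illustration of the annotate-then-apply pattern
# used by `build_model` above, applied to a toy Keras model rather than the
# BERT encoder. The layer types and sizes here are assumptions chosen purely
# for demonstration.
def _toy_annotate_then_apply_example():
  def _annotate(layer):
    # Only Dense layers are marked for quantization in this toy example.
    if isinstance(layer, tf.keras.layers.Dense):
      return tfmot.quantization.keras.quantize_annotate_layer(layer)
    return layer

  base = tf.keras.Sequential([
      tf.keras.layers.Dense(16, activation='relu', input_shape=(8,)),
      tf.keras.layers.Dense(2),
  ])
  annotated = tf.keras.models.clone_model(base, clone_function=_annotate)
  # `quantize_apply` rewrites annotated layers with fake-quantization ops.
  return tfmot.quantization.keras.quantize_apply(annotated)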
| 3,587 | 41.211765 | 80 | py |
models | models-master/official/projects/qat/vision/serving/export_module.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export modules for QAT model serving/inference."""
import tensorflow as tf

from official.projects.qat.vision.modeling import factory as qat_factory
from official.vision import configs
from official.vision.serving import detection
from official.vision.serving import image_classification
from official.vision.serving import semantic_segmentation


class ClassificationModule(image_classification.ClassificationModule):
  """Classification Module."""

  def _build_model(self):
model = super()._build_model()
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
return qat_factory.build_qat_classification_model(
model, self.params.task.quantization, input_specs,
self.params.task.model)


class SegmentationModule(semantic_segmentation.SegmentationModule):
  """Segmentation Module."""

  def _build_model(self):
model = super()._build_model()
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
return qat_factory.build_qat_segmentation_model(
model, self.params.task.quantization, input_specs)


class DetectionModule(detection.DetectionModule):
  """Detection Module."""

  def _build_model(self):
model = super()._build_model()
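    # Only the RetinaNet architecture currently has a QAT wrapper here; other
    # detection models fall through to the error below.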
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
model = qat_factory.build_qat_retinanet(model,
self.params.task.quantization,
self.params.task.model)
else:
raise ValueError('Detection module not implemented for {} model.'.format(
type(self.params.task.model)))
return model
| 2,395 | 37.031746 | 79 | py |
models | models-master/official/projects/qat/vision/configs/retinanet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet configuration definition."""
import dataclasses
from typing import Optional

from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.vision.configs import retinanet
from official.vision.configs import backbones


@dataclasses.dataclass
class RetinaNetTask(retinanet.RetinaNetTask):
  quantization: Optional[common.Quantization] = None


@exp_factory.register_config_factory('retinanet_mobile_coco_qat')
def retinanet_mobile_coco() -> cfg.ExperimentConfig:
"""Generates a config for COCO OD RetinaNet for mobile with QAT."""
config = retinanet.retinanet_spinenet_mobile_coco()
task = RetinaNetTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
  # Re-specify the backbone so that upsampling is performed by Keras layers
  # (`use_keras_upsampling_2d=True`), which the quantization tooling can wrap.
  task.model.backbone = backbones.Backbone(
type='spinenet_mobile',
spinenet_mobile=backbones.SpineNetMobile(
model_id='49',
stochastic_depth_drop_rate=0.2,
min_level=3,
max_level=7,
use_keras_upsampling_2d=True))
config.task = task
return config
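

# Editorial sketch (not part of the original file): how the experiment
# registered above can be looked up through the factory; no overrides are
# applied in this illustration.
def _example_lookup() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('retinanet_mobile_coco_qat')
  # `config.task` carries the extra `quantization` block defined above.
  return config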
| 1,721 | 34.875 | 74 | py |