# File: models-master/official/nlp/modeling/layers/text_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras Layers for BERT-specific preprocessing."""
# pylint: disable=g-import-not-at-top
from typing import Any, Dict, List, Mapping, Optional, Text, Union
from absl import logging
import tensorflow as tf
try:
# pytype: disable=import-error
import tensorflow_text as text
from tensorflow_text.python.ops import bert_tokenizer
# pytype: enable=import-error
except ImportError:
text = None
bert_tokenizer = None
except tf.errors.NotFoundError as e:
logging.warn("Encountered error when importing tensorflow_text: %s", e)
text = None
bert_tokenizer = None
def _check_if_tf_text_installed():
if text is None:
raise ImportError("import tensorflow_text failed, please install "
"'tensorflow-text-nightly'.")
def _truncate_row_lengths(ragged_tensor: tf.RaggedTensor,
new_lengths: tf.Tensor) -> tf.RaggedTensor:
"""Truncates the rows of `ragged_tensor` to the given row lengths."""
new_lengths = tf.broadcast_to(new_lengths,
ragged_tensor.bounding_shape()[0:1])
def fn(x):
row, new_length = x
return row[0:new_length]
fn_dtype = tf.RaggedTensorSpec(dtype=ragged_tensor.dtype,
ragged_rank=ragged_tensor.ragged_rank - 1)
result = tf.map_fn(fn, (ragged_tensor, new_lengths), dtype=fn_dtype)
# Work around broken shape propagation: without this, result has unknown rank.
flat_values_shape = [None] * ragged_tensor.flat_values.shape.rank
result = result.with_flat_values(
tf.ensure_shape(result.flat_values, flat_values_shape))
return result
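# For illustration, a minimal sketch (not part of the original module) of what
# `_truncate_row_lengths` computes; the literal values are hypothetical:
#
#   rt = tf.ragged.constant([[1, 2, 3], [4], [5, 6, 7, 8]])
#   _truncate_row_lengths(rt, tf.constant(2))
#   # Each row is cut to at most 2 items: [[1, 2], [4], [5, 6]]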
class BertTokenizer(tf.keras.layers.Layer):
"""Wraps TF.Text's BertTokenizer with pre-defined vocab as a Keras Layer.
Attributes:
tokenize_with_offsets: If true, calls
`text.BertTokenizer.tokenize_with_offsets()` instead of plain
`text.BertTokenizer.tokenize()` and outputs a triple of
`(tokens, start_offsets, limit_offsets)`.
raw_table_access: An object with methods `.lookup(keys)` and `.size()`
that operate on the raw lookup table of tokens. It can be used to
look up special token symbols like `[MASK]`.
"""
def __init__(self, *,
vocab_file: str,
lower_case: Optional[bool] = None,
tokenize_with_offsets: bool = False,
tokenizer_kwargs: Optional[Mapping[Text, Any]] = None,
**kwargs):
"""Initialize a `BertTokenizer` layer.
Args:
vocab_file: A Python string with the path of the vocabulary file.
This is a text file with newline-separated wordpiece tokens.
This layer initializes a lookup table from it that gets used with
`text.BertTokenizer`.
lower_case: Optional boolean forwarded to `text.BertTokenizer`.
If true, input text is converted to lower case (where applicable)
before tokenization. This must be set to match the way in which
the `vocab_file` was created. If passed, this overrides whatever value
may have been passed in `tokenizer_kwargs`.
tokenize_with_offsets: A Python boolean. If true, this layer calls
`text.BertTokenizer.tokenize_with_offsets()` instead of plain
`text.BertTokenizer.tokenize()` and outputs a triple of
`(tokens, start_offsets, limit_offsets)`
instead of just tokens.
tokenizer_kwargs: Optional mapping with keyword arguments to forward to
`text.BertTokenizer`'s constructor.
**kwargs: Standard arguments to `Layer()`.
Raises:
ImportError: If importing `tensorflow_text` failed.
"""
_check_if_tf_text_installed()
self.tokenize_with_offsets = tokenize_with_offsets
# TODO(b/177326279): Stop storing the vocab table initializer as an
# attribute when https://github.com/tensorflow/tensorflow/issues/46456
# has been fixed in the TensorFlow versions of the TF Hub users that load
# a SavedModel created from this layer. Due to that issue, loading such a
# SavedModel forgets to add .vocab_table._initializer as a trackable
# dependency of .vocab_table, so that saving it again to a second SavedModel
# (e.g., the final model built using TF Hub) does not properly track
# the ._vocab_table._initializer._filename as an Asset.
self._vocab_table, self._vocab_initializer_donotuse = (
self._create_vocab_table_and_initializer(vocab_file))
self._special_tokens_dict = self._create_special_tokens_dict(
self._vocab_table, vocab_file)
super().__init__(**kwargs)
tokenizer_kwargs = dict(tokenizer_kwargs or {})
if lower_case is not None:
tokenizer_kwargs["lower_case"] = lower_case
self._bert_tokenizer = text.BertTokenizer(self._vocab_table,
**tokenizer_kwargs)
@property
def vocab_size(self):
return self._vocab_table.size()
def _create_vocab_table_and_initializer(self, vocab_file):
vocab_initializer = tf.lookup.TextFileInitializer(
vocab_file,
key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
vocab_table = tf.lookup.StaticHashTable(vocab_initializer, default_value=-1)
return vocab_table, vocab_initializer
def call(self, inputs: tf.Tensor):
"""Calls `text.BertTokenizer` on inputs.
Args:
inputs: A string Tensor of shape `(batch_size,)`.
Returns:
One or three of `RaggedTensors` if `tokenize_with_offsets` is False or
True, respectively. These are
tokens: A `RaggedTensor` of shape
`[batch_size, (words), (pieces_per_word)]`
and type int32. `tokens[i,j,k]` contains the k-th wordpiece of the
j-th word in the i-th input.
start_offsets, limit_offsets: If `tokenize_with_offsets` is True,
RaggedTensors of type int64 with the same indices as tokens.
Element `[i,j,k]` contains the byte offset at the start, or past the
end, resp., for the k-th wordpiece of the j-th word in the i-th input.
"""
# Prepare to reshape the result to work around broken shape inference.
batch_size = tf.shape(inputs)[0]
def _reshape(rt):
values = rt.values
row_splits = rt.row_splits
row_splits = tf.reshape(row_splits, [batch_size + 1])
return tf.RaggedTensor.from_row_splits(values, row_splits)
# Call the tokenizer.
if self.tokenize_with_offsets:
tokens, start_offsets, limit_offsets = (
self._bert_tokenizer.tokenize_with_offsets(inputs))
tokens = tf.cast(tokens, dtype=tf.int32)
return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)
else:
tokens = self._bert_tokenizer.tokenize(inputs)
tokens = tf.cast(tokens, dtype=tf.int32)
return _reshape(tokens)
def get_config(self):
# Skip in tf.saved_model.save(); fail if called directly.
raise NotImplementedError("TODO(b/170480226): implement")
def get_special_tokens_dict(self):
"""Returns dict of token ids, keyed by standard names for their purpose.
Returns:
A dict from Python strings to Python integers. Each key is a standard
name for a special token describing its use. (For example, "padding_id"
is what BERT traditionally calls "[PAD]" but others may call "<pad>".)
The corresponding value is the integer token id. If a special token
is not found, its entry is omitted from the dict.
The supported keys and tokens are:
* start_of_sequence_id: looked up from "[CLS]"
* end_of_segment_id: looked up from "[SEP]"
* padding_id: looked up from "[PAD]"
* mask_id: looked up from "[MASK]"
* vocab_size: one past the largest token id used
"""
return self._special_tokens_dict
def _create_special_tokens_dict(self, vocab_table, vocab_file):
special_tokens = dict(start_of_sequence_id="[CLS]",
end_of_segment_id="[SEP]",
padding_id="[PAD]",
mask_id="[MASK]")
with tf.init_scope():
if tf.executing_eagerly():
special_token_ids = vocab_table.lookup(
tf.constant(list(special_tokens.values()), tf.string))
vocab_size = vocab_table.size()
else:
# A blast from the past: non-eager init context while building Model.
# This can happen with Estimator or tf.compat.v1.disable_v2_behavior().
logging.warning(
"Non-eager init context; computing "
"BertTokenizer's special_tokens_dict in tf.compat.v1.Session")
with tf.Graph().as_default():
local_vocab_table, _ = self._create_vocab_table_and_initializer(
vocab_file)
special_token_ids_tensor = local_vocab_table.lookup(
tf.constant(list(special_tokens.values()), tf.string))
vocab_size_tensor = local_vocab_table.size()
init_ops = [tf.compat.v1.initialize_all_tables()]
with tf.compat.v1.Session() as sess:
sess.run(init_ops)
special_token_ids, vocab_size = sess.run(
[special_token_ids_tensor, vocab_size_tensor])
result = dict(
vocab_size=int(vocab_size) # Numpy to Python.
)
for k, v in zip(special_tokens, special_token_ids):
v = int(v)
if v >= 0:
result[k] = v
else:
logging.warning("Could not find %s as token \"%s\" in vocab file %s",
k, special_tokens[k], vocab_file)
return result
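# Example usage, as a sketch: assumes "/tmp/vocab.txt" (a hypothetical path)
# is a newline-separated wordpiece vocab created with lower-casing.
#
#   tokenize = BertTokenizer(vocab_file="/tmp/vocab.txt", lower_case=True)
#   tokens = tokenize(tf.constant(["hello tensorflow"]))
#   # tokens: int32 RaggedTensor of shape [batch, (words), (pieces_per_word)].
#   special = tokenize.get_special_tokens_dict()
#   # e.g. {"start_of_sequence_id": ..., "padding_id": ..., "vocab_size": ...}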
class SentencepieceTokenizer(tf.keras.layers.Layer):
"""Wraps `tf_text.SentencepieceTokenizer` as a Keras Layer.
Attributes:
tokenize_with_offsets: If true, calls
`SentencepieceTokenizer.tokenize_with_offsets()`
instead of plain `.tokenize()` and outputs a triple of
`(tokens, start_offsets, limit_offsets)`.
"""
def __init__(self,
*,
lower_case: bool,
model_file_path: Optional[str] = None,
model_serialized_proto: Optional[str] = None,
tokenize_with_offsets: bool = False,
nbest_size: int = 0,
alpha: float = 1.0,
strip_diacritics: bool = False,
**kwargs):
"""Initializes a SentencepieceTokenizer layer.
Args:
lower_case: A Python boolean indicating whether to lowercase the string
before tokenization. NOTE: New models are encouraged to build `*_cf`
(case folding) normalization into the Sentencepiece model itself and
avoid this extra step.
model_file_path: A Python string with the path of the sentencepiece model.
Exactly one of `model_file_path` and `model_serialized_proto` can be
specified. In either case, the Keras model config for this layer will
store the actual proto (not a filename passed here).
model_serialized_proto: The sentencepiece model serialized proto string.
tokenize_with_offsets: A Python boolean. If true, this layer calls
`SentencepieceTokenizer.tokenize_with_offsets()` instead of
plain `.tokenize()` and outputs a triple of
`(tokens, start_offsets, limit_offsets)` instead of just tokens.
Note that when `strip_diacritics` is set to True, returning offsets
is not currently supported.
nbest_size: A scalar for sampling:
nbest_size = {0,1}: No sampling is performed. (default)
nbest_size > 1: samples from the nbest_size results.
nbest_size < 0: assuming that nbest_size is infinite, samples from all
hypotheses (lattice) using the
forward-filtering-and-backward-sampling algorithm.
alpha: A scalar for a smoothing parameter. Inverse temperature for
probability rescaling.
strip_diacritics: Whether to strip diacritics or not. Note that stripping
diacritics requires additional text normalization and dropping bytes,
which makes it impossible to keep track of the offsets now. Hence
when `strip_diacritics` is set to True, we don't yet support
`tokenize_with_offsets`. NOTE: New models are encouraged to put this
into custom normalization rules for the Sentencepiece model itself to
avoid this extra step and the limitation regarding offsets.
**kwargs: standard arguments to `Layer()`.
Raises:
ImportError: if importing tensorflow_text failed.
"""
_check_if_tf_text_installed()
super().__init__(**kwargs)
if bool(model_file_path) == bool(model_serialized_proto):
raise ValueError("Exact one of `model_file_path` and "
"`model_serialized_proto` can be specified.")
# TODO(b/181866850): Support tokenize_with_offsets for strip_diacritics=True
if tokenize_with_offsets and strip_diacritics:
raise ValueError("`tokenize_with_offsets` is not supported when "
"`strip_diacritics` is set to True.")
if model_file_path:
self._model_serialized_proto = tf.io.gfile.GFile(model_file_path,
"rb").read()
else:
self._model_serialized_proto = model_serialized_proto
self._lower_case = lower_case
self.tokenize_with_offsets = tokenize_with_offsets
self._nbest_size = nbest_size
self._alpha = alpha
self._strip_diacritics = strip_diacritics
self._tokenizer = self._create_tokenizer()
self._special_tokens_dict = self._create_special_tokens_dict()
def _create_tokenizer(self):
return text.SentencepieceTokenizer(
model=self._model_serialized_proto,
out_type=tf.int32,
nbest_size=self._nbest_size,
alpha=self._alpha)
@property
def vocab_size(self):
return self._tokenizer.vocab_size()
def call(self, inputs: tf.Tensor):
"""Calls `text.SentencepieceTokenizer` on inputs.
Args:
inputs: A string Tensor of shape `(batch_size,)`.
Returns:
One or three of RaggedTensors if tokenize_with_offsets is False or True,
respectively. These are
tokens: A RaggedTensor of shape `[batch_size, (pieces)]` and type `int32`.
`tokens[i,j]` contains the j-th piece in the i-th input.
start_offsets, limit_offsets: If `tokenize_with_offsets` is True,
RaggedTensors of type `int64` with the same indices as tokens.
Element `[i,j]` contains the byte offset at the start, or past the
end, resp., for the j-th piece in the i-th input.
"""
if self._strip_diacritics:
if self.tokenize_with_offsets:
raise ValueError("`tokenize_with_offsets` is not supported yet when "
"`strip_diacritics` is set to True (b/181866850).")
inputs = text.normalize_utf8(inputs, "NFD")
inputs = tf.strings.regex_replace(inputs, r"\p{Mn}", "")
if self._lower_case:
inputs = text.case_fold_utf8(inputs)
# Prepare to reshape the result to work around broken shape inference.
batch_size = tf.shape(inputs)[0]
def _reshape(rt):
values = rt.values
row_splits = rt.row_splits
row_splits = tf.reshape(row_splits, [batch_size + 1])
return tf.RaggedTensor.from_row_splits(values, row_splits)
# Call the tokenizer.
if self.tokenize_with_offsets:
tokens, start_offsets, limit_offsets = (
self._tokenizer.tokenize_with_offsets(inputs))
return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)
else:
tokens = self._tokenizer.tokenize(inputs)
return _reshape(tokens)
def get_config(self):
# Skip in tf.saved_model.save(); fail if called directly.
raise NotImplementedError("TODO(b/170480226): implement")
def get_special_tokens_dict(self):
"""Returns dict of token ids, keyed by standard names for their purpose.
Returns:
A dict from Python strings to Python integers. Each key is a standard
name for a special token describing its use. (For example, "padding_id"
is what Sentencepiece calls "<pad>" but others may call "[PAD]".)
The corresponding value is the integer token id. If a special token
is not found, its entry is omitted from the dict.
The supported keys and tokens are:
* start_of_sequence_id: looked up from "[CLS]"
* end_of_segment_id: looked up from "[SEP]"
* padding_id: looked up from "<pad>"
* mask_id: looked up from "[MASK]"
* vocab_size: one past the largest token id used
"""
return self._special_tokens_dict
def _create_special_tokens_dict(self):
special_tokens = dict(
start_of_sequence_id=b"[CLS]",
end_of_segment_id=b"[SEP]",
padding_id=b"<pad>",
mask_id=b"[MASK]")
with tf.init_scope():
if tf.executing_eagerly():
special_token_ids = self._tokenizer.string_to_id(
tf.constant(list(special_tokens.values()), tf.string))
inverse_tokens = self._tokenizer.id_to_string(special_token_ids)
vocab_size = self._tokenizer.vocab_size()
else:
# A blast from the past: non-eager init context while building Model.
# This can happen with Estimator or tf.compat.v1.disable_v2_behavior().
logging.warning(
"Non-eager init context; computing SentencepieceTokenizer's "
"special_tokens_dict in tf.compat.v1.Session")
with tf.Graph().as_default():
local_tokenizer = self._create_tokenizer()
special_token_ids_tensor = local_tokenizer.string_to_id(
tf.constant(list(special_tokens.values()), tf.string))
inverse_tokens_tensor = local_tokenizer.id_to_string(
special_token_ids_tensor)
vocab_size_tensor = local_tokenizer.vocab_size()
with tf.compat.v1.Session() as sess:
special_token_ids, inverse_tokens, vocab_size = sess.run(
[special_token_ids_tensor, inverse_tokens_tensor,
vocab_size_tensor])
result = dict(
vocab_size=int(vocab_size) # Numpy to Python.
)
for name, token_id, inverse_token in zip(special_tokens,
special_token_ids,
inverse_tokens):
if special_tokens[name] == inverse_token:
result[name] = int(token_id)
else:
logging.warning(
"Could not find %s as token \"%s\" in sentencepiece model, "
"got \"%s\"", name, special_tokens[name], inverse_token)
return result
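# Example usage, as a sketch: assumes "/tmp/sp.model" (a hypothetical path) is
# a trained sentencepiece model.
#
#   tokenize = SentencepieceTokenizer(model_file_path="/tmp/sp.model",
#                                     lower_case=True)
#   tokens = tokenize(tf.constant(["hello tensorflow"]))
#   # tokens: int32 RaggedTensor of shape [batch_size, (pieces)].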
class BertPackInputs(tf.keras.layers.Layer):
"""Packs tokens into model inputs for BERT."""
def __init__(self,
seq_length,
*,
start_of_sequence_id=None,
end_of_segment_id=None,
padding_id=None,
special_tokens_dict=None,
truncator="round_robin",
**kwargs):
"""Initializes with a target `seq_length`, relevant token ids and truncator.
Args:
seq_length: The desired output length. Must not exceed the max_seq_length
that was fixed at training time for the BERT model receiving the inputs.
start_of_sequence_id: The numeric id of the token that is to be placed
at the start of each sequence (called "[CLS]" for BERT).
end_of_segment_id: The numeric id of the token that is to be placed
at the end of each input segment (called "[SEP]" for BERT).
padding_id: The numeric id of the token that is to be placed into the
unused positions after the last segment in the sequence
(called "[PAD]" for BERT).
special_tokens_dict: Optionally, a dict from Python strings to Python
integers that contains values for `start_of_sequence_id`,
`end_of_segment_id` and `padding_id`. (Further values in the dict are
silently ignored.) If this is passed, separate *_id arguments must be
omitted.
truncator: The algorithm to truncate a list of batched segments to fit a
per-example length limit. The value can be either `round_robin` or
`waterfall`:
(1) For "round_robin" algorithm, available space is assigned
one token at a time in a round-robin fashion to the inputs that still
need some, until the limit is reached. It currently only supports
one or two segments.
(2) For "waterfall" algorithm, the allocation of the budget is done
using a "waterfall" algorithm that allocates quota in a
left-to-right manner and fills up the buckets until we run out of
budget. It support arbitrary number of segments.
**kwargs: standard arguments to `Layer()`.
Raises:
ImportError: if importing `tensorflow_text` failed.
"""
_check_if_tf_text_installed()
super().__init__(**kwargs)
self.seq_length = seq_length
if truncator not in ("round_robin", "waterfall"):
raise ValueError("Only 'round_robin' and 'waterfall' algorithms are "
"supported, but got %s" % truncator)
self.truncator = truncator
self._init_token_ids(
start_of_sequence_id=start_of_sequence_id,
end_of_segment_id=end_of_segment_id,
padding_id=padding_id,
special_tokens_dict=special_tokens_dict)
def _init_token_ids(
self, *,
start_of_sequence_id,
end_of_segment_id,
padding_id,
special_tokens_dict):
usage = ("Must pass either all of start_of_sequence_id, end_of_segment_id, "
"padding_id as arguments, or else a special_tokens_dict "
"with those keys.")
special_tokens_args = [start_of_sequence_id, end_of_segment_id, padding_id]
if special_tokens_dict is None:
if any(x is None for x in special_tokens_args):
raise ValueError(usage)
self.start_of_sequence_id = int(start_of_sequence_id)
self.end_of_segment_id = int(end_of_segment_id)
self.padding_id = int(padding_id)
else:
if any(x is not None for x in special_tokens_args):
raise ValueError(usage)
self.start_of_sequence_id = int(
special_tokens_dict["start_of_sequence_id"])
self.end_of_segment_id = int(special_tokens_dict["end_of_segment_id"])
self.padding_id = int(special_tokens_dict["padding_id"])
def get_config(self) -> Dict[str, Any]:
config = super().get_config()
config["seq_length"] = self.seq_length
config["start_of_sequence_id"] = self.start_of_sequence_id
config["end_of_segment_id"] = self.end_of_segment_id
config["padding_id"] = self.padding_id
config["truncator"] = self.truncator
return config
def call(self, inputs: Union[tf.RaggedTensor, List[tf.RaggedTensor]]):
"""Adds special tokens to pack a list of segments into BERT input Tensors.
Args:
inputs: A Python list of one or two RaggedTensors, each with the batched
values of one input segment. The j-th segment of the i-th input example
consists of slice `inputs[j][i, ...]`.
Returns:
A nest of Tensors for use as input to the BERT TransformerEncoder.
"""
# BertPackInputsSavedModelWrapper relies on only calling bert_pack_inputs()
return BertPackInputs.bert_pack_inputs(
inputs, self.seq_length,
start_of_sequence_id=self.start_of_sequence_id,
end_of_segment_id=self.end_of_segment_id,
padding_id=self.padding_id,
truncator=self.truncator)
@staticmethod
def bert_pack_inputs(inputs: Union[tf.RaggedTensor, List[tf.RaggedTensor]],
seq_length: Union[int, tf.Tensor],
start_of_sequence_id: Union[int, tf.Tensor],
end_of_segment_id: Union[int, tf.Tensor],
padding_id: Union[int, tf.Tensor],
truncator="round_robin"):
"""Freestanding equivalent of the BertPackInputs layer."""
_check_if_tf_text_installed()
# Sanitize inputs.
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if not inputs:
raise ValueError("At least one input is required for packing")
input_ranks = [rt.shape.rank for rt in inputs]
if None in input_ranks or len(set(input_ranks)) > 1:
raise ValueError("All inputs for packing must have the same known rank, "
"found ranks " + ",".join(str(rank) for rank in input_ranks))
# Flatten inputs to [batch_size, (tokens)].
if input_ranks[0] > 2:
inputs = [rt.merge_dims(1, -1) for rt in inputs]
# In case inputs weren't truncated (as they should have been),
# fall back to some ad-hoc truncation.
num_special_tokens = len(inputs) + 1
if truncator == "round_robin":
trimmed_segments = text.RoundRobinTrimmer(seq_length -
num_special_tokens).trim(inputs)
elif truncator == "waterfall":
trimmed_segments = text.WaterfallTrimmer(
seq_length - num_special_tokens).trim(inputs)
else:
raise ValueError("Unsupported truncator: %s" % truncator)
# Combine segments.
segments_combined, segment_ids = text.combine_segments(
trimmed_segments,
start_of_sequence_id=start_of_sequence_id,
end_of_segment_id=end_of_segment_id)
# Pad to dense Tensors.
input_word_ids, _ = text.pad_model_inputs(segments_combined, seq_length,
pad_value=padding_id)
input_type_ids, input_mask = text.pad_model_inputs(segment_ids, seq_length,
pad_value=0)
# Work around broken shape inference.
output_shape = tf.stack([
inputs[0].nrows(out_type=tf.int32), # batch_size
tf.cast(seq_length, dtype=tf.int32)])
def _reshape(t):
return tf.reshape(t, output_shape)
# Assemble nest of input tensors as expected by BERT TransformerEncoder.
return dict(input_word_ids=_reshape(input_word_ids),
input_mask=_reshape(input_mask),
input_type_ids=_reshape(input_type_ids))
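# Example usage, as a sketch: `segment_a` and `segment_b` stand for ragged
# int32 token ids from one of the tokenizers above, and `tokenize` for that
# tokenizer layer.
#
#   pack = BertPackInputs(
#       seq_length=128,
#       special_tokens_dict=tokenize.get_special_tokens_dict())
#   encoder_inputs = pack([segment_a, segment_b])
#   # A dict of dense [batch_size, 128] Tensors: "input_word_ids",
#   # "input_mask" and "input_type_ids".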
class FastWordpieceBertTokenizer(tf.keras.layers.Layer):
"""A bert tokenizer keras layer using text.FastWordpieceTokenizer.
See details: "Fast WordPiece Tokenization" (https://arxiv.org/abs/2012.15524)
"""
def __init__(self,
*,
vocab_file: str,
lower_case: bool,
tokenize_with_offsets: bool = False,
**kwargs):
"""Initializes a FastWordpieceBertTokenizer layer.
Args:
vocab_file: A Python string with the path of the vocabulary file. This is
a text file with newline-separated wordpiece tokens. This layer loads
a list of tokens from it to create text.FastWordpieceTokenizer.
lower_case: A Python boolean forwarded to text.BasicTokenizer. If true,
input text is converted to lower case (where applicable) before
tokenization. This must be set to match the way in which the vocab_file
was created.
tokenize_with_offsets: A Python boolean. If true, this layer calls
FastWordpieceTokenizer.tokenize_with_offsets() instead of plain
.tokenize() and outputs a triple of (tokens, start_offsets,
limit_offsets) instead of just tokens.
**kwargs: standard arguments to Layer().
"""
super().__init__(**kwargs)
logging.info("Initialize a FastWordpieceBertTokenizer.")
self.tokenize_with_offsets = tokenize_with_offsets
self._basic_tokenizer = bert_tokenizer.BasicTokenizer(lower_case=lower_case)
# Read the vocab file into a list of tokens to create `fast_wp_tokenizer`.
self._vocab = [line.rstrip() for line in tf.io.gfile.GFile(vocab_file)]
self._fast_wp_tokenizer = text.FastWordpieceTokenizer(
vocab=self._vocab, token_out_type=tf.int32, no_pretokenization=True)
self._special_tokens_dict = self._create_special_tokens_dict()
@property
def vocab_size(self):
return len(self._vocab)
def get_config(self):
# Skip in tf.saved_model.save(); fail if called directly.
# We cannot just put the original, user-supplied vocab file name into
# the config, because the path has to change as the SavedModel is copied
# around.
raise NotImplementedError("Not implemented yet.")
def get_special_tokens_dict(self):
"""Returns dict of token ids, keyed by standard names for their purpose.
Returns:
A dict from Python strings to Python integers. Each key is a standard
name for a special token describing its use. (For example, "padding_id"
is what BERT traditionally calls "[PAD]" but others may call "<pad>".)
The corresponding value is the integer token id. If a special token
is not found, its entry is omitted from the dict.
The supported keys and tokens are:
* start_of_sequence_id: looked up from "[CLS]"
* end_of_segment_id: looked up from "[SEP]"
* padding_id: looked up form "[PAD]"
* mask_id: looked up from "[MASK]"
* vocab_size: one past the largest token id used
"""
return self._special_tokens_dict
def _create_special_tokens_dict(self):
"""Creates dict of token ids, keyed by standard names for their purpose."""
special_tokens = {"vocab_size": self.vocab_size}
def add_special_token(key, token):
try:
token_id = self._vocab.index(token)
special_tokens[key] = token_id
except ValueError:
# As in nlp.modeling.layers.BertTokenizer, if a special token
# is not found, its entry is omitted from the dict.
logging.warning("Could not find %s as token \"%s\" in vocab file", key,
token)
add_special_token("start_of_sequence_id", "[CLS]")
add_special_token("end_of_segment_id", "[SEP]")
add_special_token("padding_id", "[PAD]")
add_special_token("mask_id", "[MASK]")
return special_tokens
def _tokenize_with_offsets(self, text_input: tf.Tensor):
tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)
wordpieces, wp_begin, wp_end = (
self._fast_wp_tokenizer.tokenize_with_offsets(tokens))
begin_expanded = tf.expand_dims(begin, axis=2)
final_begin = begin_expanded + wp_begin
final_end = begin_expanded + wp_end
return wordpieces, final_begin, final_end
def _tokenize(self, text_input: tf.Tensor):
tokens = self._basic_tokenizer.tokenize(text_input)
return self._fast_wp_tokenizer.tokenize(tokens)
def call(self, inputs: tf.Tensor):
"""Calls text.BertTokenizer on inputs.
Args:
inputs: A string Tensor of shape [batch_size].
Returns:
One or three of RaggedTensors if tokenize_with_offsets is False or True,
respectively. These are
tokens: A RaggedTensor of shape [batch_size, (words), (pieces_per_word)]
and type int32. tokens[i,j,k] contains the k-th wordpiece of the
j-th word in the i-th input.
start_offsets, limit_offsets: If tokenize_with_offsets is True,
RaggedTensors of type int64 with the same indices as tokens.
Element [i,j,k] contains the byte offset at the start, or past the
end, resp., for the k-th wordpiece of the j-th word in the i-th input.
"""
# Prepare to reshape the result to work around broken shape inference.
batch_size = tf.shape(inputs)[0]
def _reshape(rt):
values = rt.values
row_splits = rt.row_splits
row_splits = tf.reshape(row_splits, [batch_size + 1])
return tf.RaggedTensor.from_row_splits(values, row_splits)
if self.tokenize_with_offsets:
tokens, start_offsets, limit_offsets = self._tokenize_with_offsets(inputs)
return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)
else:
tokens = self._tokenize(inputs)
return _reshape(tokens)
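# Putting the pieces together, as a sketch: a Keras preprocessing model that
# maps raw strings to BERT encoder inputs ("/tmp/vocab.txt" is hypothetical).
#
#   sentences = tf.keras.layers.Input(shape=(), dtype=tf.string)
#   tokenize = FastWordpieceBertTokenizer(vocab_file="/tmp/vocab.txt",
#                                         lower_case=True)
#   pack = BertPackInputs(
#       seq_length=128,
#       special_tokens_dict=tokenize.get_special_tokens_dict())
#   preprocessing = tf.keras.Model(sentences, pack(tokenize(sentences)))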
# File: models-master/official/nlp/modeling/layers/transformer_encoder_block_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers.transformer_encoder_block import TransformerEncoderBlock
@parameterized.named_parameters(('base', TransformerEncoderBlock))
class TransformerEncoderBlockLayerTest(
tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
output_tensor = test_layer([input_data, mask_data], output_range=1)
self.assertAllClose(new_output_tensor, output_tensor, atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer(input_data, output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer(input_data, output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
output_tensor = test_layer([input_data, mask_data], output_range=1)
self.assertAllClose(new_output_tensor, output_tensor, atol=5e-5, rtol=0.003)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
def test_separate_qkv(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
class TransformerEncoderBlockLayerTestWithoutParams(
tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockLayerTestWithoutParams, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_raises_invalid_arg_error_when_q_kv_dims_are_different(self):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=True)
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
with self.assertRaises(tf.errors.InvalidArgumentError):
test_layer(inputs)
@parameterized.named_parameters(('output_range_not_none', 2),
('output_range_none', None))
def test_needs_diff_q_kv_att_layer_norm_to_be_true_for_diff_q_and_kv_dims(
self, output_range):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=True)
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
with self.assertRaises(tf.errors.InvalidArgumentError):
test_layer(inputs, output_range=output_range)
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
diff_q_kv_att_layer_norm=True,
norm_first=True)
# Forward path.
test_layer(inputs)
@parameterized.named_parameters(('norm_first_is_true', True),
('norm_first_is_false', False))
def test_use_query_residual_false_removes_add_op(self, norm_first):
graph_with_res = tf.Graph()
with graph_with_res.as_default():
layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=norm_first)
inputs = tf.keras.Input(shape=(None, None, 2))
outputs = layer(inputs)
tf.keras.Model(inputs=inputs, outputs=outputs)
graph_without_res = tf.Graph()
with graph_without_res.as_default():
layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
norm_first=norm_first,
use_query_residual=False)
inputs = tf.keras.Input(shape=(None, None, 2))
outputs = layer(inputs)
tf.keras.Model(inputs=inputs, outputs=outputs)
graph_with_res_names = {x.name for x in graph_with_res.get_operations()}
graph_without_res_names = {
x.name for x in graph_without_res.get_operations()
}
self.assertIn('transformer_encoder_block/add',
list(graph_with_res_names - graph_without_res_names)[0])
self.assertEmpty(graph_without_res_names - graph_with_res_names)
@parameterized.named_parameters(('key_dim_is_none', None, 128, 2, 128 // 2),
('key_dim_is_not_none', 30, 128, 2, 30))
def test_key_dim(self, key_dim, q_tensor_last_dim, some_num_attention_heads,
expected):
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
key_dim=key_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(expected,
test_layer._attention_layer.get_config()['key_dim'])
@parameterized.named_parameters(
('output_last_dim_is_none_use_query_residual_false', False, None, 128,
128),
('output_last_dim_is_none_use_query_residual_true', True, None, 128, 128),
('output_last_dim_is_not_none', False, 30, 128, 30))
def test_output_last_dim(self, use_query_residual, output_last_dim,
q_tensor_last_dim, expected):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
# Must be false for multi-head output to be different from
# first input's last dim
use_query_residual=use_query_residual,
output_last_dim=output_last_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
output = test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(output.numpy().shape[-1], expected)
@parameterized.named_parameters(('value_dim_is_none', None, 128, 2, 128 // 2),
('value_dim_is_not_none', 30, 128, 2, 30))
def test_value_dim(self, value_dim, q_tensor_last_dim,
some_num_attention_heads, expected):
some_inner_dim = 32
some_inner_activation = 'relu'
test_layer = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
value_dim=value_dim)
q_tensor = tf.zeros([2, 4, q_tensor_last_dim], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 32], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
test_layer([q_tensor, kv_tensor, dummy_mask])
self.assertEqual(expected,
test_layer._attention_layer.get_config()['value_dim'])
class TransformerArgumentTest(tf.test.TestCase, parameterized.TestCase):
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_mask]
output = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_norm_first_false_and_diff_q_kv_att_layer_norm_true_raises(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
with self.assertRaises(ValueError):
TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=False,
diff_q_kv_att_layer_norm=True)
def test_diff_q_kv_att_layer_norm_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=False)
self.assertIn('diff_q_kv_att_layer_norm', encoder.get_config())
self.assertFalse(encoder.get_config()['diff_q_kv_att_layer_norm'])
def test_diff_q_kv_att_layer_norm_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
norm_first=True,
diff_q_kv_att_layer_norm=True)
self.assertIn('diff_q_kv_att_layer_norm', encoder.get_config())
self.assertTrue(encoder.get_config()['diff_q_kv_att_layer_norm'])
def test_use_query_residual_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('use_query_residual', encoder.get_config())
self.assertTrue(encoder.get_config()['use_query_residual'])
def test_use_query_residual_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
use_query_residual=False)
self.assertIn('use_query_residual', encoder.get_config())
self.assertFalse(encoder.get_config()['use_query_residual'])
def test_key_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('key_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['key_dim'])
def test_key_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
key_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
key_dim=key_dim)
self.assertIn('key_dim', encoder.get_config())
self.assertEqual(key_dim, encoder.get_config()['key_dim'])
def test_value_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('value_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['value_dim'])
def test_value_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
value_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
value_dim=value_dim)
self.assertIn('value_dim', encoder.get_config())
self.assertEqual(value_dim, encoder.get_config()['value_dim'])
def test_output_last_dim_is_part_of_config_1(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation)
self.assertIn('output_last_dim', encoder.get_config())
self.assertIsNone(encoder.get_config()['output_last_dim'])
def test_output_last_dim_is_part_of_config_2(self):
some_num_attention_heads = 2
some_inner_dim = 32
some_inner_activation = 'relu'
output_last_dim = 10
encoder = TransformerEncoderBlock(
num_attention_heads=some_num_attention_heads,
inner_dim=some_inner_dim,
inner_activation=some_inner_activation,
output_last_dim=output_last_dim)
self.assertIn('output_last_dim', encoder.get_config())
self.assertEqual(output_last_dim, encoder.get_config()['output_last_dim'])
def test_get_config(self):
num_attention_heads = 2
encoder_block = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.),
use_query_residual=False,
key_dim=20,
value_dim=30,
output_last_dim=40,
diff_q_kv_att_layer_norm=True)
encoder_block_config = encoder_block.get_config()
new_encoder_block = TransformerEncoderBlock.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
@parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},
{'attention_axes': [2]}, {'attention_axes': [1, 2]})
def test_several_attention_axes(self, attention_axes):
test_layer = TransformerEncoderBlock(
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
num_attention_heads=10,
attention_axes=attention_axes)
num_rows = 21
num_cols = 13
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(num_rows, num_cols, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.parameters(
{
'output_dropout': 0.1,
'attention_dropout': 0.2,
'inner_dropout': 0.3
}, {
'output_dropout': 0.0,
'attention_dropout': 0.2,
'inner_dropout': 0.3
}, {
'output_dropout': 0.1,
'attention_dropout': 0.0,
'inner_dropout': 0.3
}, {
'output_dropout': 0.1,
'attention_dropout': 0.2,
'inner_dropout': 0.0
})
def test_dropout_config(self, output_dropout, attention_dropout,
inner_dropout):
test_layer = TransformerEncoderBlock(
num_attention_heads=2,
inner_dim=32,
inner_activation='relu',
output_dropout=output_dropout,
attention_dropout=attention_dropout,
inner_dropout=inner_dropout)
seq_len = 21
hidden_size = 512
input_tensor = tf.keras.Input(shape=(seq_len, hidden_size))
_ = test_layer(input_tensor)
true_output_dropout = test_layer._output_dropout.get_config()['rate']
true_attention_dropout = test_layer._attention_dropout.get_config()['rate']
true_inner_dropout = test_layer._inner_dropout_layer.get_config()['rate']
self.assertEqual(true_output_dropout, output_dropout)
self.assertEqual(true_attention_dropout, attention_dropout)
self.assertEqual(true_inner_dropout, inner_dropout)
@parameterized.named_parameters(
(
'return_attention_scores_is_false',
False,
),
(
'return_attention_scores_is_true',
True,
),
)
def test_return_attention_scores(self, return_attention_scores):
num_attention_heads = 7
sequence_length = 21
width = 80
test_layer = TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=2048,
inner_activation='relu',
return_attention_scores=return_attention_scores)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
expected_layer_output_shape = [None, sequence_length, width]
expected_attention_scores_shape = [
None, num_attention_heads, sequence_length, sequence_length
]
if return_attention_scores:
self.assertIsInstance(output_tensor, tuple)
self.assertLen(output_tensor, 2)
# First is the standard output.
self.assertEqual(output_tensor[0].shape.as_list(),
expected_layer_output_shape)
# Second is the attention scores.
self.assertEqual(output_tensor[1].shape.as_list(),
expected_attention_scores_shape)
else:
# Only the standard layer output.
self.assertEqual(output_tensor.shape.as_list(),
expected_layer_output_shape)
if __name__ == '__main__':
tf.test.main()
# File: models-master/official/nlp/modeling/layers/moe.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Experts layers and their routing mechanisms."""
import dataclasses
from typing import Callable, Optional, Tuple
import tensorflow as tf
from official.modeling import tf_utils
_InitializerType = tf.keras.initializers.Initializer
_DEFAULT_KERNEL_INITIALIZER = tf.keras.initializers.TruncatedNormal(stddev=2e-2)
_DEFAULT_BIAS_INITIALIZER = tf.keras.initializers.Zeros()
################## Routers (gating functions) ##################
def _router_z_loss(router_logits: tf.Tensor) -> float:
"""Computes router z-loss.
The router z-loss was introduced in Designing Effective Sparse Expert Models
(https://arxiv.org/abs/2202.08906). It encourages router logits to remain
small in an effort to improve stability.
Args:
router_logits: <float32>[num_groups, tokens_per_group, num_experts] router
logits.
Returns:
Scalar router z-loss <float32>.
"""
num_groups = tf.shape(router_logits)[0]
tokens_per_group = router_logits.shape[1]
log_z = tf.math.reduce_logsumexp(router_logits, axis=-1)
z_loss = log_z**2
return tf.math.reduce_sum(z_loss) / tf.cast(
num_groups * tokens_per_group, tf.float32)
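# A worked example of the z-loss: for one group of two tokens with logits
# [[0., 0.], [1., 1.]], logsumexp gives [log 2, 1 + log 2], so the loss is
# ((log 2)**2 + (1 + log 2)**2) / 2 ~= (0.480 + 2.867) / 2 ~= 1.67.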
@dataclasses.dataclass
class RouterMask:
"""Dispatch and combine arrays for expert routing with masked matmuls.
Attributes:
dispatch_mask:
<float>[num_groups, tokens_per_group, num_experts, expert_capacity]
dispatch array that is 1 if the token gets routed to the
corresponding expert, and 0 otherwise.
combine_array:
<float>[num_groups, tokens_per_group, num_experts, expert_capacity]
combine array used for combining expert outputs and
scaling with router probability.
"""
dispatch_mask: tf.Tensor
combine_array: tf.Tensor
RouterOutput = RouterMask
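# A sketch (not part of this module) of how a RouterMask is typically consumed
# with masked matmuls; `experts` is a hypothetical per-expert feed-forward:
#
#   # tokens: [num_groups, tokens_per_group, hidden_dim]
#   expert_inputs = tf.einsum("gth,gtec->gech", tokens, mask.dispatch_mask)
#   expert_outputs = experts(expert_inputs)
#   outputs = tf.einsum("gech,gtec->gth", expert_outputs, mask.combine_array)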
class Router(tf.keras.layers.Layer):
"""Abstract base router class, defining router API and inner workings.
Computations are performed in float32 for stability, and returned after
conversion according to the precision policy. See the discussion of
"selective precision" in https://arxiv.org/abs/2101.03961.
Uses Keras add_loss() and add_metric() APIs.
Attributes:
num_experts: Number of experts, used to check consistency with
FeedForwardExperts.
jitter_noise: Amplitude of jitter noise applied to router logits.
router_weights: Dense layer that computes logits for all tokens, which are
then used as expert or token weights.
"""
def __init__(
self,
num_experts: int,
*,
jitter_noise: float = 0.0,
use_bias: bool = True,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
router_z_loss_weight: float = 0.0,
export_metrics: bool = True,
name: str = "router",
**kwargs):
"""Init.
Args:
num_experts: Number of experts.
jitter_noise: Amplitude of jitter noise applied to router logits.
use_bias: Whether or not to use the bias term in computing the router
weights.
kernel_initializer: Kernel initializer for router weights.
bias_initializer: Bias initializer for router weights.
router_z_loss_weight: Weight for router_z_loss. Use non-zero values if
running into training instability (esp. with dtype 'bfloat16' or lower).
export_metrics: Whether to export metrics using Keras add_metric API.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.num_experts = num_experts # Used to check consistency with
# FeedForwardExperts.
self.jitter_noise = jitter_noise
self.router_z_loss_weight = router_z_loss_weight
self._export_metrics = export_metrics
self.router_weights = tf.keras.layers.Dense(
num_experts,
use_bias=use_bias,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
bias_initializer=tf_utils.clone_initializer(bias_initializer),
name="router_weights",
dtype=tf.float32)
def call(self,
inputs: tf.Tensor,
*,
expert_capacity: int,
training: Optional[bool] = None) -> RouterOutput:
"""Computes dispatch and combine arrays for routing to experts.
Args:
inputs: Inputs to send to experts of shape
<float>[num_groups, tokens_per_group, hidden_dim].
expert_capacity: Each group will send this many tokens to each expert.
      training: If true, apply jitter noise during routing. If not provided,
        it is taken from tf.keras.backend.learning_phase().
Returns:
Router indices or mask arrays (depending on router type).
"""
if training is None:
training = tf.keras.backend.learning_phase()
# inputs shape <float>[num_groups, tokens_per_group, hidden_dim]
router_probs, router_logits = self._compute_router_probabilities(
inputs, apply_jitter=training)
# router_probs <float32>[num_groups, tokens_per_group, num_experts]
# router_logits <float>[num_groups, tokens_per_group, num_experts]
unscaled_router_z_loss = _router_z_loss(router_logits)
router_z_loss = self.router_z_loss_weight * unscaled_router_z_loss
self.add_loss(router_z_loss)
if self._export_metrics:
self.add_metric(unscaled_router_z_loss, name="unscaled_router_z_loss")
self.add_metric(router_z_loss, name="router_z_loss")
routing_instructions = self._compute_routing_instructions(
router_probs, expert_capacity)
return routing_instructions
def _compute_router_probabilities(
self, inputs: tf.Tensor,
apply_jitter: bool) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes router probabilities from input tokens.
Args:
inputs: Inputs from which router probabilities are computed, shape
<float>[num_groups, tokens_per_group, hidden_dim].
apply_jitter: If true, apply jitter noise.
Returns:
- <float32>[num_groups, tokens_per_group, num_experts] probabilities for
each token and expert. Used for routing tokens to experts.
- <float32>[num_groups, tokens_per_group, num_experts] raw router logits.
Used for computing router z-loss.
"""
if apply_jitter and self.jitter_noise > 0:
inputs *= tf.random.uniform(
tf.shape(inputs),
minval=1.0 - self.jitter_noise,
maxval=1.0 + self.jitter_noise,
dtype=inputs.dtype)
# inputs <float>, router_logits <float32>
router_logits = self.router_weights(inputs)
router_probs = tf.keras.activations.softmax(router_logits, axis=-1)
return router_probs, router_logits
def _compute_routing_instructions(self, router_probs: tf.Tensor,
expert_capacity: int) -> RouterOutput:
"""Computes instructions for routing inputs to experts."""
raise NotImplementedError(
"Router is an abstract class that should be subclassed.")
class MaskedRouter(Router):
"""Abstract base router class for masked matmul dispatch routers.
MaskedRouter(s) return RouterMask(s) containing a dispatch mask and combine
array for sending and receiving (via masked matmuls) inputs and outputs to and
from experts.
Routing using masked matmuls is generally faster than scatter-based routing on
TPUs.
Uses Keras add_loss() and add_metric() APIs.
"""
def _compute_routing_instructions(self, router_probs: tf.Tensor,
expert_capacity: int) -> RouterMask:
"""Computes masks for the top-k experts per token.
Args:
router_probs: <float32>[num_groups, tokens_per_group, num_experts]
probabilities used to determine the routing of tokens to the experts.
expert_capacity: Each group will send this many tokens to each expert.
Returns:
Router mask arrays.
"""
raise NotImplementedError(
"MaskedRouter is an abstract class that should be subclassed.")
class ExpertsChooseMaskedRouter(MaskedRouter):
"""Masked matmul router using experts choose tokens assignment.
This router uses the same mechanism as in Mixture-of-Experts with Expert
Choice (https://arxiv.org/abs/2202.09368): each expert selects its top
expert_capacity tokens. An individual token may be processed by multiple
experts or none at all.
Note: "experts choose routing" should not be used in decoder blocks because it
breaks the autoregressive behavior, leading to a mismatch between training
(teacher forcing) and inference (autoregressive decoding).
Uses Keras add_loss() and add_metric() APIs.
"""
def _compute_routing_instructions(self, router_probs: tf.Tensor,
expert_capacity: int) -> RouterMask:
"""Computes masks for the highest probability token per expert.
Args:
router_probs: <float32>[num_groups, tokens_per_group, num_experts]
probabilities used to determine the routing of tokens to the experts.
expert_capacity: Each group will send this many tokens to each expert.
Returns:
Dispatch and combine arrays for routing with masked matmuls.
"""
num_groups = tf.shape(router_probs)[0]
tokens_per_group = router_probs.shape[1]
router_probs_t = tf.transpose(router_probs, perm=[0, 2, 1])
# router_probs_t: <float32>[num_groups, num_experts, tokens_per_group]
# Top expert_capacity router probability and corresponding token indices for
# each expert.
# Shapes [num_groups, num_experts, expert_capacity]
_, expert_index = tf.math.top_k(
router_probs_t, k=expert_capacity, sorted=False)
# Convert to one-hot mask of expert indices for each token in each group.
# Shape: [num_groups, tokens_per_group, num_experts, expert_capacity].
dispatch_mask = tf.one_hot(
expert_index, tokens_per_group, axis=1, dtype=router_probs.dtype)
# The combine array will be used for combining expert outputs, scaled by the
# router probabilities.
    # Shape: [num_groups, tokens_per_group, num_experts, expert_capacity].
combine_array = tf.expand_dims(router_probs, axis=3) * dispatch_mask
    # Load balancing loss.
    # Each expert chooses tokens until it reaches full capacity, so expert
    # choice routing needs no auxiliary load balancing loss; a zero metric is
    # exported for consistency.
if self._export_metrics:
self.add_metric(0.0, name="load_balancing_loss")
# Gather expert metrics.
# Number of tokens that were dispatched to at least one expert.
num_tokens = num_groups * tokens_per_group
num_tokens_dispatched_somewhere = tf.math.reduce_sum(tf.math.reduce_max(
dispatch_mask, axis=(-1, -2)))
fraction_tokens_left_behind = 1.0 - tf.cast(
num_tokens_dispatched_somewhere, tf.float32) / tf.cast(
num_tokens, tf.float32)
# Total number of tokens that were dispatched (one token could be
# dispatched to multiple experts).
num_tokens_dispatched = tf.math.reduce_sum(dispatch_mask)
# Of the tokens dispatched, how confident was the router in its routing?
router_confidence = tf.math.reduce_sum(
combine_array) / num_tokens_dispatched
    # Experts are always fully utilized under "experts choose tokens" routing.
    expert_usage = 1.0
self.add_metric(fraction_tokens_left_behind,
name="fraction_tokens_left_behind")
self.add_metric(router_confidence, name="router_confidence")
self.add_metric(expert_usage, name="expert_usage")
# Return to default dtype now that router computation is complete.
dispatch_mask = tf.cast(dispatch_mask, self.compute_dtype)
combine_array = tf.cast(combine_array, self.compute_dtype)
output = RouterMask(dispatch_mask, combine_array)
return output
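# Illustrative usage sketch (not part of the library API): dispatching a
# random batch of token embeddings with "experts choose tokens" routing. All
# sizes below are example assumptions.
def _example_experts_choose_routing():
  """Routes random inputs through an ExpertsChooseMaskedRouter."""
  router = ExpertsChooseMaskedRouter(num_experts=4, jitter_noise=0.0)
  # <float32>[num_groups=2, tokens_per_group=8, hidden_dim=16].
  inputs = tf.random.normal([2, 8, 16])
  # Each expert selects its top expert_capacity=2 tokens per group.
  mask = router(inputs, expert_capacity=2, training=False)
  # mask.dispatch_mask and mask.combine_array both have shape
  # [num_groups=2, tokens_per_group=8, num_experts=4, expert_capacity=2].
  return mask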
################## Model layers ##################
class FeedForward(tf.keras.layers.Layer):
"""Feed-forward layer - position independent, dense, nonlinear transformation.
Typically used in an MLP Transformer block.
"""
def __init__(
self,
d_ff: int,
*,
inner_dropout: float = 0.0,
output_dropout: float = 0.0,
activation: Callable[[tf.Tensor], tf.Tensor] = tf.keras.activations.gelu,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
name: str = "feed_forward",
**kwargs):
"""Initializes layer.
Args:
d_ff: Dimension of feed-forward layer.
inner_dropout: The dropout probability to be applied after intermediate
activations.
output_dropout: The dropout probability to be applied after output layer.
activation: (Nonlinear) transform applied in layer.
kernel_initializer: Initialization scheme for kernel.
bias_initializer: Initialization scheme for bias.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.activation = activation
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.intermediate_layer = tf.keras.layers.Dense(
d_ff,
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="intermediate")
self.inner_dropout_layer = tf.keras.layers.Dropout(
inner_dropout)
self.output_dropout_layer = tf.keras.layers.Dropout(output_dropout)
def build(self, input_shape: Tuple[int, int, int]):
"""Creates the input shape dependent output weight variables."""
self.output_layer = tf.keras.layers.Dense(
input_shape[-1],
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="output")
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies layer to inputs.
Args:
inputs: Batch of input embeddings, of shape
<float>[batch_size, seq_len, hidden_dim].
training: Only apply dropout during training.
Returns:
Transformed inputs with the same shape as inputs
<float>[batch_size, seq_len, hidden_dim].
"""
x = self.intermediate_layer(inputs)
x = self.activation(x)
x = self.inner_dropout_layer(x, training=training)
x = self.output_layer(x)
x = self.output_dropout_layer(x, training=training)
return x
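# Minimal usage sketch for FeedForward (illustrative sizes only).
def _example_feed_forward():
  """Applies the feed-forward block to a random batch of embeddings."""
  layer = FeedForward(d_ff=32, inner_dropout=0.1, output_dropout=0.1)
  # <float32>[batch_size=2, seq_len=8, hidden_dim=16].
  inputs = tf.random.normal([2, 8, 16])
  return layer(inputs, training=False)  # Same shape as the input.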
class FeedForwardExperts(tf.keras.layers.Layer):
"""Feed-forward layer with multiple experts.
Note that call() takes inputs with shape
[num_groups, num_experts, expert_capacity, hidden_dim]
which is different from the usual [batch_size, seq_len, hidden_dim] used by
the FeedForward layer.
The experts are independent FeedForward layers of the
same shape, i.e. the kernel doesn't have shape [hidden_dim, out_dim], but
[num_experts, hidden_dim, out_dim].
"""
def __init__(
self,
num_experts: int,
d_ff: int,
*,
inner_dropout: float = 0.0,
output_dropout: float = 0.0,
activation: Callable[[tf.Tensor], tf.Tensor] = tf.keras.activations.gelu,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
name: str = "experts",
**kwargs):
"""Initializes layer.
Args:
num_experts: Number of experts (i.e. number of independent feed-forward
blocks).
d_ff: Dimension of feed-forward layer of each expert.
inner_dropout: The dropout probability to be applied after intermediate
activations.
output_dropout: The dropout probability to be applied after output layer.
activation: (Nonlinear) transform applied in layer.
kernel_initializer: Initialization scheme for kernel.
bias_initializer: Initialization scheme for bias.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.num_experts = num_experts
self.activation = activation
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.intermediate_layer = tf.keras.layers.EinsumDense(
"gech,ehf->gecf",
output_shape=(self.num_experts, None, d_ff),
bias_axes="ef",
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="intermediate")
self.inner_dropout_layer = tf.keras.layers.Dropout(
inner_dropout)
self.output_dropout_layer = tf.keras.layers.Dropout(output_dropout)
def build(self, input_shape: Tuple[int, int, int, int]):
"""Creates the input shape dependent output weight variables."""
if input_shape[1] != self.num_experts:
raise ValueError(
f"Input shape {input_shape} is inconsistent with num_experts "
f"{self.num_experts}.")
self.output_layer = tf.keras.layers.EinsumDense(
"gecf,efh->gech",
output_shape=(self.num_experts, None, input_shape[-1]),
bias_axes="eh",
kernel_initializer=tf_utils.clone_initializer(self.kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self.bias_initializer),
name="output")
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies layer to inputs.
Args:
inputs: Inputs of shape
<float>[num_groups, num_experts, expert_capacity, hidden_dim].
training: Only apply dropout during training.
Returns:
Transformed inputs with the same shape as inputs
<float>[num_groups, num_experts, expert_capacity, hidden_dim].
"""
x = self.intermediate_layer(inputs)
x = self.activation(x)
x = self.inner_dropout_layer(x, training=training)
x = self.output_layer(x)
x = self.output_dropout_layer(x, training=training)
return x
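# Minimal usage sketch for FeedForwardExperts (illustrative sizes only). Note
# the expert-major input layout, which differs from the FeedForward layer.
def _example_feed_forward_experts():
  """Applies independent experts to pre-dispatched token buffers."""
  layer = FeedForwardExperts(num_experts=4, d_ff=32)
  # <float32>[num_groups=2, num_experts=4, expert_capacity=2, hidden_dim=16].
  inputs = tf.random.normal([2, 4, 2, 16])
  return layer(inputs, training=False)  # Same shape as the input.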
class MoeLayer(tf.keras.layers.Layer):
"""Sparse MoE layer with per-token routing.
In this TF implementation, all experts need to fit onto a single device
allowing for batch parallelism only.
Uses Keras add_loss() and add_metric() APIs.
Attributes:
num_experts: Number of experts (i.e. number of independent feed-forward
blocks).
"""
def __init__(
self,
experts: FeedForwardExperts,
router: MaskedRouter,
*,
train_capacity_factor: float = 1.0,
eval_capacity_factor: float = 1.0,
examples_per_group: float = 1.0,
name: str = "moe",
**kwargs):
"""Init.
Args:
experts: Instance of FeedForwardExperts. Needs to have the same
num_experts as the router.
router: Instance of MaskedRouter to route the tokens to
the different experts.
train_capacity_factor: Scaling factor to increase the expert token
capacity during training. This factor plays an analogous, but slightly
different, role depending on the routing assignment algorithm:
- For "tokens choose" routing, the capacity factor only affects the
maximum number of tokens that an expert will process. It does not
affect how many experts a given token is routed to; see the
num_selected_experts attributes of "tokens choose" routers.
- For "experts choose" routing, because experts always fill their
buffer, increasing the capacity factor will increase the number of
tokens that an expert will process AND will indirectly increase the
number of experts that a given token is routed to.
eval_capacity_factor: As above, but used during evaluation.
examples_per_group: Number of examples to form a group. Router then
performs top_k token selection for each expert on a per group basis.
E.g. when `examples_per_group=4.0`, tokens are assigned to experts in
groups formed from 4 examples. When `examples_per_group=0.5`,
each example is split into 2 groups.
`examples_per_group` must divide the local batch size.
A larger group size will result in slower but more accurate top-k and
sorting computations, whereas a smaller group size will result in faster
but more approximate (and potentially less stable) routing choices.
In practice, we find that imperfect routing choices are tolerable and
recommend choosing a group size on the order of 4096 tokens, although
this number will vary based on model configuration and size.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self._experts = experts
self._router = router
self.num_experts = experts.num_experts
assert experts.num_experts == router.num_experts
self._train_capacity_factor = train_capacity_factor
self._eval_capacity_factor = eval_capacity_factor
self._examples_per_group = examples_per_group
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies MoeLayer.
Args:
inputs: Batch of input embeddings of shape
<float>[batch_size, seq_length, hidden_dim].
      training: Only apply dropout and jitter noise during training. If not
        provided, it is taken from tf.keras.backend.learning_phase().
Returns:
Transformed inputs with same shape as inputs:
<float>[batch_size, seq_length, hidden_dim].
    Raises:
      ValueError: If `examples_per_group` is larger than the local
        (per-device) batch size.
"""
if training is None:
training = tf.keras.backend.learning_phase()
# inputs shape [batch_size, seq_length, hidden_dim]
batch_size, seq_length, hidden_dim = inputs.shape
if batch_size is not None:
if self._examples_per_group > batch_size:
raise ValueError(
f"examples_per_group={self._examples_per_group} is larger than the "
"number of examples available in the local (per-device) batch_size="
f"{batch_size}. Either decrease examples_per_group or increase the "
"batch_size.")
tokens_per_group = int(seq_length * self._examples_per_group)
if training:
capacity_factor = self._train_capacity_factor
else:
capacity_factor = self._eval_capacity_factor
# Each group will send expert_capacity tokens to each expert.
expert_capacity = int(
round(capacity_factor * tokens_per_group / self.num_experts))
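    # Worked example (illustrative numbers): with capacity_factor=1.0,
    # tokens_per_group=16 and num_experts=4, each expert processes
    # round(1.0 * 16 / 4) = 4 tokens per group.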
# Reshape batch and sequence/token dimensions for expert routing.
x = tf.reshape(inputs, (-1, tokens_per_group, hidden_dim))
x = self._mask_and_dispatch_to_experts(x, expert_capacity, training)
# Return to original input shape.
x = tf.reshape(x, (-1, seq_length, hidden_dim))
return x
def _mask_and_dispatch_to_experts(self, inputs: tf.Tensor,
expert_capacity: int,
training: bool) -> tf.Tensor:
"""Wraps expert masked routing and dispatching algorithm.
This algorithm takes the following steps:
(1) Compute dispatch mask and combine array using self._router.
(2) Dispatch inputs to experts based on dispatch mask.
(3) Recombine individual expert outputs using combine array.
Args:
inputs: <float>[num_groups, tokens_per_group, hidden_dim] inputs to
send to experts.
expert_capacity: Each group will send this many tokens to each expert.
training: If true, apply jitter noise during routing and dropout
during expert computation.
Returns:
<float>[num_groups, num_tokens_per_group, hidden_dim] outputs from
experts.
"""
# Shape [num_groups, tokens_per_group, num_experts, expert_capacity]
router_mask = self._router(
inputs,
expert_capacity=expert_capacity,
training=training)
# Shape [num_groups, num_experts, expert_capacity, hidden_dim]
expert_inputs = tf.einsum(
"gtec,gth->gech",
router_mask.dispatch_mask,
inputs)
expert_outputs = self._experts(expert_inputs, training=training)
# Shape [num_groups, tokens_per_group, hidden_dim]
combined_outputs = tf.einsum(
"gtec,gech->gth",
router_mask.combine_array,
expert_outputs)
return combined_outputs
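# Illustrative end-to-end sketch (assumed sizes, not part of the library API):
# wiring experts, a router and the MoeLayer together.
def _example_moe_layer():
  """Runs a small MoeLayer over a random batch."""
  experts = FeedForwardExperts(num_experts=4, d_ff=32)
  router = ExpertsChooseMaskedRouter(num_experts=4)
  moe = MoeLayer(experts, router, train_capacity_factor=1.0,
                 eval_capacity_factor=1.0, examples_per_group=2.0)
  # <float32>[batch_size=4, seq_length=8, hidden_dim=16]. Groups of 2 examples
  # give tokens_per_group = 16 and expert_capacity = round(16 / 4) = 4.
  inputs = tf.random.normal([4, 8, 16])
  return moe(inputs, training=False)  # Same shape as the input.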
class MoeLayerWithBackbone(tf.keras.layers.Layer):
"""Sparse MoE layer plus a FeedForward layer evaluated for all tokens.
Uses Keras add_loss() and add_metric() APIs.
"""
def __init__(
self,
moe: MoeLayer,
backbone_d_ff: int,
*,
inner_dropout: float = 0.0,
output_dropout: float = 0.0,
activation: Callable[[tf.Tensor],
tf.Tensor] = tf.keras.activations.gelu,
kernel_initializer: _InitializerType = _DEFAULT_KERNEL_INITIALIZER,
bias_initializer: _InitializerType = _DEFAULT_BIAS_INITIALIZER,
name: str = "moe_with_backbone",
**kwargs):
"""Init.
Args:
moe: Instance of MoeLayer with experts and router.
backbone_d_ff: Dimension of feed-forward layer of a lightweight backbone,
which is evaluated for all tokens.
inner_dropout: The dropout probability to be applied after intermediate
activations for the backbone.
output_dropout: The dropout probability to be applied after the output
of the backbone.
activation: (Nonlinear) transform applied in the backbone.
kernel_initializer: Initialization scheme for kernels in the backbone.
bias_initializer: Initialization scheme for biases in the backbone.
name: Layer name.
**kwargs: Forwarded to super.
"""
super().__init__(name=name, **kwargs)
self._moe = moe
self._backbone = FeedForward(
backbone_d_ff,
inner_dropout=inner_dropout,
output_dropout=output_dropout,
activation=activation,
kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
bias_initializer=tf_utils.clone_initializer(bias_initializer),
name="backbone")
def call(self,
inputs: tf.Tensor,
*,
training: Optional[bool] = None) -> tf.Tensor:
"""Applies MoeLayerWithBackbone layer.
Args:
inputs: Batch of input embeddings of shape
<float>[batch_size, seq_length, hidden_dim].
      training: Only apply dropout and jitter noise during training. If not
        provided, it is taken from tf.keras.backend.learning_phase().
Returns:
Transformed inputs with same shape as inputs:
<float>[batch_size, seq_length, hidden_dim].
"""
return self._backbone(
inputs, training=training) + self._moe(
inputs, training=training)
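# Illustrative sketch (assumed sizes, not part of the library API): combining
# the sparse MoE layer above with a lightweight dense backbone that is
# evaluated for all tokens.
def _example_moe_with_backbone():
  """Runs a MoeLayerWithBackbone over a random batch."""
  experts = FeedForwardExperts(num_experts=4, d_ff=32)
  router = ExpertsChooseMaskedRouter(num_experts=4)
  moe = MoeLayer(experts, router, examples_per_group=2.0)
  layer = MoeLayerWithBackbone(moe, backbone_d_ff=32)
  inputs = tf.random.normal([4, 8, 16])  # [batch_size, seq_length, hidden_dim]
  return layer(inputs, training=False)  # Same shape as the input.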
| 27,430 | 36.993075 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based transformer block layer."""
# pylint: disable=g-classes-have-attributes
from absl import logging
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import multi_channel_attention
from official.nlp.modeling.layers import transformer_encoder_block
from official.nlp.modeling.layers.util import tf_function_if_eager
@tf.keras.utils.register_keras_serializable(package="Text")
class Transformer(transformer_encoder_block.TransformerEncoderBlock):
"""Transformer layer.
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762).
**Warning: this layer is deprecated. Please don't use it. Use the
`TransformerEncoderBlock` layer instead.**
Args:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
output_range: the sequence output range, [0, output_range) by slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for kernel.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
super().__init__(
num_attention_heads=num_attention_heads,
inner_dim=intermediate_size,
inner_activation=intermediate_activation,
output_dropout=dropout_rate,
attention_dropout=attention_dropout_rate,
output_range=output_range,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
use_bias=use_bias,
norm_first=norm_first,
norm_epsilon=norm_epsilon,
inner_dropout=intermediate_dropout,
attention_initializer=attention_initializer,
**kwargs)
logging.warning("The `Transformer` layer is deprecated. Please directly "
"use `TransformerEncoderBlock`.")
def get_config(self):
return {
"num_attention_heads": self._num_heads,
"intermediate_size": self._inner_dim,
"intermediate_activation": self._inner_activation,
"dropout_rate": self._output_dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"output_range": self._output_range,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._inner_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
}
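# Migration sketch (illustrative, based on the constructor mapping above): the
# deprecated Transformer corresponds to a TransformerEncoderBlock built
# roughly as follows.
def _example_encoder_block_migration():
  """Builds the recommended TransformerEncoderBlock replacement."""
  return transformer_encoder_block.TransformerEncoderBlock(
      num_attention_heads=2,
      inner_dim=16,  # Was `intermediate_size`.
      inner_activation="relu",  # Was `intermediate_activation`.
      output_dropout=0.1,  # Was `dropout_rate`.
      attention_dropout=0.1)  # Was `attention_dropout_rate`.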
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class CompiledTransformer(Transformer):
@tf_function_if_eager(experimental_compile=True)
def call(self, inputs):
return super().call(inputs)
@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerDecoderBlock(tf.keras.layers.Layer):
"""Single transformer layer for decoder.
  It has three sub-layers:
  (1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention.
  (3) a position-wise fully connected feed-forward network.
Args:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
multi_channel_cross_attention: Whether to use `MultiChannelAttention` for
cross-attention between target sequences and source sequences.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for kernel.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
multi_channel_cross_attention=False,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
super().__init__(**kwargs)
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_activation = tf.keras.activations.get(
intermediate_activation)
self.dropout_rate = dropout_rate
self.attention_dropout_rate = attention_dropout_rate
self.multi_channel_cross_attention = multi_channel_cross_attention
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
if self.multi_channel_cross_attention:
self._cross_attention_cls = multi_channel_attention.MultiChannelAttention
else:
self._cross_attention_cls = attention.MultiHeadAttention
def build(self, input_shape):
target_tensor_shape = tf.TensorShape(input_shape[0])
if len(target_tensor_shape.as_list()) != 3:
raise ValueError("TransformerLayer expects a three-dimensional input of "
"shape [batch, sequence, width].")
hidden_size = target_tensor_shape[2]
if hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self.num_attention_heads))
self.attention_head_size = int(hidden_size) // self.num_attention_heads
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Self attention.
self.self_attention = attention.CachedAttention(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=tf_utils.clone_initializer(
self._attention_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="self_attention",
**common_kwargs)
self.self_attention_output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="output",
**common_kwargs)
self.self_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.self_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Encoder-decoder attention.
self.encdec_attention = self._cross_attention_cls(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
output_shape=hidden_size,
use_bias=self._use_bias,
kernel_initializer=tf_utils.clone_initializer(
self._attention_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="attention/encdec",
**common_kwargs)
self.encdec_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.encdec_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="attention/encdec_output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Feed-forward projection.
self.intermediate_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self.intermediate_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="intermediate",
**common_kwargs)
self.intermediate_activation_layer = tf.keras.layers.Activation(
self.intermediate_activation)
self._intermediate_dropout_layer = tf.keras.layers.Dropout(
rate=self._intermediate_dropout)
self.output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="output",
**common_kwargs)
self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
self.output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32")
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self.intermediate_size,
"intermediate_activation": self.intermediate_activation,
"dropout_rate": self.dropout_rate,
"attention_dropout_rate": self.attention_dropout_rate,
"multi_channel_cross_attention": self.multi_channel_cross_attention,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def common_layers_with_encoder(self):
"""Gets layer objects that can make a Transformer encoder block."""
return [
self.self_attention, self.self_attention_layer_norm,
self.intermediate_dense, self.output_dense, self.output_layer_norm
]
def call(self, inputs, cache=None, decode_loop_step=None):
if self.multi_channel_cross_attention:
if len(inputs) != 5:
raise ValueError(
"TransformerDecoderBlock must have 5 inputs, when it uses "
"multi_channel_cross_attention. But it got: %d" % len(inputs))
elif len(inputs) != 4:
raise ValueError(
"TransformerDecoderBlock must have 4 inputs, but it got: %d" %
len(inputs))
input_tensor, memory, attention_mask, self_attention_mask = inputs[:4]
source_tensor = input_tensor
if self._norm_first:
input_tensor = self.self_attention_layer_norm(input_tensor)
self_attention_output, cache = self.self_attention(
query=input_tensor,
value=input_tensor,
attention_mask=self_attention_mask,
cache=cache,
decode_loop_step=decode_loop_step)
self_attention_output = self.self_attention_dropout(self_attention_output)
if self._norm_first:
self_attention_output = source_tensor + self_attention_output
else:
self_attention_output = self.self_attention_layer_norm(
input_tensor + self_attention_output)
if self._norm_first:
source_self_attention_output = self_attention_output
self_attention_output = self.encdec_attention_layer_norm(
self_attention_output)
cross_attn_inputs = dict(
query=self_attention_output,
value=memory,
attention_mask=attention_mask)
if self.multi_channel_cross_attention:
      # Accesses the fifth input tensor for the doc-attention probabilities.
cross_attn_inputs["context_attention_weights"] = inputs[-1]
attention_output = self.encdec_attention(**cross_attn_inputs)
attention_output = self.encdec_attention_dropout(attention_output)
if self._norm_first:
attention_output = source_self_attention_output + attention_output
else:
attention_output = self.encdec_attention_layer_norm(
self_attention_output + attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self.output_layer_norm(attention_output)
intermediate_output = self.intermediate_dense(attention_output)
intermediate_output = self.intermediate_activation_layer(
intermediate_output)
intermediate_output = self._intermediate_dropout_layer(intermediate_output)
layer_output = self.output_dense(intermediate_output)
layer_output = self.output_dropout(layer_output)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self.output_layer_norm(layer_output + attention_output)
return layer_output, cache
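# Illustrative usage sketch (assumed shapes, not part of the library API): a
# single decoder block attending over a random target sequence and random
# encoder memory.
def _example_decoder_block():
  """Runs TransformerDecoderBlock on random target/memory tensors."""
  block = TransformerDecoderBlock(
      num_attention_heads=2,
      intermediate_size=16,
      intermediate_activation="relu")
  batch, target_len, memory_len, hidden = 3, 4, 6, 8
  target = tf.random.normal([batch, target_len, hidden])
  memory = tf.random.normal([batch, memory_len, hidden])
  cross_mask = tf.ones([batch, target_len, memory_len])
  self_mask = tf.ones([batch, target_len, target_len])
  output, cache = block([target, memory, cross_mask, self_mask])
  return output, cache  # output: [batch, target_len, hidden]; cache is None.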
| 19,414 | 42.925339 | 80 | py |
models | models-master/official/nlp/modeling/layers/block_diag_feedforward_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based gated feedforward layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import block_diag_feedforward
class BlockDiagFeedforwardTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(BlockDiagFeedforwardTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.parameters(
(1, True, "float32"),
(1, True, "mixed_float16"),
(1, False, "float32"),
(1, False, "mixed_float16"),
(2, True, "float32"),
(2, True, "mixed_float16"),
(2, False, "float32"),
(2, False, "mixed_float16"),
)
def test_layer_creation(self, num_blocks, apply_mixing, dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
intermediate_size=128,
intermediate_activation="relu",
dropout=0.1,
num_blocks=num_blocks,
apply_mixing=apply_mixing,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = block_diag_feedforward.BlockDiagFeedforward(**kwargs)
sequence_length = 64
width = 128
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
    # The layer's output should have the same shape as its input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.parameters(
(1, True, "float32"),
(1, True, "mixed_float16"),
(1, False, "float32"),
(1, False, "mixed_float16"),
(2, True, "float32"),
(2, True, "mixed_float16"),
(2, False, "float32"),
(2, False, "mixed_float16"),
)
def test_layer_invocation(self, num_blocks, apply_mixing, dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
intermediate_size=16,
intermediate_activation="relu",
dropout=0.1,
num_blocks=num_blocks,
apply_mixing=apply_mixing,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = block_diag_feedforward.BlockDiagFeedforward(**kwargs)
sequence_length = 16
width = 32
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_data = model.predict(input_data)
self.assertEqual(output_data.shape, (batch_size, sequence_length, width))
def test_get_config(self):
kwargs = dict(
intermediate_size=16,
intermediate_activation="relu",
dropout=0.1,
num_blocks=2,
apply_mixing=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = block_diag_feedforward.BlockDiagFeedforward(**kwargs)
new_layer = block_diag_feedforward.BlockDiagFeedforward.from_config(
test_layer.get_config())
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == "__main__":
tf.test.main()
| 4,171 | 34.355932 | 80 | py |
models | models-master/official/nlp/modeling/layers/position_embedding_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based positional embedding layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import position_embedding
class PositionEmbeddingLayerTest(tf.test.TestCase):
def test_static_layer_output_shape(self):
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
test_layer = position_embedding.PositionEmbedding(
max_length=sequence_length)
width = 30
input_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [None, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
# The default output dtype for this layer should be tf.float32.
self.assertEqual(tf.float32, output_tensor.dtype)
def test_non_default_axis_static(self):
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
test_layer = position_embedding.PositionEmbedding(
max_length=sequence_length, seq_axis=2)
width = 30
input_tensor = tf.keras.Input(shape=(width, sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [None, width, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
# The default output dtype for this layer should be tf.float32.
self.assertEqual(tf.float32, output_tensor.dtype)
def test_float16_dtype(self):
# Create a 3-dimensional input (the first dimension is implicit).
sequence_length = 21
test_layer = position_embedding.PositionEmbedding(
max_length=sequence_length, dtype="float16")
width = 30
input_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(input_tensor)
# When using static positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions save batch.
expected_output_shape = [None, sequence_length, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
    # With dtype="float16", the output dtype should be tf.float16.
self.assertEqual(tf.float16, output_tensor.dtype)
def test_dynamic_layer_output_shape(self):
max_sequence_length = 40
test_layer = position_embedding.PositionEmbedding(
max_length=max_sequence_length)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
# When using dynamic positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions - but may be None if
# the input shape is None there.
expected_output_shape = [None, None, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
def test_non_default_axis_dynamic(self):
max_sequence_length = 60
test_layer = position_embedding.PositionEmbedding(
max_length=max_sequence_length, seq_axis=2)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, None, width))
output_tensor = test_layer(input_tensor)
# When using dynamic positional embedding shapes, the output is expected
# to be the same as the input shape in all dimensions - but may be None if
# the input shape is None there.
expected_output_shape = [None, None, None, width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
def test_dynamic_layer_slicing(self):
max_sequence_length = 40
test_layer = position_embedding.PositionEmbedding(
max_length=max_sequence_length)
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
# Create input data that is shorter than max_sequence_length, which should
# trigger a down-slice.
input_length = 17
# Note: This test explicitly uses a batch size of 1. This is to get around
# Keras' restriction on Model invocations: inputs are expected to have the
# same batch cardinality as outputs. In practice, this layer should be used
# inside a model, where it can be projected when added to another tensor.
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
class RelativePositionEmbeddingLayerTest(tf.test.TestCase):
def test_relative_tensor_input(self):
hidden_size = 8
test_layer = position_embedding.RelativePositionEmbedding(
hidden_size=hidden_size)
# create a 3-dimensional input for test_layer to infer length as 1.
input_tensor = tf.constant([[[0] * hidden_size]])
output_tensor = test_layer(input_tensor)
    # The expected output is the theoretical result of the input based on
    # the sine-cosine relative position embedding formula.
expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]])
self.assertAllEqual(output_tensor, expected_output_tensor)
def test_relative_length_input(self):
hidden_size = 8
    # When we do not have a tensor input, we explicitly pass the length
    # when calling test_layer.
test_layer = position_embedding.RelativePositionEmbedding(
hidden_size=hidden_size)
input_tensor = None
output_tensor = test_layer(input_tensor, length=1)
    # The expected output is the theoretical result of the input based on
    # the sine-cosine relative position embedding formula.
expected_output_tensor = tf.constant([[0, 0, 0, 0, 1, 1, 1, 1]])
self.assertAllEqual(output_tensor, expected_output_tensor)
class RelativePositionBiasTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(("bidirectional", True),
("unidirectional", False))
def test_relative_position_bias(self, bidirectional):
query = tf.zeros((4, 4, 2))
key = tf.zeros((4, 2, 2))
l = position_embedding.RelativePositionBias(
num_heads=3,
bidirectional=bidirectional,
name="foo")
self.assertEqual(l(query, key).shape, (4, 3, 4, 2))
self.assertLen(l.trainable_variables, 1)
self.assertEqual(l.trainable_variables[0].name, "foo/rel_embedding:0")
def test_relative_position_bucket(self):
context_position = tf.range(3)[:, None]
memory_position = tf.range(2)[None, :]
relative_position = memory_position - context_position
outputs = position_embedding._relative_position_bucket(relative_position)
self.assertAllEqual(outputs.numpy(), np.array([[0, 17], [1, 0], [2, 1]]))
outputs = position_embedding._relative_position_bucket(
relative_position, bidirectional=False)
self.assertAllEqual(outputs.numpy(), np.array([[0, 0], [1, 0], [2, 1]]))
if __name__ == "__main__":
tf.test.main()
| 8,009 | 41.606383 | 79 | py |
models | models-master/official/nlp/modeling/layers/reuse_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attention layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import reuse_attention as attention
class ReuseMultiHeadAttentionTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("key_value_same_proj", None, None, [40, 80]),
("key_value_different_proj", 32, 60, [40, 60]),
)
def test_non_masked_attention(self, value_dim, output_shape, output_dims):
"""Test that the attention layer can be created without a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12,
key_dim=64,
value_dim=value_dim,
output_shape=output_shape)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
value = tf.keras.Input(shape=(20, 80))
output = test_layer(query=query, value=value)
self.assertEqual(output.shape.as_list(), [None] + output_dims)
def test_non_masked_self_attention(self):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_attention_scores(self):
"""Test attention outputs with coefficients."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output, coef = test_layer(query, query, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])
def test_attention_scores_with_values(self):
"""Test attention outputs with coefficients."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
value = tf.keras.Input(shape=(60, 80))
output, coef = test_layer(query, value, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 60])
@parameterized.named_parameters(
("with_bias", True, 0), ("no_bias", False, 0),
("reuse_all_with_bias", True, -1), ("reuse_all_no_bias", False, -1),
("reuse_partial_with_bias", True, 1),
("reuse_partial_no_bias", False, 1))
def test_masked_attention(self, use_bias, reuse_attention):
"""Test with a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2, use_bias=use_bias,
reuse_attention=reuse_attention)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = tf.keras.Input(shape=(4, 8))
value = tf.keras.Input(shape=(2, 8))
mask_tensor = tf.keras.Input(shape=(4, 2))
reuse_attention_scores = tf.keras.Input(shape=(2, 4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor,
reuse_attention_scores=reuse_attention_scores)
# Create a model containing the test layer.
model = tf.keras.Model(
[query, value, mask_tensor, reuse_attention_scores], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
reuse_scores = np.random.random_sample((batch_size, 2, 4, 2))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict(
[from_data, to_data, mask_data, reuse_scores])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict(
[from_data, to_data, null_mask_data, reuse_scores])
    # Because one input is masked and one is not, the outputs should not be
    # the same.
if reuse_attention == -1:
self.assertAllEqual(masked_output_data, unmasked_output_data)
else:
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Tests the layer with three inputs: Q, K, V.
key = tf.keras.Input(shape=(2, 8))
output = test_layer(query, value=value, key=key, attention_mask=mask_tensor,
reuse_attention_scores=reuse_attention_scores)
model = tf.keras.Model(
[query, value, key, mask_tensor, reuse_attention_scores], output)
masked_output_data = model.predict(
[from_data, to_data, to_data, mask_data, reuse_scores])
unmasked_output_data = model.predict(
[from_data, to_data, to_data, null_mask_data, reuse_scores])
    # Because one input is masked and one is not, the outputs should not be
    # the same.
if reuse_attention == -1:
self.assertAllEqual(masked_output_data, unmasked_output_data)
else:
self.assertNotAllClose(masked_output_data, unmasked_output_data)
if reuse_attention > 0:
self.assertLen(test_layer._output_dense, 2)
if use_bias:
if reuse_attention == 0:
self.assertLen(test_layer._query_dense.trainable_variables, 2)
self.assertLen(test_layer._output_dense[0].trainable_variables, 2)
if len(test_layer._output_dense) == 2:
self.assertLen(test_layer._output_dense[1].trainable_variables, 1)
else:
if reuse_attention == 0:
self.assertLen(test_layer._query_dense.trainable_variables, 1)
self.assertLen(test_layer._output_dense[0].trainable_variables, 1)
if len(test_layer._output_dense) == 2:
self.assertLen(test_layer._output_dense[1].trainable_variables, 1)
def test_initializer(self):
"""Test with a specified initializer."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12,
key_dim=64,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_masked_attention_with_scores(self):
"""Test with a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = tf.keras.Input(shape=(4, 8))
value = tf.keras.Input(shape=(2, 8))
mask_tensor = tf.keras.Input(shape=(4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor)
# Create a model containing the test layer.
model = tf.keras.Model([query, value, mask_tensor], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict([from_data, to_data, mask_data])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict([from_data, to_data, null_mask_data])
    # Because one input is masked and one is not, the outputs should not be
    # the same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Create a model containing attention scores.
output, scores = test_layer(
query=query, value=value, attention_mask=mask_tensor,
return_attention_scores=True)
model = tf.keras.Model([query, value, mask_tensor], [output, scores])
masked_output_data_score, masked_score = model.predict(
[from_data, to_data, mask_data])
unmasked_output_data_score, unmasked_score = model.predict(
[from_data, to_data, null_mask_data])
self.assertNotAllClose(masked_output_data_score, unmasked_output_data_score)
self.assertAllClose(masked_output_data, masked_output_data_score)
self.assertAllClose(unmasked_output_data, unmasked_output_data_score)
self.assertNotAllClose(masked_score, unmasked_score)
@parameterized.named_parameters(
("4d_inputs_1freebatch_mask2", [3, 4], [3, 2], [4, 2],
(2,)), ("4d_inputs_1freebatch_mask3", [3, 4], [3, 2], [3, 4, 2], (2,)),
("4d_inputs_1freebatch_mask4", [3, 4], [3, 2], [3, 2, 4, 2],
(2,)), ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)),
("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3)),
("5D_inputs_2D_attention_fullmask", [5, 3, 4], [5, 3, 2], [5, 3, 4, 3, 2],
(2, 3)))
def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes):
"""Test with a mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2, attention_axes=attention_axes)
batch_size, hidden_size = 3, 8
# Generate data for the input (non-mask) tensors.
query_shape = [batch_size] + q_dims + [hidden_size]
value_shape = [batch_size] + v_dims + [hidden_size]
mask_shape = [batch_size] + mask_dims
query = 10 * np.random.random_sample(query_shape)
value = 10 * np.random.random_sample(value_shape)
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=mask_shape).astype("bool")
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones(mask_shape)
# Because one data is masked and one is not, the outputs should not be the
# same.
query_tensor = tf.keras.Input(query_shape[1:], name="query")
value_tensor = tf.keras.Input(value_shape[1:], name="value")
mask_tensor = tf.keras.Input(mask_shape[1:], name="mask")
output = test_layer(query=query_tensor, value=value_tensor,
attention_mask=mask_tensor)
model = tf.keras.Model([query_tensor, value_tensor, mask_tensor], output)
self.assertNotAllClose(
model.predict([query, value, mask_data]),
model.predict([query, value, null_mask_data]))
def test_dropout(self):
test_layer = attention.ReuseMultiHeadAttention(
num_heads=2, key_dim=2, dropout=0.5)
# Generate data for the input (non-mask) tensors.
from_data = tf.keras.backend.ones(shape=(32, 4, 8))
to_data = tf.keras.backend.ones(shape=(32, 2, 8))
train_out = test_layer(from_data, to_data, None, None, None, True)
test_out = test_layer(from_data, to_data, None, None, None, False)
    # Training and inference outputs should differ because dropout is only
    # active in training mode.
self.assertNotAllClose(
tf.keras.backend.eval(train_out),
tf.keras.backend.eval(test_out))
def test_non_masked_self_attention_with_reuse(self):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64, reuse_attention=True)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
reuse_scores = tf.keras.Input(shape=(12, 40, 40))
output = test_layer(query, query, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
@parameterized.named_parameters(
("no_reuse_with_pe_max_seq_length_20", False, 20),
("reuse_all_with_pe_max_seq_length_20", True, 20),
("reuse_partial_with_pe_max_seq_length_20", 5, 20),
("no_reuse_with_pe_max_seq_length_40", False, 40),
("reuse_all_with_pe_max_seq_length_40", True, 40),
("reuse_partial_with_pe_max_seq_length_40", 5, 40))
def test_non_masked_self_attention_with_relative_pe(self, reuse_attention,
pe_max_seq_length):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = attention.ReuseMultiHeadAttention(
num_heads=12, key_dim=64, reuse_attention=reuse_attention,
use_relative_pe=True, pe_max_seq_length=pe_max_seq_length)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
reuse_scores = tf.keras.Input(shape=(12, 40, 40))
output = test_layer(query, query, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
query = tf.keras.Input(shape=(30, 80))
reuse_scores = tf.keras.Input(shape=(12, 30, 30))
output = test_layer(query, query, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 30, 80])
query = tf.keras.Input(shape=(30, 80))
key = tf.keras.Input(shape=(20, 80))
reuse_scores = tf.keras.Input(shape=(12, 30, 20))
output = test_layer(query, key, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 30, 80])
query = tf.keras.Input(shape=(50, 80))
key = tf.keras.Input(shape=(60, 80))
reuse_scores = tf.keras.Input(shape=(12, 50, 60))
output = test_layer(query, key, reuse_attention_scores=reuse_scores)
self.assertEqual(output.shape.as_list(), [None, 50, 80])
if __name__ == "__main__":
tf.test.main()
| 14,319 | 45.95082 | 80 | py |
models | models-master/official/nlp/modeling/layers/util.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based transformer block layer."""
import functools
import tensorflow as tf
class TfFunctionIfEagerDecorator(object):
"""Helper decorator function to optionally apply the @tf.function annotation."""
def __init__(self, **kwargs):
self.func_kwargs = kwargs
def __call__(self, func):
@functools.wraps(func)
def wrapped_func(*args):
# TODO(b/150147476, b/150024785): Fix tf.function in TF1 crash.
if not hasattr(tf.compat.v1, 'executing_eagerly_outside_functions'
) or tf.compat.v1.executing_eagerly_outside_functions():
return tf.function(func=func, **self.func_kwargs)(*args)
return func(*args)
# Cache the created function in self._call_impl.
if not hasattr(self, '_call_impl'):
self._call_impl = wrapped_func
return self._call_impl
def tf_function_if_eager(**kwargs):
"""Applies the @tf.function decorator only if running in eager mode."""
return TfFunctionIfEagerDecorator(**kwargs)
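# A minimal usage sketch (illustrative; the function name `do_step` and the
# choice of tf.function kwargs are assumptions, not part of this module):
#
#   @tf_function_if_eager(jit_compile=True)
#   def do_step(x):
#     return x * 2
#
# When eager execution is active, `do_step` runs wrapped in
# tf.function(jit_compile=True); under TF1 graph mode it runs as a plain
# Python function.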
def filter_kwargs(kwargs):
"""In place removes unused options in kwargs.
This function removes the construction signatures: e.g.
number_attention_heads... in TransformerEncoderBlock. This is needed,
otherwise base_layer.py in Keras will complain.
Args:
kwargs: keyword arguments to be filtered.
"""
  # This is the union of the signatures of TransformerEncoderBlock and
  # ReZeroTransformer. Every Transformer block that uses a signature
  # compatible with TransformerEncoderBlock should call this function before
  # the base constructor super().__init__(**kwargs).
denylist = [
'num_attention_heads', 'intermediate_size', 'intermediate_activation',
'inner_dim', 'inner_activation', 'output_range', 'kernel_initializer',
'bias_initializer', 'kernel_regularizer', 'bias_regularizer',
'activity_regularizer', 'kernel_constraint', 'bias_constraint',
'use_bias', 'norm_first', 'norm_epsilon', 'output_dropout',
'attention_dropout', 'inner_dropout', 'attention_initializer',
'attention_axes', 'share_rezero'
]
for unused_key in denylist:
kwargs.pop(unused_key, None)
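# Usage sketch (illustrative; `MyTransformerBlock` is a hypothetical class):
#
#   class MyTransformerBlock(tf.keras.layers.Layer):
#     def __init__(self, **kwargs):
#       filter_kwargs(kwargs)  # Drop TransformerEncoderBlock-style options.
#       super().__init__(**kwargs)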
| 2,723 | 36.315068 | 82 | py |
models | models-master/official/nlp/modeling/layers/relative_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attention layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.nlp.modeling.layers import relative_attention
def _create_mock_attention_data(
num_heads,
key_dim,
value_dim,
seq_length,
batch_size,
memory_length=0,
num_predictions=2,
two_stream=False,
include_state=False,
include_mask=False,
include_segment=False):
"""Creates mock testing data.
Args:
num_heads: `int`, Number of attention heads.
key_dim: `int`, Size of query head.
value_dim: `int`, Size of key, value dim.
seq_length: `int`, Sequence length of the input.
batch_size: `int`, the batch size.
memory_length: optional `int`, the length of the state. Defaults to 0.
num_predictions: `int`, the number of predictions used in two stream
attention.
two_stream: `bool`, whether or not to generate two stream data.
include_state: optional `bool`, whether or not to include state data.
include_mask: optional `bool`, whether or not to include mask data.
include_segment: optional `bool`, whether or not to include segment data.
Returns:
A dictionary with `str` as keys and `Tensor` as values.
"""
query_shape = (batch_size, seq_length, key_dim)
value_shape = (batch_size, seq_length, value_dim)
encoding_shape = (batch_size, seq_length * 2, key_dim)
attention_bias_shape = (num_heads, key_dim)
data = dict(
relative_position_encoding=tf.random.normal(shape=encoding_shape),
content_attention_bias=tf.random.normal(shape=attention_bias_shape),
positional_attention_bias=tf.random.normal(shape=attention_bias_shape))
if two_stream:
query_stream_shape = (batch_size, num_predictions, key_dim)
target_mapping_shape = (batch_size, num_predictions, seq_length)
stream_data = dict(
content_stream=tf.random.normal(shape=query_shape),
query_stream=tf.random.normal(shape=query_stream_shape),
target_mapping=tf.random.normal(shape=target_mapping_shape))
else:
stream_data = dict(
query=tf.random.normal(shape=query_shape),
value=tf.random.normal(shape=value_shape),
key=tf.random.normal(shape=value_shape))
data.update(stream_data)
if include_state:
total_seq_length = seq_length + memory_length
state_data = dict(
state=tf.random.normal(shape=(batch_size, memory_length, value_dim)))
data.update(state_data)
else:
total_seq_length = seq_length
if include_mask:
mask_shape = (batch_size, num_heads, seq_length, total_seq_length)
mask_data = np.random.randint(2, size=mask_shape).astype("float32")
if two_stream:
mask_data = dict(
content_attention_mask=mask_data,
query_attention_mask=mask_data)
else:
mask_data = dict(attention_mask=mask_data)
data.update(mask_data)
if include_segment:
segment_encoding_shape = (2, num_heads, key_dim)
segment_matrix = np.random.randint(
2, size=(batch_size, seq_length, total_seq_length))
segment_matrix = tf.math.equal(segment_matrix, 1)
segment_data = dict(
segment_attention_bias=tf.random.normal(shape=attention_bias_shape),
segment_encoding=tf.random.normal(shape=segment_encoding_shape),
segment_matrix=segment_matrix)
data.update(segment_data)
return data
class MultiHeadRelativeAttentionTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
value_dim=[32, 64],
memory_length=[0, 4],
state=[True, False],
mask=[True, False],
segment=[True, False]))
def test_attention_scores(self,
value_dim,
memory_length,
state,
mask,
segment):
"""Tests combinations of attention score calculations."""
batch_size, num_heads, key_dim, seq_length = 2, 12, 64, 8
test_layer = relative_attention.MultiHeadRelativeAttention(
num_heads=num_heads,
key_dim=key_dim,
value_dim=value_dim)
data = _create_mock_attention_data(
num_heads=num_heads,
key_dim=key_dim,
value_dim=value_dim,
seq_length=seq_length,
memory_length=memory_length,
two_stream=False,
batch_size=batch_size,
include_state=state,
include_mask=mask,
include_segment=segment)
output = test_layer(**data)
self.assertEqual(output.shape, [batch_size, seq_length, key_dim])
class TwoStreamRelativeAttentionTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
num_predictions=[2, 10],
memory_length=[0, 4],
state=[True, False],
mask=[True, False],
segment=[True, False]))
def test_attention_scores(self,
num_predictions,
memory_length,
state,
mask,
segment):
"""Tests combinations of attention score calculations."""
batch_size, num_heads, key_dim, seq_length = 2, 12, 64, 8
test_layer = relative_attention.TwoStreamRelativeAttention(
num_heads=num_heads,
key_dim=key_dim,
value_dim=key_dim)
data = _create_mock_attention_data(
num_heads=num_heads,
key_dim=key_dim,
value_dim=key_dim,
seq_length=seq_length,
memory_length=memory_length,
num_predictions=num_predictions,
two_stream=True,
batch_size=batch_size,
include_state=state,
include_mask=mask,
include_segment=segment)
    content_output, query_output = test_layer(**data)
self.assertEqual(content_output.shape, [batch_size, seq_length, key_dim])
self.assertEqual(query_output.shape, [batch_size, num_predictions, key_dim])
if __name__ == "__main__":
np.random.seed(0)
tf.random.set_seed(0)
tf.test.main()
| 6,685 | 34.189474 | 80 | py |
models | models-master/official/nlp/modeling/layers/factorized_embedding.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A factorized embedding layer."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import on_device_embedding
@tf.keras.utils.register_keras_serializable(package='Text')
class FactorizedEmbedding(on_device_embedding.OnDeviceEmbedding):
"""A factorized embeddings layer for supporting larger embeddings.
Arguments:
vocab_size: Number of elements in the vocabulary.
embedding_width: Width of word embeddings.
output_dim: The output dimension of this layer.
initializer: The initializer to use for the embedding weights. Defaults to
"glorot_uniform".
use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding
lookup. Defaults to False (that is, using tf.gather). Setting this option
to True may improve performance, especially on small vocabulary sizes, but
will generally require more memory.
    scale_factor: Whether to scale the output embeddings. Defaults to None
      (that is, no scaling). Setting this option to a float multiplies the
      output embedding values by scale_factor.
"""
def __init__(self,
vocab_size: int,
embedding_width: int,
output_dim: int,
initializer='glorot_uniform',
use_one_hot=False,
scale_factor=None,
**kwargs):
super().__init__(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=use_one_hot,
scale_factor=scale_factor,
**kwargs)
self._output_dim = output_dim
def get_config(self):
config = {'output_dim': self._output_dim}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=self._output_dim,
bias_axes=None,
kernel_initializer=tf_utils.clone_initializer(self._initializer),
name='embedding_projection')
super().build(input_shape)
def call(self, inputs):
output = super().call(inputs)
return self._embedding_projection(output)
| 2,892 | 36.571429 | 80 | py |
models | models-master/official/nlp/modeling/layers/mat_mul_with_margin.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dot product with margin layer."""
# pylint: disable=g-classes-have-attributes
from typing import Tuple
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Text')
class MatMulWithMargin(tf.keras.layers.Layer):
"""This layer computs a dot product matrix given two encoded inputs.
Args:
logit_scale: The scaling factor of dot products when doing training.
logit_margin: The margin value between the positive and negative examples
when doing training.
"""
def __init__(self,
logit_scale=1.0,
logit_margin=0.0,
**kwargs):
super().__init__(**kwargs)
self.logit_scale = logit_scale
self.logit_margin = logit_margin
def call(self, left_encoded: tf.Tensor,
right_encoded: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
batch_size = tf_utils.get_shape_list(
left_encoded, name='sequence_output_tensor')[0]
# Left -> Right dot product.
left_dot_products = tf.matmul(
left_encoded, right_encoded, transpose_b=True)
self.left_logits = self.logit_scale * (
left_dot_products - self.logit_margin * tf.eye(batch_size))
# Right -> Left dot product.
self.right_logits = tf.transpose(self.left_logits)
return (self.left_logits, self.right_logits)
def get_config(self):
config = {
'logit_scale': self.logit_scale,
'logit_margin': self.logit_margin}
config.update(super().get_config())
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 2,250 | 31.157143 | 77 | py |
models | models-master/official/nlp/modeling/layers/relative_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based relative attention layers."""
import math
import string
import tensorflow as tf
_CHR_IDX = string.ascii_lowercase
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
return equation, bias_axes, len(output_str)
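# For example (illustrative): _build_proj_equation(free_dims=2, bound_dims=1,
# output_dims=2) returns ("abc,cde->abde", "de", 4), i.e. the einsum that
# projects a [batch, seq, dim] tensor to [batch, seq, num_heads, head_dim].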
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
def _rel_shift(x, klen=-1):
"""Performs relative shift to form the relative attention score."""
x = tf.transpose(x, perm=[2, 3, 0, 1])
x_size = tf.shape(x)
x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])
x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])
x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1])
x = tf.transpose(x, perm=[2, 3, 0, 1])
return x
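# Shape sketch for _rel_shift (illustrative): given scores x of shape
# [B, N, T, L] computed against L relative positions, the transpose /
# reshape / slice sequence above realigns each query row so that column j
# corresponds to key position j, then truncates to klen keys, yielding a
# [B, N, T, klen] tensor. E.g. with T=8, L=16 and klen=8 (no memory), the
# output is [B, N, 8, 8].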
@tf.keras.utils.register_keras_serializable(package="Text")
class MultiHeadRelativeAttention(tf.keras.layers.MultiHeadAttention):
"""A multi-head attention layer with relative attention + position encoding.
This layer shares the same input/output projections as the common
`tf.keras.layers.MultiHeadAttention` layer.
  When it calculates attention logits, the position encoding is projected to
  form relative keys. The logits are composed of shifted relative logits and
  content logits.
  **Note: This layer is currently experimental.**
Attributes:
kernel_initializer: The kernel initializer. Defaults to variance_scaling.
Call args:
query: Query `Tensor` of shape `[B, T, dim]`.
value: Value `Tensor` of shape `[B, S, dim]`.
content_attention_bias: Bias `Tensor` for content based attention of shape
`[num_heads, dim]`.
positional_attention_bias: Bias `Tensor` for position based attention of
shape `[num_heads, dim]`.
key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
relative_position_encoding: Relative positional encoding `Tensor` of shape
`[B, L, dim]`.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet of shape `[B, S, S + M]`.
segment_encoding: Optional `Tensor` representing the segmentation encoding
as used in XLNet of shape `[2, num_heads, dim]`.
segment_attention_bias: Optional trainable bias parameter added to the query
      head when calculating the segment-based attention score used in XLNet of
shape `[num_heads, dim]`.
state: Optional `Tensor` of shape `[B, M, E]` where M is the length of the
state or memory. If passed, this is also attended over as in Transformer
XL.
attention_mask: A boolean mask of shape `[B, T, S]` that prevents attention
to certain positions.
"""
def __init__(self,
kernel_initializer="variance_scaling",
**kwargs):
super().__init__(kernel_initializer=kernel_initializer,
**kwargs)
def _build_from_signature(self, query, value, key=None):
super(MultiHeadRelativeAttention, self)._build_from_signature(
query=query,
value=value,
key=key)
if hasattr(value, "shape"):
value_shape = tf.TensorShape(value.shape)
else:
value_shape = value
if key is None:
key_shape = value_shape
elif hasattr(key, "shape"):
key_shape = tf.TensorShape(key.shape)
else:
key_shape = key
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
with tf.init_scope():
einsum_equation, _, output_rank = _build_proj_equation(
key_shape.rank - 1, bound_dims=1, output_dims=2)
self._encoding_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=None,
name="encoding",
**common_kwargs)
def compute_attention(self,
query,
key,
value,
position,
content_attention_bias,
positional_attention_bias,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
attention_mask=None):
"""Computes the attention.
This function defines the computation inside `call` with projected
multihead Q, K, V, R inputs.
Args:
query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.
key: Projected key `Tensor` of shape `[B, S + M, N, key_dim]`.
value: Projected value `Tensor` of shape `[B, S + M, N, key_dim]`.
position: Projected position `Tensor` of shape `[B, L, N, key_dim]`.
content_attention_bias: Trainable bias parameter added to the query head
when calculating the content-based attention score.
positional_attention_bias: Trainable bias parameter added to the query
head when calculating the position-based attention score.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet.
segment_encoding: Optional trainable `Tensor` representing the
segmentation encoding as used in XLNet.
segment_attention_bias: Optional trainable bias parameter added to the
        query head when calculating the segment-based attention score used in
XLNet.
attention_mask: (default None) Optional mask that is added to attention
logits. If state is not None, the mask source sequence dimension should
extend M.
Returns:
attention_output: Multi-headed output of attention computation of shape
`[B, S, N, key_dim]`.
"""
content_attention = tf.einsum(self._dot_product_equation,
key,
query + content_attention_bias)
positional_attention = tf.einsum(self._dot_product_equation,
position,
query + positional_attention_bias)
positional_attention = _rel_shift(
positional_attention, klen=tf.shape(content_attention)[3])
if segment_matrix is not None:
segment_attention = tf.einsum("bind,snd->bnis",
query + segment_attention_bias,
segment_encoding)
target_shape = tf.shape(positional_attention)
segment_attention = tf.where(
tf.broadcast_to(tf.expand_dims(segment_matrix, 1), target_shape),
tf.broadcast_to(segment_attention[:, :, :, 1:], target_shape),
tf.broadcast_to(segment_attention[:, :, :, :1], target_shape))
attention_sum = (
content_attention + positional_attention + segment_attention)
else:
attention_sum = content_attention + positional_attention
attention_scores = tf.multiply(
attention_sum, 1.0 / math.sqrt(float(self._key_dim)))
attention_scores = self._masked_softmax(attention_scores, attention_mask)
attention_output = self._dropout_layer(attention_scores)
attention_output = tf.einsum(self._combine_equation,
attention_output,
value)
return attention_output
def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
query,
value,
content_attention_bias,
positional_attention_bias,
key=None,
relative_position_encoding=None,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
state=None,
attention_mask=None):
"""Compute multi-head relative attention over inputs.
Size glossary:
* Number of heads (H): the number of attention heads.
* Value size (V): the size of each value embedding per head.
* Key size (K): the size of each key embedding per head. Equally, the size
of each query embedding per head. Typically K <= V.
* Batch dimensions (B).
* Query (target) attention axes shape (T).
* Value (source) attention axes shape (S), the rank must match the target.
* Encoding length (L): The relative positional encoding length.
Args:
query: attention input.
value: attention input.
content_attention_bias: A trainable bias parameter added to the query head
when calculating the content-based attention score.
positional_attention_bias: A trainable bias parameter added to the query
head when calculating the position-based attention score.
key: attention input.
relative_position_encoding: relative positional encoding for key and
value.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet.
segment_encoding: Optional `Tensor` representing the segmentation encoding
as used in XLNet.
segment_attention_bias: Optional trainable bias parameter added to the
        query head when calculating the segment-based attention score used in
XLNet.
state: (default None) optional state. If passed, this is also attended
over as in TransformerXL.
attention_mask: (default None) Optional mask that is added to attention
logits. If state is not None, the mask source sequence dimension should
extend M.
Returns:
attention_output: The result of the computation, of shape [B, T, E],
where `T` is for target sequence shapes and `E` is the query input last
dimension if `output_shape` is `None`. Otherwise, the multi-head outputs
are projected to the shape specified by `output_shape`.
"""
if not self._built_from_signature:
self._build_from_signature(query, value, key=key)
if key is None:
key = value
if state is not None and state.shape.ndims > 1:
value = tf.concat([state, value], 1)
key = tf.concat([state, key], 1)
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S + M, N, H]
key = self._key_dense(key)
# `value` = [B, S + M, N, H]
value = self._value_dense(value)
# `position` = [B, L, N, H]
position = self._encoding_dense(relative_position_encoding)
attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=attention_mask)
# `attention_output` = [B, S, N, H]
attention_output = self._output_dense(attention_output)
return attention_output
@tf.keras.utils.register_keras_serializable(package="Text")
class TwoStreamRelativeAttention(MultiHeadRelativeAttention):
"""Two-stream relative self-attention for XLNet.
In XLNet, each token has two associated vectors at each self-attention layer,
the content stream (h) and the query stream (g).
The content stream is the self-attention stream as in Transformer XL and
represents the context and content (the token itself).
The query stream only has access to contextual information and the position,
but not the content.
This layer shares the same build signature as
`tf.keras.layers.MultiHeadAttention` but has different input/output
projections.
  **Note: This layer is currently experimental.**
Call args:
content_stream: `Tensor` of shape `[B, T, dim]`.
content_attention_bias: Bias `Tensor` for content based attention of shape
`[num_heads, dim]`.
positional_attention_bias: Bias `Tensor` for position based attention of
shape `[num_heads, dim]`.
query_stream: `Tensor` of shape `[B, P, dim]`.
target_mapping: `Tensor` of shape `[B, P, S]`.
relative_position_encoding: Relative positional encoding `Tensor` of shape
`[B, L, dim]`.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet of shape `[B, S, S + M]`.
segment_encoding: Optional `Tensor` representing the segmentation
encoding as used in XLNet of shape `[2, num_heads, dim]`.
segment_attention_bias: Optional trainable bias parameter added to the
      query head when calculating the segment-based attention score used in
XLNet of shape `[num_heads, dim]`.
state: Optional `Tensor` of shape [B, M, E] where M is the length of the
state or memory.
If passed, this is also attended over as in Transformer XL.
content_attention_mask: a boolean mask of shape `[B, T, S]` that
prevents attention to certain positions for content attention computation.
query_attention_mask: a boolean mask of shape `[B, T, S]` that
      prevents attention to certain positions for query attention computation.
"""
def call(self,
content_stream,
content_attention_bias,
positional_attention_bias,
query_stream,
relative_position_encoding,
target_mapping=None,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
state=None,
content_attention_mask=None,
query_attention_mask=None):
"""Compute multi-head relative attention over inputs.
Size glossary:
* Number of heads (H): the number of attention heads.
* Value size (V): the size of each value embedding per head.
* Key size (K): the size of each key embedding per head. Equally, the size
of each query embedding per head. Typically K <= V.
* Number of predictions (P): the number of predictions.
* Batch dimensions (B).
* Query (target) attention axes shape (T).
* Value (source) attention axes shape (S), the rank must match the target.
* Encoding length (L): The relative positional encoding length.
Args:
content_stream: The content representation, commonly referred to as h.
This serves a similar role to the standard hidden states in
Transformer-XL.
content_attention_bias: A trainable bias parameter added to the query head
when calculating the content-based attention score.
positional_attention_bias: A trainable bias parameter added to the query
head when calculating the position-based attention score.
query_stream: The query representation, commonly referred to as g. This
only has access to contextual information and position, but not content.
If not provided, then this is MultiHeadRelativeAttention with
self-attention.
relative_position_encoding: relative positional encoding for key and
value.
target_mapping: Optional `Tensor` representing the target mapping used in
partial prediction.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet.
segment_encoding: Optional `Tensor` representing the segmentation encoding
as used in XLNet.
segment_attention_bias: Optional trainable bias parameter added to the
query head when calculating the segment-based attention score.
state: (default None) optional state. If passed, this is also attended
over as in TransformerXL and XLNet.
content_attention_mask: (default None) Optional mask that is added to
content attention logits. If state is not None, the mask source sequence
dimension should extend M.
query_attention_mask: (default None) Optional mask that is added to query
attention logits. If state is not None, the mask source sequence
dimension should extend M.
Returns:
content_attention_output, query_attention_output: the results of the
computation, both of shape [B, T, E]. `T` is for target sequence shapes,
`E` is the query input last dimension if `output_shape` is `None`.
Otherwise, the multi-head outputs are projected to the shape specified
by `output_shape`.
"""
if not self._built_from_signature:
self._build_from_signature(content_stream, content_stream, content_stream)
if state is not None and state.shape.ndims > 1:
content_and_memory_stream = tf.concat([state, content_stream], 1)
else:
content_and_memory_stream = content_stream
# `query` = [B, T, N, H]
query = self._query_dense(content_stream)
# `key` = [B, S + M, N, H]
key = self._key_dense(content_and_memory_stream)
# `value` = [B, S + M, N, H]
value = self._value_dense(content_and_memory_stream)
# `position` = [B, L, N, H]
position = self._encoding_dense(relative_position_encoding)
content_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=content_attention_mask)
# `content_attention_output` = [B, S, N, H]
content_attention_output = self._output_dense(content_attention_output)
query_attention_output = None
if query_stream is not None:
query = self._query_dense(query_stream)
if target_mapping is not None:
query = tf.einsum("bmnd,bml->blnd", query, target_mapping)
query_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=query_attention_mask)
query_attention_output = tf.einsum("blnd,bml->bmnd",
query_attention_output,
target_mapping)
else:
query_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=query_attention_mask)
query_attention_output = self._output_dense(query_attention_output)
return content_attention_output, query_attention_output
| 20,547 | 40.178357 | 91 | py |
models | models-master/official/nlp/modeling/layers/rezero_transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based rezero-transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import rezero_transformer
class TransformerWithReZeroLayerTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerWithReZeroLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
@parameterized.named_parameters(('no_share_attn_ffn', False),
('share_attn_ffn', True))
def test_layer_invocation_with_float16_dtype(self, share_rezero):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
share_rezero=share_rezero)
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_rezero_without_layer_norm(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
use_layer_norm=False)
input_length, width = 16, 30
input_tensor = tf.keras.Input(shape=(input_length, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_data = np.random.rand(2, input_length, width)
test_layer._rezero_a.assign(1.0)
test_layer.reset_rezero()
output_data = model.predict(input_data)
self.assertAllClose(input_data, output_data)
def test_rezero_with_layer_norm(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
use_layer_norm=True)
input_length, width = 16, 30
input_tensor = tf.keras.Input(shape=(input_length, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_data = np.random.rand(2, input_length, width) + 2.0
output_data = model.predict(input_data)
input_data_normed = (input_data -
np.mean(input_data, axis=-1, keepdims=True)) / (
np.std(input_data, axis=-1, keepdims=True))
self.assertAllClose(input_data_normed, output_data)
def test_layer_output_range(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
    # embedding.
new_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(new_output_tensor, output_tensor[:, 0:1, :])
output_tensor = test_layer([input_data, mask_data], output_range=1)
self.assertAllClose(new_output_tensor, output_tensor, atol=5e-5, rtol=0.003)
def test_separate_qkv(self):
test_layer = rezero_transformer.ReZeroTransformer(
num_attention_heads=2,
intermediate_size=128,
intermediate_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
if __name__ == '__main__':
tf.test.main()
| 5,751 | 38.129252 | 80 | py |
models | models-master/official/nlp/modeling/layers/gaussian_process_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Gaussian process functions."""
import os
import shutil
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import gaussian_process
def exact_gaussian_kernel(x1, x2):
"""Computes exact Gaussian kernel value(s) for tensors x1 and x2."""
x1_squared = tf.reduce_sum(tf.square(x1), list(range(1, len(x1.shape))))
x2_squared = tf.reduce_sum(tf.square(x2), list(range(1, len(x2.shape))))
square = (x1_squared[:, tf.newaxis] + x2_squared[tf.newaxis, :] -
2 * tf.matmul(x1, x2, transpose_b=True))
return tf.math.exp(-square / 2.)
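# i.e. k(x1, x2) = exp(-||x1 - x2||^2 / 2), expanded via the identity
# ||x1 - x2||^2 = ||x1||^2 + ||x2||^2 - 2 <x1, x2>.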
def _generate_normal_data(num_sample, num_dim, loc):
"""Generates random data sampled from i.i.d. normal distribution."""
return np.random.normal(
size=(num_sample, num_dim), loc=loc, scale=1. / np.sqrt(num_dim))
def _generate_rbf_data(x_data, orthogonal=True):
"""Generates high-dim data that is the eigen components of a RBF kernel."""
k_rbf = exact_gaussian_kernel(x_data, x_data)
x_orth, x_diag, _ = np.linalg.svd(k_rbf)
if orthogonal:
return x_orth
return np.diag(np.sqrt(x_diag)).dot(x_orth.T)
def _make_minibatch_iterator(data_numpy, batch_size, num_epoch):
"""Makes a tf.data.Dataset for given batch size and num epoches."""
dataset = tf.data.Dataset.from_tensor_slices(data_numpy)
dataset = dataset.repeat(num_epoch).batch(batch_size)
return iter(dataset)
def _compute_posterior_kernel(x_tr, x_ts, kernel_func, ridge_penalty):
"""Computes the posterior covariance matrix of a Gaussian process."""
num_sample = x_tr.shape[0]
k_tt_inv = tf.linalg.inv(
kernel_func(x_tr, x_tr) + ridge_penalty * np.eye(num_sample))
k_ts = kernel_func(x_tr, x_ts)
k_ss = kernel_func(x_ts, x_ts)
return k_ss - tf.matmul(k_ts, tf.matmul(k_tt_inv, k_ts), transpose_a=True)
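# The helper above evaluates the exact GP posterior covariance
#   K_ss - K_ts^T (K_tt + ridge * I)^{-1} K_ts,
# which the layer's minibatch Laplace covariance estimate should approximate.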
class GaussianProcessTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(GaussianProcessTest, self).setUp()
self.num_data_dim = 10
self.num_inducing = 1024
self.num_train_sample = 1024
self.num_test_sample = 256
self.prec_tolerance = {'atol': 1e-3, 'rtol': 5e-2}
self.cov_tolerance = {'atol': 5e-2, 'rtol': 2.}
self.rbf_kern_func = exact_gaussian_kernel
self.x_tr = _generate_normal_data(
self.num_train_sample, self.num_data_dim, loc=0.)
self.x_ts = _generate_normal_data(
self.num_test_sample, self.num_data_dim, loc=1.)
def test_layer_build(self):
"""Tests if layer.built=True after building."""
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(units=1)
rfgp_model.build(input_shape=self.x_tr.shape)
self.assertTrue(rfgp_model.built)
@parameterized.named_parameters(('rbf_data', False),
('orthogonal_data', True))
def test_laplace_covariance_minibatch(self, generate_orthogonal_data):
"""Tests if model correctly learns population-lvel precision matrix."""
batch_size = 50
epochs = 1000
x_data = _generate_rbf_data(self.x_ts, generate_orthogonal_data)
data_iterator = _make_minibatch_iterator(x_data, batch_size, epochs)
# Estimates precision matrix using minibatch.
cov_estimator = gaussian_process.LaplaceRandomFeatureCovariance(
momentum=0.999, ridge_penalty=0)
for minibatch_data in data_iterator:
_ = cov_estimator(minibatch_data, training=True)
# Evaluation
prec_mat_expected = x_data.T.dot(x_data)
prec_mat_computed = (
cov_estimator.precision_matrix.numpy() * self.num_test_sample)
np.testing.assert_allclose(prec_mat_computed, prec_mat_expected,
**self.prec_tolerance)
def test_random_feature_prior_approximation(self):
"""Tests random feature GP's ability in approximating exact GP prior."""
num_inducing = 10240
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(
units=1,
num_inducing=num_inducing,
normalize_input=False,
gp_kernel_type='gaussian',
return_random_features=True)
# Extract random features.
_, _, gp_feature = rfgp_model(self.x_tr, training=True)
gp_feature_np = gp_feature.numpy()
prior_kernel_computed = gp_feature_np.dot(gp_feature_np.T)
prior_kernel_expected = self.rbf_kern_func(self.x_tr, self.x_tr)
np.testing.assert_allclose(prior_kernel_computed, prior_kernel_expected,
**self.cov_tolerance)
def test_random_feature_posterior_approximation(self):
"""Tests random feature GP's ability in approximating exact GP posterior."""
# Set momentum = 0.5 so posterior precision matrix is 0.5 * (I + K).
gp_cov_momentum = 0.5
gp_cov_ridge_penalty = 1.
num_inducing = 1024
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(
units=1,
num_inducing=num_inducing,
normalize_input=False,
gp_kernel_type='gaussian',
gp_cov_momentum=gp_cov_momentum,
gp_cov_ridge_penalty=gp_cov_ridge_penalty)
# Computes posterior covariance on test data.
_, _ = rfgp_model(self.x_tr, training=True)
_, gp_cov_ts = rfgp_model(self.x_ts, training=False)
# Scale up covariance estimate since prec matrix is down-scaled by momentum.
post_kernel_computed = gp_cov_ts * gp_cov_momentum
post_kernel_expected = _compute_posterior_kernel(self.x_tr, self.x_ts,
self.rbf_kern_func,
gp_cov_ridge_penalty)
np.testing.assert_allclose(post_kernel_computed, post_kernel_expected,
**self.cov_tolerance)
def test_random_feature_linear_kernel(self):
"""Tests if linear kernel indeed leads to an identity mapping."""
# Specify linear kernel
gp_kernel_type = 'linear'
normalize_input = False
scale_random_features = False
use_custom_random_features = True
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(
units=1,
normalize_input=normalize_input,
gp_kernel_type=gp_kernel_type,
scale_random_features=scale_random_features,
use_custom_random_features=use_custom_random_features,
return_random_features=True)
_, _, gp_feature = rfgp_model(self.x_tr, training=True)
# Check if linear kernel leads to identity mapping.
np.testing.assert_allclose(gp_feature, self.x_tr, **self.prec_tolerance)
def test_no_matrix_update_during_test(self):
"""Tests if the precision matrix is not updated during testing."""
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(units=1)
# Training.
_, gp_covmat_null = rfgp_model(self.x_tr, training=True)
precision_mat_before_test = rfgp_model._gp_cov_layer.precision_matrix
# Testing.
_ = rfgp_model(self.x_ts, training=False)
precision_mat_after_test = rfgp_model._gp_cov_layer.precision_matrix
self.assertAllClose(
gp_covmat_null, tf.eye(self.num_train_sample), atol=1e-4)
self.assertAllClose(
precision_mat_before_test, precision_mat_after_test, atol=1e-4)
def test_state_saving_and_loading(self):
"""Tests if the loaded model returns same results."""
input_data = np.random.random((1, 2))
rfgp_model = gaussian_process.RandomFeatureGaussianProcess(units=1)
inputs = tf.keras.Input((2,), batch_size=1)
outputs = rfgp_model(inputs)
model = tf.keras.Model(inputs, outputs)
gp_output, gp_covmat = model.predict(input_data)
# Save and then load the model.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, 'rfgp_model')
model.save(saved_model_dir)
new_model = tf.keras.models.load_model(saved_model_dir)
gp_output_new, gp_covmat_new = new_model.predict(input_data)
self.assertAllClose(gp_output, gp_output_new, atol=1e-4)
self.assertAllClose(gp_covmat, gp_covmat_new, atol=1e-4)
class MeanFieldLogitsTest(tf.test.TestCase):
def testMeanFieldLogitsLikelihood(self):
"""Tests if scaling is correct under different likelihood."""
batch_size = 10
num_classes = 12
variance = 1.5
mean_field_factor = 2.
rng = np.random.RandomState(0)
tf.random.set_seed(1)
logits = rng.randn(batch_size, num_classes)
covmat = tf.linalg.diag([variance] * batch_size)
logits_logistic = gaussian_process.mean_field_logits(
logits, covmat, mean_field_factor=mean_field_factor)
self.assertAllClose(logits_logistic, logits / 2., atol=1e-4)
def testMeanFieldLogitsTemperatureScaling(self):
"""Tests using mean_field_logits as temperature scaling method."""
batch_size = 10
num_classes = 12
rng = np.random.RandomState(0)
tf.random.set_seed(1)
logits = rng.randn(batch_size, num_classes)
# Test if there's no change to logits when mean_field_factor < 0.
logits_no_change = gaussian_process.mean_field_logits(
logits, covariance_matrix=None, mean_field_factor=-1)
# Test if mean_field_logits functions as a temperature scaling method when
# mean_field_factor > 0, with temperature = sqrt(1. + mean_field_factor).
logits_scale_by_two = gaussian_process.mean_field_logits(
logits, covariance_matrix=None, mean_field_factor=3.)
self.assertAllClose(logits_no_change, logits, atol=1e-4)
self.assertAllClose(logits_scale_by_two, logits / 2., atol=1e-4)
if __name__ == '__main__':
tf.test.main()
| 10,091 | 36.656716 | 80 | py |
models | models-master/official/nlp/modeling/layers/reuse_transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based TransformerEncoder block layer."""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import reuse_attention as attention
class ReuseTransformer(tf.keras.layers.Layer):
"""Transformer layer.
This layer implements the ReuseTransformer Encoder from
"Leveraging redundancy in attention with Reuse Transformers".
(https://arxiv.org/abs/2110.06821)
"""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
head_size=None,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
reuse_attention=0,
use_relative_pe=False,
pe_max_seq_length=512,
layer_idx=None,
max_reuse_layer_idx=None,
**kwargs):
"""Initializes `ReuseTransformer`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
head_size: Projection size of heads.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
      reuse_attention: An integer specifying the number of heads to reuse.
        -1 for all heads.
      use_relative_pe: whether to use relative position bias.
      pe_max_seq_length: used to set the size of the relative position
        encodings.
layer_idx: the idx of this layer.
max_reuse_layer_idx: layer idx (if passed) greater than this value will
not reuse attention scores from previous layers.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._head_size = head_size
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._reuse_attention = reuse_attention
self._use_relative_pe = use_relative_pe
self._pe_max_seq_length = pe_max_seq_length
self._layer_idx = layer_idx
self._max_reuse_layer_idx = max_reuse_layer_idx
# Overwrite for the first layer and layers greater than max_reuse_layer_idx.
if self._layer_idx is not None and (
self._layer_idx == 0 or (self._max_reuse_layer_idx is not None and
self._max_reuse_layer_idx < self._layer_idx)):
self._reuse_attention = 0
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if self._head_size is None:
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
else:
self._attention_head_size = self._head_size
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = attention.ReuseMultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
attention_axes=self._attention_axes,
reuse_attention=self._reuse_attention,
use_relative_pe=self._use_relative_pe,
pe_max_seq_length=self._pe_max_seq_length,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(
rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super(ReuseTransformer, self).build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"head_size":
self._head_size,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"reuse_attention":
self._reuse_attention,
"use_relative_pe": self._use_relative_pe,
"pe_max_seq_length": self._pe_max_seq_length,
"max_reuse_layer_idx": self._max_reuse_layer_idx,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes": self._attention_axes,
}
base_config = super(ReuseTransformer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors.
`input tensor` as the single sequence of embeddings.
[`input tensor`, `attention mask`] to have the additional attention
mask.
[`query tensor`, `attention mask`, `attention scores`] to have
additional attention scores for reuse computation. If `attention scores`
is None, the reuse_attention flag will be ignored.
Returns:
An output tensor with the same dimensions as input/query tensor.
Attention scores if return_attention_scores is true.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
reuse_attention_scores = None
elif len(inputs) == 3:
input_tensor, attention_mask, reuse_attention_scores = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, attention_mask, reuse_attention_scores = (inputs, None,
None)
key_value = None
if self._reuse_attention != 0 and reuse_attention_scores is None:
raise ValueError(
"reuse_attention_scores cannot be None when reuse_attention != 0.")
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
if reuse_attention_scores is not None:
reuse_attention_scores = reuse_attention_scores[:, :,
0:self._output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask,
reuse_attention_scores=reuse_attention_scores,
return_attention_scores=True)
attention_output, attention_scores = attention_output
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output, attention_scores
    # During mixed precision training, layer norm output is always fp32 for
    # now. Cast layer_output to fp32 so the subsequent add matches dtypes.
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(layer_output + attention_output)
return layer_output, attention_scores
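# A minimal usage sketch (illustrative, not part of the library): the first
# layer computes fresh attention scores and a later layer reuses them. The
# shapes and hyperparameters below are assumptions chosen for this example.
if __name__ == "__main__":
  first = ReuseTransformer(
      num_attention_heads=4, inner_dim=32, inner_activation="relu",
      layer_idx=0)  # Layer 0 always computes its own attention scores.
  second = ReuseTransformer(
      num_attention_heads=4, inner_dim=32, inner_activation="relu",
      reuse_attention=-1, layer_idx=1)  # -1 reuses scores for all heads.
  x = tf.random.uniform([2, 8, 16])
  x, scores = first(x)
  y, _ = second([x, None, scores])  # [input, attention mask, reused scores].
  print(y.shape)  # (2, 8, 16)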
| 15,687 | 42.457064 | 80 | py |
models | models-master/official/nlp/modeling/layers/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers are the fundamental building blocks for NLP models.
They can be used to assemble new `tf.keras` layers or models.
"""
# pylint: disable=wildcard-import
from official.nlp.modeling.layers import util
from official.nlp.modeling.layers.attention import *
from official.nlp.modeling.layers.bigbird_attention import BigBirdAttention
from official.nlp.modeling.layers.bigbird_attention import BigBirdMasks
from official.nlp.modeling.layers.block_diag_feedforward import BlockDiagFeedforward
from official.nlp.modeling.layers.cls_head import *
from official.nlp.modeling.layers.factorized_embedding import FactorizedEmbedding
from official.nlp.modeling.layers.gated_feedforward import GatedFeedforward
from official.nlp.modeling.layers.gaussian_process import RandomFeatureGaussianProcess
from official.nlp.modeling.layers.kernel_attention import KernelAttention
from official.nlp.modeling.layers.kernel_attention import KernelMask
from official.nlp.modeling.layers.masked_lm import MaskedLM
from official.nlp.modeling.layers.masked_softmax import MaskedSoftmax
from official.nlp.modeling.layers.mat_mul_with_margin import MatMulWithMargin
from official.nlp.modeling.layers.mixing import FourierTransformLayer
from official.nlp.modeling.layers.mixing import HartleyTransformLayer
from official.nlp.modeling.layers.mixing import LinearTransformLayer
from official.nlp.modeling.layers.mixing import MixingMechanism
from official.nlp.modeling.layers.mobile_bert_layers import MobileBertEmbedding
from official.nlp.modeling.layers.mobile_bert_layers import MobileBertMaskedLM
from official.nlp.modeling.layers.mobile_bert_layers import MobileBertTransformer
from official.nlp.modeling.layers.moe import ExpertsChooseMaskedRouter
from official.nlp.modeling.layers.moe import FeedForwardExperts
from official.nlp.modeling.layers.moe import MoeLayer
from official.nlp.modeling.layers.moe import MoeLayerWithBackbone
from official.nlp.modeling.layers.multi_channel_attention import *
from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding
from official.nlp.modeling.layers.pack_optimization import PackBertEmbeddings
from official.nlp.modeling.layers.pack_optimization import StridedTransformerEncoderBlock
from official.nlp.modeling.layers.pack_optimization import StridedTransformerScaffold
from official.nlp.modeling.layers.per_dim_scale_attention import PerDimScaleAttention
from official.nlp.modeling.layers.position_embedding import PositionEmbedding
from official.nlp.modeling.layers.position_embedding import RelativePositionBias
from official.nlp.modeling.layers.position_embedding import RelativePositionEmbedding
from official.nlp.modeling.layers.relative_attention import MultiHeadRelativeAttention
from official.nlp.modeling.layers.relative_attention import TwoStreamRelativeAttention
from official.nlp.modeling.layers.reuse_attention import ReuseMultiHeadAttention
from official.nlp.modeling.layers.reuse_transformer import ReuseTransformer
from official.nlp.modeling.layers.rezero_transformer import ReZeroTransformer
from official.nlp.modeling.layers.routing import *
from official.nlp.modeling.layers.self_attention_mask import *
from official.nlp.modeling.layers.spectral_normalization import *
from official.nlp.modeling.layers.talking_heads_attention import TalkingHeadsAttention
from official.nlp.modeling.layers.text_layers import BertPackInputs
from official.nlp.modeling.layers.text_layers import BertTokenizer
from official.nlp.modeling.layers.text_layers import FastWordpieceBertTokenizer
from official.nlp.modeling.layers.text_layers import SentencepieceTokenizer
from official.nlp.modeling.layers.tn_transformer_expand_condense import TNTransformerExpandCondense
from official.nlp.modeling.layers.transformer import Transformer
from official.nlp.modeling.layers.transformer import TransformerDecoderBlock
from official.nlp.modeling.layers.transformer_encoder_block import TransformerEncoderBlock
from official.nlp.modeling.layers.transformer_scaffold import TransformerScaffold
from official.nlp.modeling.layers.transformer_xl import TransformerXL
from official.nlp.modeling.layers.transformer_xl import TransformerXLBlock
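# Example (illustrative): a typical way to use this package is to import it
# once and instantiate layers from the flat namespace, e.g.
#
#   from official.nlp.modeling import layers
#   block = layers.TransformerEncoderBlock(
#       num_attention_heads=8, inner_dim=2048, inner_activation="relu")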
| 4,780 | 62.746667 | 99 | py |
models | models-master/official/nlp/modeling/layers/transformer_scaffold_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import transformer_scaffold
# Test class that wraps a standard attention layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyAttention')
class ValidatedAttentionLayer(attention.MultiHeadAttention):
def __init__(self, call_list, **kwargs):
super(ValidatedAttentionLayer, self).__init__(**kwargs)
self.list = call_list
def call(self, query, value, attention_mask=None):
self.list.append(True)
return super(ValidatedAttentionLayer, self).call(
query, value, attention_mask=attention_mask)
def get_config(self):
config = super(ValidatedAttentionLayer, self).get_config()
config['call_list'] = []
return config
# Test class implements a simple feedforward layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyFeedforward')
class ValidatedFeedforwardLayer(tf.keras.layers.Layer):
def __init__(self, call_list, activation, **kwargs):
super(ValidatedFeedforwardLayer, self).__init__(**kwargs)
self.list = call_list
self.activation = activation
def build(self, input_shape):
hidden_size = input_shape[-1]
self._feedforward_dense = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
activation=self.activation,
name='feedforward')
def call(self, inputs):
self.list.append(True)
return self._feedforward_dense(inputs)
def get_config(self):
config = super(ValidatedFeedforwardLayer, self).get_config()
config['call_list'] = []
config['activation'] = self.activation
return config
class TransformerLayerTest(tf.test.TestCase):
def tearDown(self):
super(TransformerLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
    # The default output of a transformer layer should have the same shape as
    # the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_creation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
    # The default output of a transformer layer should have the same shape as
    # the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_creation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
    # Create a 3-dimensional mask input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
    # The default output of a transformer layer should have the same shape as
    # the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
feedforward_layer = ValidatedFeedforwardLayer(**feedforward_layer_cfg)
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=feedforward_layer,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
    # Create a 3-dimensional mask input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_invocation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
    # Create a 3-dimensional mask input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_float16_dtype(self):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
    # Create a 3-dimensional mask input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_transform_with_initializer(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
    # The default output of a transformer layer should have the same shape as
    # the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0])
def test_layer_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
    # Create a 3-dimensional mask input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
    # Serialize the model config so we can rebuild an identical model from it
    # below and verify that this custom layer round-trips correctly.
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_with_feedforward_cls_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
    # Create a 3-dimensional mask input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
new_feedforward_call_list = new_model.get_layer(
name='transformer_scaffold')._feedforward_block.list
self.assertNotEmpty(new_feedforward_call_list)
self.assertTrue(new_feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
if __name__ == '__main__':
tf.test.main()
| 19,910 | 38.349802 | 80 | py |
models | models-master/official/nlp/modeling/layers/masked_softmax.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based softmax layer with optional masking."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
def _large_compatible_negative(tensor_type):
"""Large negative number as Tensor.
  This function is necessary because the standard additive mask value used
  in this module (-1e9) cannot be represented using `tf.float16`.
Args:
tensor_type: A dtype to determine the type.
Returns:
A large negative number.
"""
if tensor_type == tf.float16:
return tf.float16.min
return -1e9
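# Note: casting -1e9 to tf.float16 overflows to -inf, and the
# `(1.0 - mask) * large_negative` product used to build `adder` in
# `MaskedSoftmax.call` below would then compute 0 * -inf = NaN for unmasked
# positions; hence the finite tf.float16.min above.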
@tf.keras.utils.register_keras_serializable(package='Text')
class MaskedSoftmax(tf.keras.layers.Layer):
"""Performs a softmax with optional masking on a tensor.
Args:
    mask_expansion_axes: Axis at which the mask tensor is expanded (with
      repeated `tf.expand_dims`) until its rank matches that of the scores.
    normalization_axes: The axes over which the softmax is computed.
"""
def __init__(self,
mask_expansion_axes=None,
normalization_axes=None,
**kwargs):
self._mask_expansion_axes = mask_expansion_axes
if normalization_axes is None:
self._normalization_axes = (-1,)
else:
self._normalization_axes = normalization_axes
super().__init__(**kwargs)
def call(self, scores, mask=None):
if mask is not None:
for _ in range(len(scores.shape) - len(mask.shape)):
mask = tf.expand_dims(mask, axis=self._mask_expansion_axes)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -1.e9 for masked positions.
adder = (1.0 - tf.cast(mask, scores.dtype)) * _large_compatible_negative(
scores.dtype)
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
scores += adder
if len(self._normalization_axes) == 1:
return tf.nn.softmax(scores, axis=self._normalization_axes[0])
else:
return tf.math.exp(scores - tf.math.reduce_logsumexp(
scores, axis=self._normalization_axes, keepdims=True))
def get_config(self):
config = {
'mask_expansion_axes': self._mask_expansion_axes,
'normalization_axes': self._normalization_axes
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
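# A minimal usage sketch (illustrative, not part of the library): a rank-2
# mask is expanded at axis 1 to match the rank-3 scores, and the masked
# position receives (near-)zero probability. The values are example inputs.
if __name__ == '__main__':
  layer = MaskedSoftmax(mask_expansion_axes=1)
  scores = tf.constant([[[1.0, 2.0, 3.0]]])  # Shape [1, 1, 3].
  mask = tf.constant([[1.0, 1.0, 0.0]])      # Attend to the first two only.
  print(layer(scores, mask=mask))  # ~[[[0.269, 0.731, 0.0]]]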
| 2,990 | 33.77907 | 79 | py |
models | models-master/official/nlp/modeling/layers/multi_channel_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for projects.nhnet.multi_channel_attention."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import multi_channel_attention
class MultiChannelAttentionTest(tf.test.TestCase):
def test_doc_attention(self):
num_heads = 2
doc_attention = multi_channel_attention.VotingAttention(
num_heads, head_size=8)
num_docs = 3
inputs = np.zeros((2, num_docs, 10, 16), dtype=np.float32)
doc_mask = np.zeros((2, num_docs), dtype=np.float32)
outputs = doc_attention(inputs, doc_mask)
self.assertEqual(outputs.shape, (2, num_docs))
def test_multi_channel_attention(self):
num_heads = 2
num_docs = 5
attention_layer = multi_channel_attention.MultiChannelAttention(
num_heads, key_dim=2)
from_data = 10 * np.random.random_sample((3, 4, 8))
to_data = 10 * np.random.random_sample((3, num_docs, 2, 8))
mask_data = np.random.randint(2, size=(3, num_docs, 4, 2))
doc_probs = np.random.randint(
2, size=(3, num_heads, 4, num_docs)).astype(float)
outputs = attention_layer(
query=from_data,
value=to_data,
context_attention_weights=doc_probs,
attention_mask=mask_data)
self.assertEqual(outputs.shape, (3, 4, 8))
if __name__ == "__main__":
tf.test.main()
| 1,912 | 33.160714 | 74 | py |
models | models-master/official/nlp/modeling/layers/masked_lm_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for masked language model network."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import masked_lm
from official.nlp.modeling.networks import bert_encoder
class MaskedLMTest(tf.test.TestCase):
def create_layer(self,
vocab_size,
hidden_size,
output='predictions',
xformer_stack=None):
# First, create a transformer stack that we can use to get the LM's
# vocabulary weight.
if xformer_stack is None:
xformer_stack = bert_encoder.BertEncoder(
vocab_size=vocab_size,
num_layers=1,
hidden_size=hidden_size,
num_attention_heads=4,
)
    # Create a MaskedLM layer from the transformer stack.
test_layer = masked_lm.MaskedLM(
embedding_table=xformer_stack.get_embedding_table(), output=output)
return test_layer
def test_layer_creation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_layer = self.create_layer(
vocab_size=vocab_size, hidden_size=hidden_size)
# Make sure that the output tensor of the masked LM is the right shape.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions=masked_positions)
expected_output_shape = [None, num_predictions, vocab_size]
self.assertEqual(expected_output_shape, output.shape.as_list())
def test_layer_invocation_with_external_logits(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
xformer_stack = bert_encoder.BertEncoder(
vocab_size=vocab_size,
num_layers=1,
hidden_size=hidden_size,
num_attention_heads=4,
)
test_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
xformer_stack=xformer_stack,
output='predictions')
logit_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
xformer_stack=xformer_stack,
output='logits')
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions)
logit_output = logit_layer(lm_input_tensor, masked_positions)
logit_output = tf.keras.layers.Activation(tf.nn.log_softmax)(logit_output)
logit_layer.set_weights(test_layer.get_weights())
model = tf.keras.Model([lm_input_tensor, masked_positions], output)
logits_model = tf.keras.Model(([lm_input_tensor, masked_positions]),
logit_output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
sequence_length, size=(batch_size, num_predictions))
# ref_outputs = model.predict([lm_input_data, masked_position_data])
# outputs = logits_model.predict([lm_input_data, masked_position_data])
ref_outputs = model([lm_input_data, masked_position_data])
outputs = logits_model([lm_input_data, masked_position_data])
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_predictions, vocab_size)
self.assertEqual(expected_output_shape, ref_outputs.shape)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertAllClose(ref_outputs, outputs)
def test_layer_invocation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_layer = self.create_layer(
vocab_size=vocab_size, hidden_size=hidden_size)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions)
model = tf.keras.Model([lm_input_tensor, masked_positions], output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
_ = model.predict([lm_input_data, masked_position_data])
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = self.create_layer(vocab_size=8, hidden_size=8, output='bad')
if __name__ == '__main__':
tf.test.main()
| 5,551 | 37.825175 | 79 | py |
models | models-master/official/nlp/modeling/layers/attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
import math
import tensorflow as tf
EinsumDense = tf.keras.layers.EinsumDense
MultiHeadAttention = tf.keras.layers.MultiHeadAttention
@tf.keras.utils.register_keras_serializable(package="Text")
class CachedAttention(tf.keras.layers.MultiHeadAttention):
"""Attention layer with cache used for autoregressive decoding.
Arguments are the same as `tf.keras.layers.MultiHeadAttention` layer.
"""
def _update_cache(self, key, value, cache, decode_loop_step):
"""Updates cache states and gets full-length key/value tensors."""
# Combines cached keys and values with new keys and values.
if decode_loop_step is not None:
# TPU special case.
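      # Inside an XLA loop on TPU we cannot slice-assign into the cache, so
      # this step's key/value is scattered into the pre-allocated cache by
      # multiplying with a one-hot vector over the sequence dimension and
      # adding it to the cached tensor.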
key_seq_dim = cache["key"].shape.as_list()[1]
indices = tf.reshape(
tf.one_hot(decode_loop_step, key_seq_dim, dtype=key.dtype),
[1, key_seq_dim, 1, 1])
key = cache["key"] + key * indices
value_seq_dim = cache["value"].shape.as_list()[1]
indices = tf.reshape(
tf.one_hot(decode_loop_step, value_seq_dim, dtype=value.dtype),
[1, value_seq_dim, 1, 1])
value = cache["value"] + value * indices
else:
key = tf.concat([tf.cast(cache["key"], key.dtype), key], axis=1)
value = tf.concat([tf.cast(cache["value"], value.dtype), value], axis=1)
# Update cache
cache["key"] = key
cache["value"] = value
return key, value
def call(self,
query,
value,
key=None,
attention_mask=None,
cache=None,
decode_loop_step=None,
return_attention_scores=False):
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query` = [B, F, N ,H]
query = self._query_dense(query)
# `key` = [B, T, N, H]
key = self._key_dense(key)
# `value` = [B, T, N, H]
value = self._value_dense(value)
if cache:
key, value = self._update_cache(key, value, cache, decode_loop_step)
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum(self._dot_product_equation, key, query)
# Normalize the attention scores to probabilities.
# `attention_scores` = [B, N, F, T]
attention_scores = self._masked_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_scores = self._dropout_layer(attention_scores)
# `context_layer` = [B, F, N, H]
attention_output = tf.einsum(self._combine_equation, attention_scores,
value)
attention_output = self._output_dense(attention_output)
if return_attention_scores:
return attention_output, attention_scores, cache
return attention_output, cache
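# A minimal decoding sketch (illustrative, not part of the library): a cache
# dict holding zero-length "key"/"value" tensors is threaded through
# successive single-token calls. The sizes below are assumptions made for
# this example.
if __name__ == "__main__":
  batch, num_heads, head_size = 2, 4, 8
  layer = CachedAttention(num_heads=num_heads, key_dim=head_size)
  cache = {
      "key": tf.zeros([batch, 0, num_heads, head_size]),
      "value": tf.zeros([batch, 0, num_heads, head_size]),
  }
  for _ in range(3):  # Decode three tokens, one step at a time.
    step = tf.random.uniform([batch, 1, 16])
    output, cache = layer(query=step, value=step, cache=cache)
  print(cache["key"].shape)  # (2, 3, 4, 8) -- one entry per decoded step.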
| 3,896 | 35.083333 | 78 | py |
models | models-master/official/nlp/modeling/layers/text_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests bert.text_layers."""
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from sentencepiece import SentencePieceTrainer
from official.nlp.modeling.layers import text_layers
# This test covers the in-process behavior of a BertTokenizer layer.
# For saving, restoring, and the restored behavior (incl. shape inference),
# see nlp/tools/export_tfhub_lib_test.py.
class BertTokenizerTest(tf.test.TestCase):
def _make_vocab_file(self, vocab, filename="vocab.txt"):
path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.
filename)
with tf.io.gfile.GFile(path, "w") as f:
f.write("\n".join(vocab + [""]))
return path
def test_uncased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[6], [4, 5], [4]]]))
bert_tokenize.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4, 5], [8]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [5, 7], [9]]]))
self.assertEqual(bert_tokenize.vocab_size.numpy(), 8)
# Repeat the above and test that case matters with lower_case=False.
def test_cased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "ABC"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=False, tokenize_with_offsets=True)
inputs = tf.constant(["abc def", "ABC DEF"])
token_ids, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[7], [1]]]))
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [7]]]))
def test_special_tokens_complete(self):
vocab_file = self._make_vocab_file(
["foo", "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "xy"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=1,
start_of_sequence_id=3,
end_of_segment_id=4,
mask_id=5,
vocab_size=7))
def test_special_tokens_partial(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[CLS]", "[SEP]"])
bert_tokenize = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=0,
start_of_sequence_id=1,
end_of_segment_id=2,
                              vocab_size=3))  # No mask_id.
def test_special_tokens_in_estimator(self):
"""Tests getting special tokens without an Eager init context."""
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
def input_fn():
with tf.init_scope():
self.assertFalse(tf.executing_eagerly())
# Build a preprocessing Model.
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)
bert_tokenizer = text_layers.BertTokenizer(
vocab_file=vocab_file, lower_case=True)
special_tokens_dict = bert_tokenizer.get_special_tokens_dict()
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = bert_tokenizer(sentences)
packed_inputs = text_layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(tokens)
preprocessing = tf.keras.Model(sentences, packed_inputs)
# Map the dataset.
ds = tf.data.Dataset.from_tensors(
(tf.constant(["abc", "DEF"]), tf.constant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return tf_estimator.EstimatorSpec(mode=mode,
predictions=features["input_word_ids"])
estimator = tf_estimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],
[2, 4, 5, 3]]))
# This test covers the in-process behavior of a SentencepieceTokenizer layer.
class SentencepieceTokenizerTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Make a sentencepiece model.
tmp_dir = self.get_temp_dir()
tempfile.mkdtemp(dir=tmp_dir)
vocab = ["a", "b", "c", "d", "e", "abc", "def", "ABC", "DEF"]
model_prefix = os.path.join(tmp_dir, "spm_model")
input_text_file_path = os.path.join(tmp_dir, "train_input.txt")
with tf.io.gfile.GFile(input_text_file_path, "w") as f:
f.write(" ".join(vocab + ["\n"]))
# Add 7 more tokens: <pad>, <unk>, [CLS], [SEP], [MASK], <s>, </s>.
full_vocab_size = len(vocab) + 7
flags = dict(
model_prefix=model_prefix,
model_type="word",
input=input_text_file_path,
pad_id=0, unk_id=1, control_symbols="[CLS],[SEP],[MASK]",
vocab_size=full_vocab_size,
bos_id=full_vocab_size-2, eos_id=full_vocab_size-1)
SentencePieceTrainer.Train(
" ".join(["--{}={}".format(k, v) for k, v in flags.items()]))
self._spm_path = model_prefix + ".model"
def test_uncased(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = sentencepiece_tokenizer(inputs)
self.assertAllEqual(
token_ids,
tf.ragged.constant([[8, 12], [8, 12, 11]]))
sentencepiece_tokenizer.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = sentencepiece_tokenizer(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(
start_offsets, tf.ragged.constant([[0, 3], [0, 3, 7]]))
self.assertAllEqual(
limit_offsets, tf.ragged.constant([[3, 7], [3, 7, 9]]))
self.assertEqual(sentencepiece_tokenizer.vocab_size.numpy(), 16)
# Repeat the above and test that case matters with lower_case=False.
def test_cased(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=False,
nbest_size=0,
tokenize_with_offsets=False)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = sentencepiece_tokenizer(inputs)
self.assertAllEqual(
token_ids,
tf.ragged.constant([[8, 12], [5, 6, 11]]))
sentencepiece_tokenizer.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = sentencepiece_tokenizer(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(
start_offsets,
tf.ragged.constant([[0, 3], [0, 3, 7]]))
self.assertAllEqual(
limit_offsets,
tf.ragged.constant([[3, 7], [3, 7, 9]]))
def test_special_tokens(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
self.assertDictEqual(sentencepiece_tokenizer.get_special_tokens_dict(),
dict(padding_id=0,
start_of_sequence_id=2,
end_of_segment_id=3,
mask_id=4,
vocab_size=16))
def test_special_tokens_in_estimator(self):
"""Tests getting special tokens without an Eager init context."""
def input_fn():
with tf.init_scope():
self.assertFalse(tf.executing_eagerly())
# Build a preprocessing Model.
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
special_tokens_dict = sentencepiece_tokenizer.get_special_tokens_dict()
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = sentencepiece_tokenizer(sentences)
packed_inputs = text_layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(tokens)
preprocessing = tf.keras.Model(sentences, packed_inputs)
# Map the dataset.
ds = tf.data.Dataset.from_tensors(
(tf.constant(["abc", "DEF"]), tf.constant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return tf_estimator.EstimatorSpec(mode=mode,
predictions=features["input_word_ids"])
estimator = tf_estimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 8, 3, 0],
[2, 12, 3, 0]]))
def test_strip_diacritics(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=True,
nbest_size=0,
strip_diacritics=True)
inputs = tf.constant(["a b c d e", "ă ḅ č ḓ é"])
token_ids = sentencepiece_tokenizer(inputs)
self.assertAllEqual(
token_ids,
tf.ragged.constant([[7, 9, 10, 11, 13], [7, 9, 10, 11, 13]]))
def test_fail_on_tokenize_with_offsets_and_strip_diacritics(self):
# Raise an error in init().
with self.assertRaises(ValueError):
text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
tokenize_with_offsets=True,
lower_case=True,
nbest_size=0,
strip_diacritics=True)
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=True,
nbest_size=0,
strip_diacritics=True)
sentencepiece_tokenizer.tokenize_with_offsets = True
# Raise an error in call():
inputs = tf.constant(["abc def", "ABC DEF d", "Äffin"])
with self.assertRaises(ValueError):
sentencepiece_tokenizer(inputs)
def test_serialize_deserialize(self):
self.skipTest("b/170480226")
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path,
lower_case=False,
nbest_size=0,
tokenize_with_offsets=False,
name="sentencepiece_tokenizer_layer")
config = sentencepiece_tokenizer.get_config()
new_tokenizer = text_layers.SentencepieceTokenizer.from_config(config)
self.assertEqual(config, new_tokenizer.get_config())
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = sentencepiece_tokenizer(inputs)
token_ids_2 = new_tokenizer(inputs)
self.assertAllEqual(token_ids, token_ids_2)
# TODO(b/170480226): Remove once tf_hub_export_lib_test.py covers saving.
def test_saving(self):
sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(
model_file_path=self._spm_path, lower_case=True, nbest_size=0)
inputs = tf.keras.layers.Input([], dtype=tf.string)
outputs = sentencepiece_tokenizer(inputs)
model = tf.keras.Model(inputs, outputs)
export_path = tempfile.mkdtemp(dir=self.get_temp_dir())
model.save(export_path, signatures={})
class BertPackInputsTest(tf.test.TestCase):
def test_round_robin_correct_outputs(self):
bpi = text_layers.BertPackInputs(
10,
start_of_sequence_id=1001,
end_of_segment_id=1002,
padding_id=999,
truncator="round_robin")
# Single input, rank 2.
bert_inputs = bpi(
tf.ragged.constant([[11, 12, 13],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],
[1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
# Two inputs, rank 3. Truncation does not respect word boundaries.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211, 212], [213]],
[[221, 222], [223, 224, 225], [226, 227, 228]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 213, 1002, 999],
[1001, 121, 122, 123, 124, 1002, 221, 222, 223, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1]]))
# Three inputs. rank 3.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211, 212], [213]],
[[221, 222], [223, 224, 225], [226, 227, 228]]]),
tf.ragged.constant([[[311, 312], [313]],
[[321, 322], [323, 324, 325], [326, 327, 328]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 1002, 211, 212, 1002, 311, 312, 1002],
[1001, 121, 122, 1002, 221, 222, 1002, 321, 322, 1002]]))
def test_waterfall_correct_outputs(self):
bpi = text_layers.BertPackInputs(
10,
start_of_sequence_id=1001,
end_of_segment_id=1002,
padding_id=999,
truncator="waterfall")
# Single input, rank 2.
bert_inputs = bpi(
tf.ragged.constant([[11, 12, 13],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],
[1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
# Two inputs, rank 3. Truncation does not respect word boundaries.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211, 212], [213]],
[[221, 222], [223, 224, 225], [226, 227, 228]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 213, 1002, 999],
[1001, 121, 122, 123, 124, 125, 126, 127, 1002, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]))
# Three inputs, rank 3. Truncation does not respect word boundaries.
bert_inputs = bpi([
tf.ragged.constant([[[111], [112, 113]],
[[121, 122, 123], [124, 125, 126], [127, 128]]]),
tf.ragged.constant([[[211], [212]],
[[221, 222], [223, 224, 225], [226, 227, 228]]]),
tf.ragged.constant([[[311, 312], [313]],
[[321, 322], [323, 324, 325], [326, 327]]])
])
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 1002, 311, 1002],
[1001, 121, 122, 123, 124, 125, 126, 1002, 1002, 1002]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 2]]))
def test_special_tokens_dict(self):
special_tokens_dict = dict(start_of_sequence_id=1001,
end_of_segment_id=1002,
padding_id=999,
extraneous_key=666)
bpi = text_layers.BertPackInputs(10,
special_tokens_dict=special_tokens_dict)
bert_inputs = bpi(
tf.ragged.constant([[11, 12, 13],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],
[1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))
# This test covers the in-process behavior of FastWordpieceBertTokenizer layer.
class FastWordPieceBertTokenizerTest(tf.test.TestCase):
def _make_vocab_file(self, vocab, filename="vocab.txt"):
path = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.
filename)
with tf.io.gfile.GFile(path, "w") as f:
f.write("\n".join(vocab + [""]))
return path
def test_uncased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
inputs = tf.constant(["abc def", "ABC DEF d"])
token_ids = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[6], [4, 5], [4]]]))
bert_tokenize.tokenize_with_offsets = True
token_ids_2, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, token_ids_2)
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4, 5], [8]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [5, 7], [9]]]))
self.assertEqual(bert_tokenize.vocab_size, 8)
# Repeat the above and test that case matters with lower_case=False.
def test_cased(self):
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "ABC"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=False, tokenize_with_offsets=True)
inputs = tf.constant(["abc def", "ABC DEF"])
token_ids, start_offsets, limit_offsets = bert_tokenize(inputs)
self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],
[[7], [1]]]))
self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],
[[0], [4]]]))
self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],
[[3], [7]]]))
def test_special_tokens_complete(self):
vocab_file = self._make_vocab_file(
["foo", "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "xy"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=1,
start_of_sequence_id=3,
end_of_segment_id=4,
mask_id=5,
vocab_size=7))
def test_special_tokens_partial(self):
# [UNK] token is required by fast wordpiece tokenizer.
vocab_file = self._make_vocab_file(
["[PAD]", "[CLS]", "[SEP]", "[UNK]"])
bert_tokenize = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),
dict(padding_id=0,
start_of_sequence_id=1,
end_of_segment_id=2,
                              vocab_size=4))  # No mask_id.
def test_special_tokens_in_estimator(self):
"""Tests getting special tokens without an Eager init context."""
vocab_file = self._make_vocab_file(
["[PAD]", "[UNK]", "[CLS]", "[SEP]", "d", "##ef", "abc", "xy"])
def input_fn():
with tf.init_scope():
self.assertFalse(tf.executing_eagerly())
# Build a preprocessing Model.
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)
bert_tokenizer = text_layers.FastWordpieceBertTokenizer(
vocab_file=vocab_file, lower_case=True)
special_tokens_dict = bert_tokenizer.get_special_tokens_dict()
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = bert_tokenizer(sentences)
packed_inputs = text_layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(tokens)
preprocessing = tf.keras.Model(sentences, packed_inputs)
# Map the dataset.
ds = tf.data.Dataset.from_tensors(
(tf.constant(["abc", "DEF"]), tf.constant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return tf_estimator.EstimatorSpec(mode=mode,
predictions=features["input_word_ids"])
estimator = tf_estimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],
[2, 4, 5, 3]]))
if __name__ == "__main__":
tf.test.main()
| 24,237 | 42.90942 | 80 | py |
models | models-master/official/nlp/modeling/layers/mat_mul_with_margin_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mat_mul_with_margin layer."""
import tensorflow as tf
from official.nlp.modeling.layers import mat_mul_with_margin
class MatMulWithMarginTest(tf.test.TestCase):
def test_layer_invocation(self):
"""Validate that the Keras object can be created and invoked."""
input_width = 512
test_layer = mat_mul_with_margin.MatMulWithMargin()
# Create a 2-dimensional input (the first dimension is implicit).
left_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
right_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
left_logits, right_logits = test_layer(left_encoded, right_encoded)
# Validate that the outputs are of the expected shape.
expected_output_shape = [None, None]
self.assertEqual(expected_output_shape, left_logits.shape.as_list())
self.assertEqual(expected_output_shape, right_logits.shape.as_list())
def test_serialize_deserialize(self):
# Create a layer object that sets all of its config options.
layer = mat_mul_with_margin.MatMulWithMargin()
# Create another layer object from the first object's config.
new_layer = mat_mul_with_margin.MatMulWithMargin.from_config(
layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(layer.get_config(), new_layer.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,022 | 37.903846 | 79 | py |
models | models-master/official/nlp/modeling/layers/masked_softmax_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based masked softmax layer."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import masked_softmax
class MaskedSoftmaxLayerTest(tf.test.TestCase):
def test_non_masked_softmax(self):
test_layer = masked_softmax.MaskedSoftmax()
input_tensor = tf.keras.Input(shape=(4, 8))
output = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output)
input_data = 10 * np.random.random_sample((3, 4, 8))
output_data = model.predict(input_data)
expected_data = tf.nn.softmax(input_data)
self.assertAllClose(expected_data, output_data)
def test_masked_softmax(self):
test_layer = masked_softmax.MaskedSoftmax()
input_tensor = tf.keras.Input(shape=(4, 8))
mask_tensor = tf.keras.Input(shape=(4, 8))
output = test_layer(input_tensor, mask_tensor)
model = tf.keras.Model([input_tensor, mask_tensor], output)
input_data = 10 * np.random.random_sample((3, 4, 8))
mask_data = np.random.randint(2, size=(3, 4, 8))
output_data = model.predict([input_data, mask_data])
expected_zeros = np.greater(mask_data, 0)
is_zeros = np.greater(output_data, 0)
self.assertAllEqual(expected_zeros, is_zeros)
def test_masked_softmax_with_none_mask(self):
test_layer = masked_softmax.MaskedSoftmax()
input_tensor = tf.keras.Input(shape=(4, 8))
output = test_layer(input_tensor, None)
model = tf.keras.Model(input_tensor, output)
input_data = 10 * np.random.random_sample((3, 4, 8))
output_data = model.predict(input_data)
expected_data = tf.nn.softmax(input_data)
self.assertAllClose(expected_data, output_data)
def test_softmax_with_axes_expansion(self):
test_layer = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1])
input_tensor = tf.keras.Input(shape=(4, 8))
    mask_tensor = tf.keras.Input(shape=(8,))
output = test_layer(input_tensor, mask_tensor)
model = tf.keras.Model([input_tensor, mask_tensor], output)
input_data = 10 * np.random.random_sample((3, 4, 8))
mask_data = np.random.randint(2, size=(3, 8))
output_data = model.predict([input_data, mask_data])
expanded_mask = np.expand_dims(mask_data, axis=1) * np.ones_like(input_data)
expected_zeros = np.greater(expanded_mask, 0)
is_zeros = np.greater(output_data, 0)
self.assertAllEqual(expected_zeros, is_zeros)
def test_masked_softmax_high_dims(self):
test_layer = masked_softmax.MaskedSoftmax(
mask_expansion_axes=[1], normalization_axes=[6, 7])
input_shape = [2, 3, 4, 5, 6, 7, 8]
mask_shape = [5, 6, 7, 8]
input_tensor = tf.keras.Input(shape=input_shape)
mask_tensor = tf.keras.Input(shape=mask_shape)
output = test_layer(input_tensor, mask_tensor)
model = tf.keras.Model([input_tensor, mask_tensor], output)
input_data = 10 * np.random.random_sample([3] + input_shape)
mask_data = np.random.randint(2, size=[3] + mask_shape)
output_data = model.predict([input_data, mask_data])
expanded_mask = np.expand_dims(mask_data, axis=1)
expanded_mask = np.expand_dims(expanded_mask, axis=1)
expanded_mask = np.expand_dims(
expanded_mask, axis=1) * np.ones_like(input_data)
expected_zeros = np.greater(expanded_mask, 0)
is_zeros = np.greater(output_data, 0)
self.assertAllEqual(expected_zeros, is_zeros)
def test_serialize_deserialize(self):
test_layer = masked_softmax.MaskedSoftmax(
mask_expansion_axes=[1], normalization_axes=[6, 7])
new_layer = masked_softmax.MaskedSoftmax.from_config(
test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,393 | 38.232143 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer_xl_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Transformer XL."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.nlp.modeling.layers import transformer_xl
def create_mock_transformer_xl_data(
batch_size,
num_heads,
head_size,
hidden_size,
seq_length,
memory_length=0,
num_predictions=2,
two_stream=False,
num_layers=1,
include_biases=True,
include_state=False,
include_mask=False,
include_segment=False):
"""Creates mock testing data.
Args:
batch_size: `int`, the batch size.
num_heads: `int`, number of attention heads.
head_size: `int`, the size of each attention head.
hidden_size: `int`, the layer's hidden size.
    seq_length: `int`, the sequence length of the input.
memory_length: optional `int`, the length of the state. Defaults to 0.
num_predictions: `int`, the number of predictions used in two stream
attention.
two_stream: `bool`, whether or not to generate two stream data.
num_layers: `int`, the number of Transformer XL blocks.
include_biases: optional `bool`, whether or not to include attention biases.
include_state: optional `bool`, whether or not to include state data.
include_mask: optional `bool`, whether or not to include mask data.
include_segment: optional `bool`, whether or not to include segment data.
Returns:
A dictionary with `str` as keys and `Tensor` as values.
"""
encoding_shape = (batch_size, seq_length * 2, hidden_size)
data = dict(
relative_position_encoding=tf.random.normal(shape=encoding_shape),
content_stream=tf.random.normal(
shape=(batch_size, seq_length, hidden_size)))
if include_biases:
attention_bias_shape = (num_heads, head_size)
data.update(dict(
content_attention_bias=tf.random.normal(shape=attention_bias_shape),
segment_attention_bias=tf.random.normal(shape=attention_bias_shape),
positional_attention_bias=tf.random.normal(shape=attention_bias_shape)))
if two_stream:
data.update(dict(
query_stream=tf.random.normal(
shape=(batch_size, num_predictions, hidden_size)),
target_mapping=tf.random.normal(
shape=(batch_size, num_predictions, seq_length))))
if include_state:
total_seq_length = seq_length + memory_length
if num_layers > 1:
state_shape = (num_layers, batch_size, memory_length, hidden_size)
else:
state_shape = (batch_size, memory_length, hidden_size)
data.update(dict(
state=tf.random.normal(shape=state_shape)))
else:
total_seq_length = seq_length
if include_mask:
mask_shape = (batch_size, num_heads, seq_length, total_seq_length)
mask_data = np.random.randint(2, size=mask_shape).astype("float32")
data["content_attention_mask"] = mask_data
if two_stream:
data["query_attention_mask"] = mask_data
if include_segment:
# A transformer XL block takes an individual segment "encoding" from the
# entirety of the Transformer XL segment "embedding".
if num_layers > 1:
segment_encoding_shape = (num_layers, 2, num_heads, head_size)
segment_encoding_name = "segment_embedding"
else:
segment_encoding_shape = (2, num_heads, head_size)
segment_encoding_name = "segment_encoding"
segment_matrix = np.random.randint(
2, size=(batch_size, seq_length, total_seq_length))
data["segment_matrix"] = tf.math.equal(segment_matrix, 1)
data[segment_encoding_name] = tf.random.normal(shape=segment_encoding_shape)
return data
class TransformerXLBlockTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
memory_length=[0, 4],
two_stream=[True, False],
state=[True, False],
mask=[True, False],
segment=[True, False]))
def test_transformer_xl_block(
self,
two_stream,
memory_length,
state,
mask,
segment):
"""Tests combinations of Transformer XL block calculations."""
batch_size, num_heads, head_size, seq_length = 2, 12, 64, 8
hidden_size, num_predictions, inner_size = 24, 8, 12
data = create_mock_transformer_xl_data(
include_biases=True,
num_heads=num_heads,
head_size=head_size,
hidden_size=hidden_size,
seq_length=seq_length,
batch_size=batch_size,
memory_length=memory_length,
num_predictions=num_predictions,
two_stream=two_stream,
include_state=state,
include_mask=mask,
include_segment=segment)
test_layer = transformer_xl.TransformerXLBlock(
vocab_size=32000,
hidden_size=hidden_size,
num_attention_heads=num_heads,
head_size=head_size,
inner_size=inner_size,
dropout_rate=0.,
attention_dropout_rate=0.,
two_stream=two_stream)
output = test_layer(**data)
content_attention = output["content_attention"]
self.assertEqual(content_attention.shape,
[batch_size, seq_length, hidden_size])
if two_stream:
self.assertIn("query_attention", output)
self.assertEqual(output["query_attention"].shape,
[batch_size, num_predictions, hidden_size])
else:
self.assertNotIn("query_attention", output)
def test_get_config(self):
transformer_xl_block = transformer_xl.TransformerXLBlock(
vocab_size=32000,
head_size=64,
num_attention_heads=2,
hidden_size=10,
inner_size=50,
dropout_rate=0.,
attention_dropout_rate=0.,
two_stream=False)
transformer_xl_block_config = transformer_xl_block.get_config()
new_block = transformer_xl.TransformerXLBlock.from_config(
transformer_xl_block_config)
self.assertEqual(transformer_xl_block_config, new_block.get_config())
class TransformerXLTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
two_stream=[True, False],
memory_length=[0, 4],
reuse_length=[0, 4],
tie_attention_biases=[True, False],
state=[True, False],
mask=[True, False],
segment=[True, False]))
def test_transformer_xl(
self,
two_stream,
memory_length,
reuse_length,
tie_attention_biases,
state,
mask,
segment):
batch_size, num_heads, head_size, seq_length = 2, 12, 64, 8
hidden_size, num_predictions, inner_size = 24, 8, 12
num_layers = 3
data = create_mock_transformer_xl_data(
include_biases=False,
num_heads=num_heads,
head_size=head_size,
hidden_size=hidden_size,
seq_length=seq_length,
batch_size=batch_size,
memory_length=memory_length,
num_predictions=num_predictions,
two_stream=two_stream,
num_layers=num_layers,
include_state=state,
include_mask=mask,
include_segment=segment)
transformer_xl_layer = transformer_xl.TransformerXL(
vocab_size=32000,
num_layers=num_layers,
head_size=head_size,
hidden_size=hidden_size,
num_attention_heads=num_heads,
inner_size=inner_size,
dropout_rate=0.,
attention_dropout_rate=0.,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
two_stream=two_stream,
tie_attention_biases=tie_attention_biases,
memory_length=memory_length,
reuse_length=reuse_length,
inner_activation="relu")
attention_output, cached_memory_states = transformer_xl_layer(**data)
if two_stream:
self.assertEqual(attention_output.shape,
[batch_size, num_predictions, hidden_size])
else:
self.assertEqual(attention_output.shape,
[batch_size, seq_length, hidden_size])
self.assertLen(cached_memory_states, num_layers)
def test_get_config(self):
transformer_xl_layer = transformer_xl.TransformerXL(
vocab_size=32000,
num_layers=12,
hidden_size=36,
head_size=12,
num_attention_heads=12,
inner_size=12,
dropout_rate=0.,
attention_dropout_rate=0.,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
two_stream=False,
tie_attention_biases=True,
memory_length=0,
reuse_length=0,
inner_activation="relu")
transformer_xl_config = transformer_xl_layer.get_config()
new_transformer_xl = transformer_xl.TransformerXL.from_config(
transformer_xl_config)
self.assertEqual(transformer_xl_config, new_transformer_xl.get_config())
if __name__ == "__main__":
np.random.seed(0)
tf.random.set_seed(0)
tf.test.main()
| 9,386 | 33.134545 | 80 | py |
models | models-master/official/nlp/modeling/layers/gated_feedforward_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based gated feedforward layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import gated_feedforward
class GatedFeedforwardTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(GatedFeedforwardTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.parameters(
(True, 1, "after_residual", "float32"),
(True, 1, "after_residual", "mixed_float16"),
(False, 4, "before_residual", "float32"),
(False, 4, "before_residual", "mixed_float16"),
(True, 4, "after_residual", "float32"),
(True, 4, "after_residual", "mixed_float16"),
(False, 1, "before_residual", "float32"),
(False, 1, "before_residual", "mixed_float16"),
)
def test_layer_creation(self, use_gate, num_blocks, dropout_position, dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
inner_dim=128,
inner_activation="relu",
dropout=0.1,
use_gate=use_gate,
num_blocks=num_blocks,
dropout_position=dropout_position,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = gated_feedforward.GatedFeedforward(**kwargs)
sequence_length = 64
width = 128
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
    # The default output of the layer should have the same shape as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.parameters(
(True, 1, "after_residual", "float32"),
(True, 1, "after_residual", "mixed_float16"),
(False, 4, "before_residual", "float32"),
(False, 4, "before_residual", "mixed_float16"),
(True, 4, "after_residual", "float32"),
(True, 4, "after_residual", "mixed_float16"),
(False, 1, "before_residual", "float32"),
(False, 1, "before_residual", "mixed_float16"),
)
def test_layer_invocation(self, use_gate, num_blocks, dropout_position,
dtype):
tf.keras.mixed_precision.set_global_policy(dtype)
kwargs = dict(
inner_dim=16,
inner_activation="relu",
dropout=0.1,
use_gate=use_gate,
num_blocks=num_blocks,
dropout_position=dropout_position,
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = gated_feedforward.GatedFeedforward(**kwargs)
sequence_length = 16
width = 32
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_data = model.predict(input_data)
self.assertEqual(output_data.shape, (batch_size, sequence_length, width))
def test_serialize_deserialize(self):
kwargs = dict(
inner_dim=16,
inner_activation="relu",
dropout=0.1,
use_gate=False,
num_blocks=4,
dropout_position="after_residual",
kernel_initializer="glorot_uniform",
bias_initializer="zeros")
test_layer = gated_feedforward.GatedFeedforward(**kwargs)
new_layer = gated_feedforward.GatedFeedforward.from_config(
test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == "__main__":
tf.test.main()
| 4,484 | 36.066116 | 80 | py |
models | models-master/official/nlp/modeling/layers/routing.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for Mixture of Experts (MoE) routing.
For MoE routing, we need to split a set of tokens into several subsets of
tokens. Later on, different subsets of tokens can potentially be routed to
different experts.
"""
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="Text")
class TokenImportanceWithMovingAvg(tf.keras.layers.Layer):
"""Routing based on per-token importance value."""
def __init__(self,
vocab_size,
init_importance,
moving_average_beta=0.995,
**kwargs):
self._vocab_size = vocab_size
self._init_importance = init_importance
self._moving_average_beta = moving_average_beta
super().__init__(**kwargs)
def build(self, input_shape):
self._importance_embedding = self.add_weight(
name="importance_embed",
        shape=(self._vocab_size,),
initializer=tf.keras.initializers.Constant(self._init_importance),
trainable=False)
def get_config(self):
config = {
"vocab_size":
self._vocab_size,
"init_importance":
self._init_importance,
"moving_average_beta":
self._moving_average_beta,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def update_token_importance(self, token_ids, importance):
token_ids = tf.reshape(token_ids, shape=[-1])
importance = tf.reshape(importance, shape=[-1])
beta = self._moving_average_beta
old_importance = tf.gather(self._importance_embedding, token_ids)
self._importance_embedding.assign(tf.tensor_scatter_nd_update(
self._importance_embedding,
tf.expand_dims(token_ids, axis=1),
old_importance * beta + tf.cast(importance * (1.0 - beta),
dtype=tf.float32)))
def call(self, inputs):
return tf.gather(self._importance_embedding, inputs)
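# Illustrative usage sketch (not part of the library API; the layer sizes and
# values below are assumptions chosen to make the arithmetic easy to check).
def _token_importance_example():
  layer = TokenImportanceWithMovingAvg(
      vocab_size=8, init_importance=1.0, moving_average_beta=0.9)
  _ = layer(tf.constant([[0, 1]]))  # The first call builds the lookup table.
  layer.update_token_importance(
      token_ids=tf.constant([[1]]), importance=tf.constant([[3.0]]))
  # Token 1's importance becomes 1.0 * 0.9 + 3.0 * (1 - 0.9) = 1.2.
  return layer(tf.constant([[1]]))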
@tf.keras.utils.register_keras_serializable(package="Text")
class SelectTopK(tf.keras.layers.Layer):
"""Select top-k + random-k tokens according to importance."""
def __init__(self,
top_k=None,
random_k=None,
**kwargs):
self._top_k = top_k
self._random_k = random_k
super().__init__(**kwargs)
def get_config(self):
config = {
"top_k":
self._top_k,
"random_k":
self._random_k,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
if self._random_k is None:
      # Pure top-k, no randomness.
pos = tf.argsort(inputs, direction="DESCENDING")
selected = tf.slice(pos, [0, 0], [-1, self._top_k])
not_selected = tf.slice(pos, [0, self._top_k], [-1, -1])
elif self._top_k is None:
# Pure randomness, no top-k.
pos = tf.argsort(tf.random.uniform(shape=tf.shape(inputs)),
direction="DESCENDING")
selected = tf.slice(pos, [0, 0], [-1, self._random_k])
not_selected = tf.slice(pos, [0, self._random_k], [-1, -1])
else:
# Top-k plus randomness.
pos = tf.argsort(inputs, direction="DESCENDING")
selected_top_k = tf.slice(pos, [0, 0], [-1, self._top_k])
pos_left = tf.slice(pos, [0, self._top_k], [-1, -1])
# Randomly shuffle pos_left
sort_index = tf.argsort(
tf.random.uniform(shape=tf.shape(pos_left)),
direction="DESCENDING")
pos_left = tf.gather(pos_left, sort_index, batch_dims=1, axis=1)
selected_rand = tf.slice(pos_left, [0, 0], [-1, self._random_k])
not_selected = tf.slice(pos_left, [0, self._random_k], [-1, -1])
selected = tf.concat([selected_top_k, selected_rand], axis=1)
# Return the indices of selected and not-selected tokens.
return selected, not_selected
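# Hypothetical usage sketch with made-up importance scores: with `top_k=2` and
# no `random_k`, the selection is deterministic.
def _select_top_k_example():
  importance_scores = tf.constant([[0.1, 0.9, 0.4, 0.7]])
  selected, not_selected = SelectTopK(top_k=2)(importance_scores)
  # selected == [[1, 3]]: positions of the two largest scores, descending.
  # not_selected == [[2, 0]]: the remaining positions, also by score.
  return selected, not_selected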
| 4,459 | 34.396825 | 75 | py |
models | models-master/official/nlp/modeling/layers/tn_expand_condense_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ExpandCondense tensor network layer."""
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers.tn_expand_condense import TNExpandCondense
class TNLayerTest(tf.test.TestCase, parameterized.TestCase):
"""Unit tests for ExpandCondense TN layer.
"""
def setUp(self):
super().setUp()
self.labels = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))), axis=0)
def _build_model(self, data, proj_multiple=2):
model = tf.keras.models.Sequential()
model.add(
TNExpandCondense(
proj_multiplier=proj_multiple,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
return model
@parameterized.parameters((768, 6), (1024, 2))
def test_train(self, input_dim, proj_multiple):
tf.keras.utils.set_random_seed(0)
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model for 5 epochs
history = model.fit(data, self.labels, epochs=5, batch_size=32)
# Check that loss decreases and accuracy increases
self.assertGreater(history.history['loss'][0], history.history['loss'][-1])
self.assertLess(
history.history['accuracy'][0], history.history['accuracy'][-1])
@parameterized.parameters((768, 6), (1024, 2))
def test_weights_change(self, input_dim, proj_multiple):
tf.keras.utils.set_random_seed(0)
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
before = model.get_weights()
model.fit(data, self.labels, epochs=5, batch_size=32)
after = model.get_weights()
# Make sure every layer's weights changed
for i, _ in enumerate(before):
self.assertTrue((after[i] != before[i]).any())
@parameterized.parameters((768, 6), (1024, 2))
def test_output_shape(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
input_shape = data.shape
actual_output_shape = model(data).shape
expected_output_shape = model.compute_output_shape(input_shape)
self.assertEqual(expected_output_shape, actual_output_shape)
@parameterized.parameters((768, 6), (1024, 2))
def test_expandcondense_num_parameters(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
proj_size = proj_multiple * data.shape[-1]
model = tf.keras.models.Sequential()
model.add(
TNExpandCondense(
proj_multiplier=proj_multiple,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
w1_params = data.shape[-1]**2
w2_params = 128 * 128 * (proj_size // data.shape[-1])
w3_params = 128 * 128 * (proj_size // data.shape[-1])
w4_params = (data.shape[-1] // 128) * 128 * data.shape[-1]
bias_params = ((data.shape[-1] // 128) * 128 *
(proj_size // data.shape[-1]))
expected_num_parameters = (w1_params + w2_params + w3_params +
w4_params) + bias_params
self.assertEqual(expected_num_parameters, model.count_params())
@parameterized.parameters((912, 6), (200, 2))
def test_incorrect_sizes(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
with self.assertRaises(AssertionError):
model = self._build_model(data, proj_multiple)
model.compile(optimizer='adam', loss='binary_crossentropy')
@parameterized.parameters((768, 6), (1024, 2))
def test_config(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
expected_num_parameters = model.layers[0].count_params()
# Serialize model and use config to create new layer
model_config = model.get_config()
layer_config = model_config['layers'][1]['config']
new_model = TNExpandCondense.from_config(layer_config)
# Build the layer so we can count params below
new_model.build(layer_config['batch_input_shape'])
# Check that original layer had same num params as layer built from config
self.assertEqual(expected_num_parameters, new_model.count_params())
@parameterized.parameters((768, 6), (1024, 2))
def test_model_save(self, input_dim, proj_multiple):
data = np.random.randint(10, size=(100, input_dim))
model = self._build_model(data, proj_multiple)
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model for 5 epochs
model.fit(data, self.labels, epochs=5, batch_size=32)
save_path = os.path.join(self.get_temp_dir(), 'test_model')
model.save(save_path)
loaded_model = tf.keras.models.load_model(save_path)
# Compare model predictions and loaded_model predictions
self.assertAllEqual(model.predict(data), loaded_model.predict(data))
if __name__ == '__main__':
tf.test.main()
| 5,890 | 35.81875 | 79 | py |
models | models-master/official/nlp/modeling/ops/segment_extractor.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for extracting segments from sentences in documents."""
import tensorflow as tf
# Get a tensor of uniform random values in [0, 1) with the same ragged shape
# as `positions`; used below to make the sampling decisions.
def _get_random(positions, random_fn):
flat_random = random_fn(
shape=tf.shape(positions.flat_values),
minval=0,
maxval=1,
dtype=tf.float32)
return positions.with_flat_values(flat_random)
# For every position j in a row, sample a position preceding j, i.e. a
# position in [0, j-1].
def _random_int_up_to(maxval, random_fn):
  # Need to cast because the int kernel for uniform doesn't support broadcast.
  # Sampling a float in [0, maxval) and casting back to int rounds down, so
  # the result is an integer in [0, maxval - 1].
float_maxval = tf.cast(maxval, tf.float32)
return tf.cast(
random_fn(
shape=tf.shape(maxval),
minval=tf.zeros_like(float_maxval),
maxval=float_maxval),
dtype=maxval.dtype)
def _random_int_from_range(minval, maxval, random_fn):
  # Need to cast because the int kernel for uniform doesn't support broadcast.
  # Sampling a float in [minval, maxval) and casting back to int rounds down,
  # so maxval itself is never returned.
float_minval = tf.cast(minval, tf.float32)
float_maxval = tf.cast(maxval, tf.float32)
return tf.cast(
random_fn(tf.shape(maxval), minval=float_minval, maxval=float_maxval),
maxval.dtype)
def _sample_from_other_batch(sentences, random_fn):
"""Samples sentences from other batches."""
# other_batch: <int64>[num_sentences]: The batch to sample from for each
# sentence.
other_batch = random_fn(
shape=[tf.size(sentences)],
minval=0,
maxval=sentences.nrows() - 1,
dtype=tf.int64)
other_batch += tf.cast(other_batch >= sentences.value_rowids(), tf.int64)
# other_sentence: <int64>[num_sentences]: The sentence within each batch
# that we sampled.
other_sentence = _random_int_up_to(
tf.gather(sentences.row_lengths(), other_batch), random_fn)
return sentences.with_values(tf.stack([other_batch, other_sentence], axis=1))
def get_sentence_order_labels(sentences,
random_threshold=0.5,
random_next_threshold=0.5,
random_fn=tf.random.uniform):
"""Extract segments and labels for sentence order prediction (SOP) task.
Extracts the segment and labels for the sentence order prediction task
defined in "ALBERT: A Lite BERT for Self-Supervised Learning of Language
Representations" (https://arxiv.org/pdf/1909.11942.pdf)
Args:
sentences: a `RaggedTensor` of shape [batch, (num_sentences)] with string
dtype.
    random_threshold: (optional) A float threshold between 0 and 1, used to
      determine whether to extract a random, out-of-batch sentence or a
      succeeding sentence. Higher values favor the succeeding sentence.
    random_next_threshold: (optional) A float threshold between 0 and 1, used
      to determine whether to extract either a random, out-of-batch, or
      succeeding sentence, or a preceding sentence. Higher values favor
      preceding sentences.
random_fn: (optional) An op used to generate random float values.
Returns:
    a tuple of (preceding_or_random_next, is_succeeding_or_random) where:
      preceding_or_random_next: a `RaggedTensor` of strings with the same shape
        as `sentences`, containing either a preceding, succeeding, or random
        out-of-batch sentence for its counterpart in `sentences`, as indicated
        by the corresponding label in `is_succeeding_or_random`.
      is_succeeding_or_random: a `RaggedTensor` of bool values with the same
        shape as `sentences`; True if the corresponding sentence in
        `preceding_or_random_next` is a random or succeeding sentence, False
        otherwise.
"""
# Create a RaggedTensor in the same shape as sentences ([doc, (sentences)])
# whose values are index positions.
positions = tf.ragged.range(sentences.row_lengths())
row_lengths_broadcasted = tf.expand_dims(positions.row_lengths(),
-1) + 0 * positions
row_lengths_broadcasted_flat = row_lengths_broadcasted.flat_values
# Generate indices for all preceeding, succeeding and random.
  # For every position j in a row, sample a position preceding j, i.e. a
  # position in [0, j-1].
all_preceding = tf.ragged.map_flat_values(_random_int_up_to, positions,
random_fn)
  # For every position j, sample a position strictly following j, i.e. a
  # position in [j + 1, row_length - 1].
all_succeeding = positions.with_flat_values(
tf.ragged.map_flat_values(_random_int_from_range,
positions.flat_values + 1,
row_lengths_broadcasted_flat, random_fn))
# Convert to format that is convenient for `gather_nd`
rows_broadcasted = tf.expand_dims(tf.range(sentences.nrows()),
-1) + 0 * positions
all_preceding_nd = tf.stack([rows_broadcasted, all_preceding], -1)
all_succeeding_nd = tf.stack([rows_broadcasted, all_succeeding], -1)
all_random_nd = _sample_from_other_batch(positions, random_fn)
  # There are a few spots with no "preceding" or "succeeding" item (e.g. the
  # first and last sentences in a document). Mark where these are; we will
  # patch them up later to grab a random sentence from another document.
all_zeros = tf.zeros_like(positions)
all_ones = tf.ones_like(positions)
valid_preceding_mask = tf.cast(
tf.concat([all_zeros[:, :1], all_ones[:, 1:]], -1), tf.bool)
valid_succeeding_mask = tf.cast(
tf.concat([all_ones[:, :-1], all_zeros[:, -1:]], -1), tf.bool)
  # Decide what to use for the segment: (1) a random, out-of-batch item,
  # (2) a preceding item, or (3) a succeeding item.
  # Should get an out-of-batch item instead of a succeeding item.
should_get_random = ((_get_random(positions, random_fn) > random_threshold)
| tf.logical_not(valid_succeeding_mask))
random_or_succeeding_nd = tf.compat.v1.where(should_get_random, all_random_nd,
all_succeeding_nd)
  # Choose which items should get a random or succeeding item. Force positions
  # that don't have a valid preceding item to get a random or succeeding item.
should_get_random_or_succeeding = (
(_get_random(positions, random_fn) > random_next_threshold)
| tf.logical_not(valid_preceding_mask))
gather_indices = tf.compat.v1.where(should_get_random_or_succeeding,
random_or_succeeding_nd, all_preceding_nd)
return (tf.gather_nd(sentences,
gather_indices), should_get_random_or_succeeding)
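# Minimal usage sketch with two toy documents (assumed data). The pairings are
# sampled, so only the shapes and the label semantics are deterministic here.
def _sentence_order_example():
  sentences = tf.ragged.constant([["s1", "s2", "s3"], ["t1", "t2"]])
  paired, is_succeeding_or_random = get_sentence_order_labels(sentences)
  # `paired` has the same ragged shape as `sentences`; each entry is a
  # preceding, succeeding, or random out-of-batch sentence, and the label is
  # False exactly when a preceding sentence was chosen.
  return paired, is_succeeding_or_random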
def get_next_sentence_labels(sentences,
random_threshold=0.5,
random_fn=tf.random.uniform):
"""Extracts the next sentence label from sentences.
Args:
sentences: A `RaggedTensor` of strings w/ shape [batch, (num_sentences)].
random_threshold: (optional) A float threshold between 0 and 1, used to
determine whether to extract a random sentence or the immediate next
      sentence. Higher values favor the next sentence.
random_fn: (optional) An op used to generate random float values.
Returns:
    A tuple of (next_sentence_or_random, is_next_sentence) where:
      next_sentence_or_random: A `RaggedTensor` of strings with the same shape
        as `sentences`, containing either the subsequent sentence in the
        document or a randomly injected sentence.
      is_next_sentence: A `RaggedTensor` of bool with the same shape as
        `sentences`, indicating whether or not `next_sentence_or_random` is
        truly the subsequent sentence.
"""
  # Shift positions to get the next-sentence prediction positions.
positions = tf.ragged.range(sentences.row_lengths())
  # Shift every position one to the right, wrapping around at the row end.
next_sentences_pos = (positions + 1) % tf.expand_dims(sentences.row_lengths(),
1)
rows_broadcasted = tf.expand_dims(tf.range(sentences.nrows()),
-1) + 0 * positions
next_sentences_pos_nd = tf.stack([rows_broadcasted, next_sentences_pos], -1)
all_random_nd = _sample_from_other_batch(positions, random_fn)
# Mark the items that don't have a next sentence (e.g. the last
# sentences in the document). We will patch these up and force them to grab a
# random sentence from a random document.
valid_next_sentences = tf.cast(
tf.concat([
tf.ones_like(positions)[:, :-1],
tf.zeros([positions.nrows(), 1], dtype=tf.int64)
], -1), tf.bool)
is_random = ((_get_random(positions, random_fn) > random_threshold)
| tf.logical_not(valid_next_sentences))
gather_indices = tf.compat.v1.where(is_random, all_random_nd,
next_sentences_pos_nd)
return tf.gather_nd(sentences, gather_indices), tf.logical_not(is_random)
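# Minimal usage sketch for next-sentence labels, again with toy documents
# (assumed data); the random draws make individual outputs nondeterministic.
def _next_sentence_example():
  sentences = tf.ragged.constant([["s1", "s2", "s3"], ["t1", "t2"]])
  next_or_random, is_next = get_next_sentence_labels(sentences)
  # Each entry of `next_or_random` is either the following sentence in the
  # same document or a sentence from another document; `is_next` is True only
  # in the former case.
  return next_or_random, is_next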
| 9,596 | 44.483412 | 80 | py |
models | models-master/official/nlp/modeling/ops/decoding_module.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Decoding Strategies (beam_search, top_k, top_p and greedy)."""
import abc
from typing import Any, Callable, Dict, Optional, Tuple
import tensorflow as tf
from tensorflow.python.framework import dtypes
from official.modeling import tf_utils
Output = Tuple[tf.Tensor, tf.Tensor, Optional[tf.Tensor]]
InternalState = Tuple[tf.Tensor, tf.Tensor, tf.Tensor, Dict]
InitialState = Tuple[Dict[str, Any], Dict[str, Any]]
class StateKeys:
"""Keys to dictionary storing the state of Decoding loop."""
# Variable storing the loop index.
CUR_INDEX = "CUR_INDEX"
# Top sequences that are alive for each batch item. Alive sequences are ones
# that have not generated an EOS token. Sequences that reach EOS are marked as
# finished and moved to the FINISHED_SEQ tensor.
# Has shape [batch_size, beam_size, CUR_INDEX + 1] for SequenceBeamSearch and
# [batch_size, CUR_INDEX + 1] otherwise.
ALIVE_SEQ = "ALIVE_SEQ"
# Log probabilities of each alive sequence. Shape [batch_size, beam_size]
ALIVE_LOG_PROBS = "ALIVE_LOG_PROBS"
# Dictionary of cached values for each alive sequence. The cache stores
# the encoder output, attention bias, and the decoder attention output from
# the previous iteration.
ALIVE_CACHE = "ALIVE_CACHE"
  # The initial model state/cache after the model processes the initial token.
  # The cache will only be filled if extra_cache_output is true.
INITIAL_OUTPUT_CACHE = "INITIAL_OUTPUT_CACHE"
# Top finished sequences for each batch item.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are
# shorter than CUR_INDEX + 1 are padded with 0s.
FINISHED_SEQ = "FINISHED_SEQ"
# Scores for each finished sequence. Score = log probability / length norm
# Shape [batch_size, beam_size]
FINISHED_SCORES = "FINISHED_SCORES"
# Flags indicating which sequences in the finished sequences are finished.
# At the beginning, all of the sequences in FINISHED_SEQ are filler values.
# True -> finished sequence, False -> filler. Shape [batch_size, beam_size]
FINISHED_FLAGS = "FINISHED_FLAGS"
def log_prob_from_logits(logits):
return logits - tf.reduce_logsumexp(logits, axis=-1, keepdims=True)
def shape_list(tensor):
"""Return a list of the tensor's shape, and ensure no None values in list."""
return tf_utils.get_shape_list(tensor)
def get_shape_keep_last_dim(tensor):
shape_list_obj = shape_list(tensor)
for i in range(len(shape_list_obj) - 1):
shape_list_obj[i] = None
if isinstance(shape_list_obj[-1], tf.Tensor):
shape_list_obj[-1] = None
return tf.TensorShape(shape_list_obj)
def expand_to_same_rank(tensor, target):
"""Expands a given tensor to target's rank to be broadcastable.
Args:
tensor: input tensor to tile. Shape: [b, d1, ..., da]
target: target tensor. Shape: [b, d1, ..., da, ..., dn]
Returns:
Tiled tensor of shape [b, d1, ..., da, 1, ..., 1] with same rank of target
Raises:
    ValueError: if the shape rank of `tensor` or `target` is None.
"""
if tensor.shape.rank is None:
raise ValueError("Expect rank for tensor shape, but got None.")
if target.shape.rank is None:
raise ValueError("Expect rank for target shape, but got None.")
with tf.name_scope("expand_rank"):
diff_rank = target.shape.rank - tensor.shape.rank
for _ in range(diff_rank):
tensor = tf.expand_dims(tensor, -1)
return tensor
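# Small sketch of the rank expansion with assumed shapes: trailing size-1 axes
# are appended until `tensor` is broadcastable against `target`.
def _expand_to_same_rank_example():
  tensor = tf.zeros([2, 3])
  target = tf.zeros([2, 3, 4, 5])
  expanded = expand_to_same_rank(tensor, target)
  # expanded.shape == [2, 3, 1, 1], so `expanded * target` broadcasts cleanly.
  return expanded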
class DecodingModule(tf.Module, metaclass=abc.ABCMeta):
"""A base class for the API required for decoding (go/decoding-tf-nlp)."""
def __init__(self,
length_normalization_fn: Callable[[int, tf.DType], float],
dtype: tf.DType = tf.float32,
decoding_name: Optional[str] = None,
extra_cache_output: bool = False):
"""Initialize the Decoding Module.
Args:
length_normalization_fn: Closure for returning length normalization
parameter. Function accepts input as length, dtype and returns float.
dtype: A tensorflow data type used for score computation. The default is
tf.float32.
decoding_name: an optional name for the decoding loop tensors.
extra_cache_output: If true, the first cache will be in the states.
"""
    self.length_normalization_fn = length_normalization_fn
    self.dtype = tf.as_dtype(dtype)
    self.decoding_name = decoding_name
    self.extra_cache_output = extra_cache_output
def generate(self,
initial_ids: tf.Tensor,
initial_cache: Dict[str, tf.Tensor],
initial_log_probs: Optional[tf.Tensor] = None) -> Output:
"""Implements the decoding strategy (beam_search or sampling).
Args:
initial_ids: initial ids to pass into the symbols_to_logits_fn. int tensor
with shape [batch_size, 1]
initial_cache: dictionary for caching model outputs from previous step.
initial_log_probs: Optionally initial log probs if there is a prefix
sequence we want to start to decode from.
Returns:
Tuple of tensors representing
finished_sequence: shape [batch, max_seq_length]
finished_scores: [batch]
first_cache: The cache after init token
"""
batch_size = (
initial_ids.shape.as_list()[0]
if self.padded_decode else tf.shape(initial_ids)[0])
state, state_shapes = self._create_initial_state(initial_ids, initial_cache,
batch_size,
initial_log_probs)
def _generate_step(state):
topk_seq, topk_log_probs, topk_ids, new_cache = self._grow_alive_seq(
state, batch_size)
new_finished_flags = self._finished_flags(topk_ids, state)
alive_state = self._get_new_alive_state(topk_seq,
topk_log_probs,
new_finished_flags,
new_cache)
finished_state = self._get_new_finished_state(state,
topk_seq,
topk_log_probs,
new_finished_flags,
batch_size)
new_state = {
StateKeys.CUR_INDEX: state[StateKeys.CUR_INDEX] + 1
}
new_state.update(alive_state)
new_state.update(finished_state)
if self.extra_cache_output:
i = state[StateKeys.CUR_INDEX]
old_cache = state[StateKeys.INITIAL_OUTPUT_CACHE]
def update_with_cache(new_state, cache):
"""Updates new_state with cache."""
new_state.update({StateKeys.INITIAL_OUTPUT_CACHE: cache})
tf.cond(
tf.equal(i, 0), lambda: update_with_cache(new_state, new_cache),
lambda: update_with_cache(new_state, old_cache))
return [new_state]
finished_state = tf.nest.map_structure(
tf.stop_gradient,
tf.while_loop(
self._continue_search,
_generate_step,
loop_vars=[state],
shape_invariants=[state_shapes],
parallel_iterations=1,
name=self.decoding_name))
final_state = self._process_finished_state(finished_state[0])
return final_state
@abc.abstractmethod
def _create_initial_state(
self,
initial_ids: tf.Tensor,
initial_cache: Dict[str, tf.Tensor],
batch_size: int,
initial_log_probs: Optional[tf.Tensor] = None) -> InitialState:
"""Return initial state dictionary and its shape invariants."""
pass
@abc.abstractmethod
def _grow_alive_seq(self,
state: Dict[str, Any],
batch_size: int) -> InternalState:
"""Grow alive sequences by one token.
Args:
state: A dictionary with the current loop state.
batch_size: The given batch size
Returns:
Tuple of
(Top sequences,
Scores of returned sequences,
New ids,
New alive cache)
"""
pass
@abc.abstractmethod
def _get_new_alive_state(
self,
new_seq: tf.Tensor,
new_log_probs: tf.Tensor,
new_finished_flags: tf.Tensor,
new_cache: Dict[str, tf.Tensor]) -> Dict[str, Any]:
"""Gather the sequences that are still alive.
Args:
      new_seq: New sequences generated by growing the current alive sequences;
        an int32 tensor.
      new_log_probs: Log probabilities of the new sequences; a float32 tensor.
      new_finished_flags: A boolean Tensor indicating which sequences have
        just finished.
new_cache: Dict of cached values for each sequence.
Returns:
Dictionary with alive keys from StateKeys.
"""
pass
@abc.abstractmethod
def _get_new_finished_state(self,
state: Dict[str, Any],
new_seq: tf.Tensor,
new_log_probs: tf.Tensor,
new_finished_flags: tf.Tensor,
batch_size: int) -> Dict[str, tf.Tensor]:
"""Combine new and old finished sequences.
Args:
state: A dictionary with the current loop state.
      new_seq: New sequences generated by growing the current alive sequences;
        an int32 tensor.
      new_log_probs: Log probabilities of the new sequences; a float32 tensor.
      new_finished_flags: A boolean Tensor indicating which sequences have
        just finished.
batch_size: The given batch size.
Returns:
Dictionary with finished keys from StateKeys.
"""
pass
@abc.abstractmethod
def _process_finished_state(self, finished_state: Dict[str, Any]) -> Output:
"""Process the alive/finished state to return final sequences and scores."""
pass
@abc.abstractmethod
def _continue_search(self, state: Dict[str, Any]) -> tf.Tensor:
"""Returns a bool tensor if the decoding loop should continue."""
pass
@abc.abstractmethod
def _finished_flags(self,
topk_ids: tf.Tensor,
state: Dict[str, Any]) -> tf.Tensor:
"""Calculate the finished flags."""
pass
def inf(self):
"""Returns a value close to infinity, but is still finite in `dtype`.
This is useful to get a very large value that is still zero when multiplied
by zero. The floating-point "Inf" value is NaN when multiplied by zero.
Returns:
A very large value.
"""
if self.dtype == dtypes.float32 or self.dtype == dtypes.bfloat16:
return 1e7
elif self.dtype == dtypes.float16:
return dtypes.float16.max
else:
raise AssertionError("Invalid dtype: %s" % self.dtype)
| 11,269 | 35.590909 | 80 | py |
models | models-master/official/nlp/modeling/ops/sampling_module.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sampling module for top_k, top_p and greedy decoding."""
import abc
from typing import Any, Callable, Dict, Optional
import numpy as np
import tensorflow as tf
from official.nlp.modeling.ops import decoding_module
def greedy(log_probs):
"""Returns the top ids and scores based on greedy decoding."""
log_probs, ids = tf.math.top_k(log_probs, k=1)
return log_probs, ids
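# Tiny worked example with assumed values: greedy decoding simply takes the
# argmax id and its log probability.
def _greedy_example():
  log_probs = tf.math.log(tf.constant([[0.1, 0.7, 0.2]]))
  top_log_probs, top_ids = greedy(log_probs)
  # top_ids == [[1]] and top_log_probs == [[log(0.7)]].
  return top_log_probs, top_ids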
def sample_logits_with_temperature(logits, temperature):
"""Applies a sampling temperature.
  A temperature below 1.0 skews the distribution towards high-probability
  tokens and lowers the mass in the tail of the distribution; a temperature
  above 1.0 flattens it.
Args:
logits: Input logits for next token.
temperature: Tensor for specifying the sampling temperature.
Returns:
Logits with applied temperature.
"""
return logits / temperature
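# Sketch of the temperature effect on assumed logits: dividing by a value
# below 1.0 sharpens the softmax, dividing by a value above 1.0 flattens it.
def _temperature_example():
  logits = tf.constant([[2.0, 1.0, 0.0]])
  sharp = tf.nn.softmax(sample_logits_with_temperature(logits, 0.5))
  flat = tf.nn.softmax(sample_logits_with_temperature(logits, 2.0))
  # reduce_max(sharp) > reduce_max(flat): more mass lands on the argmax token.
  return sharp, flat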
def sample_top_k(logits, top_k):
"""Chooses top_k logits and sets the others to negative infinity.
Args:
logits: Input logits for next token.
top_k: Tensor to specify the top_k values.
Returns:
Logits with top_k filtering applied.
"""
top_k = tf.clip_by_value(
top_k, clip_value_min=1, clip_value_max=tf.shape(logits)[-1])
top_k_logits = tf.math.top_k(logits, k=top_k)
indices_to_remove = logits < tf.expand_dims(top_k_logits[0][..., -1], -1)
top_k_logits = set_tensor_by_indices_to_value(logits, indices_to_remove,
np.NINF)
return top_k_logits
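# Worked example with assumed logits: only the k largest logits survive;
# everything else becomes -inf, so it can never be sampled.
def _top_k_example():
  logits = tf.constant([[1.0, 3.0, 2.0, 0.5]])
  # Returns [[-inf, 3.0, 2.0, -inf]].
  return sample_top_k(logits, tf.constant(2))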
def sample_top_p(logits, top_p):
"""Chooses most probable logits with cumulative probabilities upto top_p.
Sets the remaining logits to negative infinity.
Args:
logits: Input logits for next token.
    top_p: Float tensor with a value >= 0 and < 1.0.
Returns:
Logits with top_p filtering applied.
"""
sorted_indices = tf.argsort(logits, direction="DESCENDING")
# Flatten logits as tf.gather on TPU needs axis to be compile time constant.
logits_shape = decoding_module.shape_list(logits)
range_for_gather = tf.expand_dims(tf.range(0, logits_shape[0]), axis=1)
range_for_gather = tf.tile(range_for_gather * logits_shape[1],
[1, logits_shape[1]]) + sorted_indices
flattened_logits = tf.reshape(logits, [-1])
flattened_sorted_indices = tf.reshape(range_for_gather, [-1])
sorted_logits = tf.reshape(
tf.gather(flattened_logits, flattened_sorted_indices),
[logits_shape[0], logits_shape[1]])
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
# Remove tokens with cumulative probability above the threshold.
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep the first token above threshold.
sorted_indices_to_remove = tf.roll(sorted_indices_to_remove, 1, axis=-1)
sorted_indices_to_remove = tf.concat([
tf.zeros_like(sorted_indices_to_remove[:, :1]),
sorted_indices_to_remove[:, 1:]
], -1)
# Scatter sorted indices to original indexes.
indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove,
sorted_indices)
top_p_logits = set_tensor_by_indices_to_value(logits, indices_to_remove,
np.NINF)
return top_p_logits
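# Worked example with assumed probabilities: for top_p=0.7 the cumulative mass
# [0.5, 0.8, 0.95, 1.0] first crosses the threshold at the second token, which
# is kept (the first token above the threshold is retained), so ids 0 and 1
# survive and the rest become -inf.
def _top_p_example():
  logits = tf.math.log(tf.constant([[0.5, 0.3, 0.15, 0.05]]))
  return sample_top_p(logits, tf.constant(0.7))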
def scatter_values_on_batch_indices(values, batch_indices):
"""Scatter `values` into a tensor using `batch_indices`.
Args:
values: tensor of shape [batch_size, vocab_size] containing the values to
scatter
batch_indices: tensor of shape [batch_size, vocab_size] containing the
indices to insert (should be a permutation in range(0, n))
Returns:
Tensor of shape [batch_size, vocab_size] with values inserted at
batch_indices
"""
tensor_shape = decoding_module.shape_list(batch_indices)
broad_casted_batch_dims = tf.reshape(
tf.broadcast_to(
tf.expand_dims(tf.range(tensor_shape[0]), axis=-1), tensor_shape),
[1, -1])
pair_indices = tf.transpose(
tf.concat([broad_casted_batch_dims,
tf.reshape(batch_indices, [1, -1])], 0))
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), tensor_shape)
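# Tiny worked example with assumed values: the op is a row-wise scatter, i.e.
# values[b, k] lands at position batch_indices[b, k] of row b.
def _scatter_example():
  values = tf.constant([[10.0, 20.0, 30.0]])
  batch_indices = tf.constant([[2, 0, 1]])
  # Returns [[20.0, 30.0, 10.0]].
  return scatter_values_on_batch_indices(values, batch_indices)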
def set_tensor_by_indices_to_value(input_tensor, indices, value):
"""Where indices is True, set the value in input_tensor to value.
Args:
input_tensor: float (batch_size, dim)
indices: bool (batch_size, dim)
value: float scalar
Returns:
output_tensor: same shape as input_tensor.
"""
value_tensor = tf.zeros_like(input_tensor) + value
output_tensor = tf.where(indices, value_tensor, input_tensor)
return output_tensor
class SamplingModule(decoding_module.DecodingModule, metaclass=abc.ABCMeta):
"""Implementation for sampling strategies (go/decoding-tf-nlp)."""
def __init__(self,
symbols_to_logits_fn,
vocab_size: int,
max_decode_length: int,
eos_id: int,
padded_decode: bool,
length_normalization_fn: Optional[Callable[[int, tf.DType],
float]] = None,
top_k=0,
top_p=1.0,
sample_temperature=0.0,
enable_greedy: bool = True,
dtype: tf.DType = tf.float32,
decoding_name: Optional[str] = None,
extra_cache_output: bool = False):
"""Initialize sampling module."""
self.symbols_to_logits_fn = symbols_to_logits_fn
self.length_normalization_fn = length_normalization_fn
self.eos_id = eos_id
self.padded_decode = padded_decode
self.dtype = tf.as_dtype(dtype)
self.vocab_size = tf.convert_to_tensor(vocab_size, dtype=tf.int32)
self.max_decode_length = max_decode_length
self.top_k = tf.convert_to_tensor(top_k, dtype=tf.int32)
self.top_p = tf.convert_to_tensor(top_p, dtype=tf.float32)
self.sample_temperature = tf.convert_to_tensor(
sample_temperature, dtype=tf.float32)
self.enable_greedy = enable_greedy
self.decoding_name = decoding_name
self.extra_cache_output = extra_cache_output
super(SamplingModule, self).__init__(
length_normalization_fn=length_normalization_fn,
dtype=dtype,
decoding_name=decoding_name,
extra_cache_output=extra_cache_output)
def _grow_alive_seq(self,
state: Dict[str, Any],
batch_size: int) -> decoding_module.InternalState:
"""Grow alive sequences by one token.
    This function implements decoding strategies such as top_p, top_k and
    greedy for choosing the next token.
Args:
state: A dictionary with the current loop state.
batch_size: The given batch size
Returns:
Tuple of
(Top sequences [batch, curr_index + 1] or [batch, max_decode_length + 1],
Scores of returned sequences [batch, 1],
New ids [batch, 1],
New alive cache)
"""
i = state[decoding_module.StateKeys.CUR_INDEX]
alive_seq = state[decoding_module.StateKeys.ALIVE_SEQ]
alive_log_probs = state[decoding_module.StateKeys.ALIVE_LOG_PROBS]
alive_cache = state[decoding_module.StateKeys.ALIVE_CACHE]
if self.padded_decode:
ids = tf.slice(alive_seq, [0, i], [batch_size, 1])
else:
ids = alive_seq
new_logits, new_cache = self.symbols_to_logits_fn(ids, i, alive_cache)
candidate_log_probs = decoding_module.log_prob_from_logits(
new_logits)
original_log_probs = candidate_log_probs + alive_log_probs
topk_log_probs, topk_ids = None, None
if self.enable_greedy:
topk_log_probs, topk_ids = greedy(original_log_probs)
else:
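      # Progressively filter the logits: apply temperature first, then top-k,
      # then top-p. Each tf.cond below is a pass-through when its knob keeps
      # the default value (0.0, 0, and 1.0 respectively).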
temperature_fn = sample_logits_with_temperature
sampled_logits = tf.cond(
self.sample_temperature > 0.0,
lambda: temperature_fn(new_logits, self.sample_temperature),
lambda: new_logits)
sampled_logits = tf.cond(
self.top_k > 0,
lambda: sample_top_k(sampled_logits, self.top_k),
lambda: sampled_logits)
sampled_logits = tf.cond(
self.top_p < 1,
lambda: sample_top_p(sampled_logits, self.top_p),
lambda: sampled_logits)
topk_ids = tf.random.categorical(
sampled_logits, dtype=tf.int32, num_samples=1)
topk_log_probs = tf.gather(
original_log_probs, topk_ids, axis=1, batch_dims=1)
if self.padded_decode:
topk_seq = tf.transpose(alive_seq, perm=[1, 0])
topk_seq = tf.tensor_scatter_nd_update(
topk_seq, [[i + 1]], tf.expand_dims(tf.squeeze(topk_ids, -1), 0))
topk_seq = tf.transpose(topk_seq, perm=[1, 0])
else:
topk_seq = tf.concat([alive_seq, topk_ids], axis=-1)
return topk_seq, topk_log_probs, topk_ids, new_cache
def _create_initial_state(
self,
initial_ids: tf.Tensor,
initial_cache: Dict[str, tf.Tensor],
batch_size: int,
initial_log_probs: Optional[tf.Tensor] = None
) -> decoding_module.InitialState:
"""Return initial state dictionary and its shape invariants."""
for key, value in initial_cache.items():
for inner_value in tf.nest.flatten(value):
if inner_value.dtype != self.dtype:
raise TypeError(
"initial_cache element for key '%s' has dtype %s that does not "
"match sampling_module's dtype of %s. Value: %s" %
              (key, inner_value.dtype.name, self.dtype.name, inner_value))
# Current loop index (starts at 0)
cur_index = tf.constant(0)
# Alive sequence with shape [batch_size, 1]
alive_seq = initial_ids
alive_seq = tf.expand_dims(alive_seq, axis=-1)
if self.padded_decode:
alive_seq = tf.tile(alive_seq, [1, self.max_decode_length + 1])
# Initial log probabilities with shape [batch_size, 1].
if initial_log_probs is None:
initial_log_probs = tf.constant([[0.]], dtype=self.dtype)
alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])
else:
alive_log_probs = initial_log_probs
alive_cache = initial_cache
# Initialize tensor storing finished sequences [batch_size, 1, 1].
finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)
# Set scores of the initial finished seqs to negative infinity.
finished_scores = tf.zeros([batch_size, 1], dtype=self.dtype)
# Initialize finished flags with all False values.
finished_flags = tf.zeros([batch_size, 1], tf.bool)
# Create state dictionary and state shapes.
state = {
decoding_module.StateKeys.CUR_INDEX: cur_index,
decoding_module.StateKeys.ALIVE_SEQ: alive_seq,
decoding_module.StateKeys.ALIVE_LOG_PROBS: alive_log_probs,
decoding_module.StateKeys.ALIVE_CACHE: alive_cache,
decoding_module.StateKeys.FINISHED_SEQ: finished_seq,
decoding_module.StateKeys.FINISHED_SCORES: finished_scores,
decoding_module.StateKeys.FINISHED_FLAGS: finished_flags
}
if self.padded_decode:
state_shape_invariants = {
decoding_module.StateKeys.CUR_INDEX:
tf.TensorShape([]),
decoding_module.StateKeys.ALIVE_SEQ:
tf.TensorShape([batch_size, self.max_decode_length + 1]),
decoding_module.StateKeys.ALIVE_LOG_PROBS:
tf.TensorShape([batch_size, 1]),
decoding_module.StateKeys.ALIVE_CACHE:
tf.nest.map_structure(lambda state: state.get_shape(),
alive_cache),
decoding_module.StateKeys.FINISHED_SEQ:
tf.TensorShape([batch_size, self.max_decode_length + 1]),
decoding_module.StateKeys.FINISHED_SCORES:
tf.TensorShape([batch_size, 1]),
decoding_module.StateKeys.FINISHED_FLAGS:
tf.TensorShape([batch_size, 1])
}
else:
state_shape_invariants = {
decoding_module.StateKeys.CUR_INDEX:
tf.TensorShape([]),
decoding_module.StateKeys.ALIVE_SEQ:
tf.TensorShape([None, None]),
decoding_module.StateKeys.ALIVE_LOG_PROBS:
tf.TensorShape([None, 1]),
decoding_module.StateKeys.ALIVE_CACHE:
tf.nest.map_structure(decoding_module.get_shape_keep_last_dim,
alive_cache),
decoding_module.StateKeys.FINISHED_SEQ:
tf.TensorShape([None, None]),
decoding_module.StateKeys.FINISHED_SCORES:
tf.TensorShape([None, 1]),
decoding_module.StateKeys.FINISHED_FLAGS:
tf.TensorShape([None, 1])
}
if self.extra_cache_output:
state.update(
{decoding_module.StateKeys.INITIAL_OUTPUT_CACHE: alive_cache})
if self.padded_decode:
state_shape_invariants.update({
decoding_module.StateKeys.INITIAL_OUTPUT_CACHE:
tf.nest.map_structure(lambda state: state.get_shape(),
alive_cache)
})
else:
state_shape_invariants.update({
decoding_module.StateKeys.INITIAL_OUTPUT_CACHE:
tf.nest.map_structure(decoding_module.get_shape_keep_last_dim,
alive_cache),
})
return state, state_shape_invariants
def _get_new_alive_state(self, new_seq: tf.Tensor, new_log_probs: tf.Tensor,
new_finished_flags: tf.Tensor,
new_cache: Dict[str, tf.Tensor]) -> Dict[str, Any]:
"""Gather the sequences that are still alive.
This function resets the sequences in the alive_state that are finished.
Args:
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, cur_index + 1]
new_log_probs: Log probabilities of new sequences float32 tensor with
shape [batch_size, 1]
      new_finished_flags: A boolean Tensor indicating which sequences have
        finished (reached `eos_id`).
new_cache: Dict of cached values for each sequence.
Returns:
Dictionary with alive keys.
"""
new_seq = tf.multiply(
new_seq, tf.cast(tf.logical_not(new_finished_flags), new_seq.dtype))
return {
decoding_module.StateKeys.ALIVE_SEQ: new_seq,
decoding_module.StateKeys.ALIVE_LOG_PROBS: new_log_probs,
decoding_module.StateKeys.ALIVE_CACHE: new_cache
}
def _get_new_finished_state(self, state: Dict[str, Any], new_seq: tf.Tensor,
new_log_probs: tf.Tensor,
new_finished_flags: tf.Tensor,
batch_size: int) -> Dict[str, tf.Tensor]:
"""Combine new and old finished sequences.
Args:
state: A dictionary with the current loop state.
new_seq: New sequences generated by growing the current alive sequences
int32 tensor [batch, curr_index + 1] or [batch, max_decode_length + 1].
new_log_probs: Log probabilities of new sequences float32 tensor with
shape [batch, 1].
      new_finished_flags: A boolean Tensor indicating which sequences have
        finished (reached `eos_id`).
batch_size: The given batch size.
Returns:
Dictionary with finished keys from StateKeys.
"""
i = state[decoding_module.StateKeys.CUR_INDEX]
finished_seq = state[decoding_module.StateKeys.FINISHED_SEQ]
finished_scores = state[decoding_module.StateKeys.FINISHED_SCORES]
finished_flags = state[decoding_module.StateKeys.FINISHED_FLAGS]
if not self.padded_decode:
finished_seq = tf.concat(
[finished_seq, tf.zeros([batch_size, 1], tf.int32)], axis=-1)
new_scores = new_log_probs
if self.length_normalization_fn is not None:
length_norm = self.length_normalization_fn(i + 1, self.dtype)
new_scores = new_log_probs / length_norm
new_seq = tf.multiply(
new_seq, tf.cast(tf.logical_not(finished_flags), new_seq.dtype))
new_scores = tf.multiply(
new_scores, tf.cast(tf.logical_not(finished_flags), new_scores.dtype))
finished_seq += tf.multiply(new_seq,
tf.cast(new_finished_flags, new_seq.dtype))
finished_scores += tf.multiply(
new_scores, tf.cast(new_finished_flags, new_scores.dtype))
new_finished_flags = tf.logical_or(new_finished_flags, finished_flags)
return {
decoding_module.StateKeys.FINISHED_SEQ: finished_seq,
decoding_module.StateKeys.FINISHED_SCORES: finished_scores,
decoding_module.StateKeys.FINISHED_FLAGS: new_finished_flags
}
def _process_finished_state(
self, finished_state: Dict[str, Any]) -> decoding_module.Output:
"""Process the alive/finished state to return final sequences and scores."""
alive_seq = finished_state[decoding_module.StateKeys.ALIVE_SEQ]
alive_log_probs = finished_state[decoding_module.StateKeys.ALIVE_LOG_PROBS]
finished_seq = finished_state[decoding_module.StateKeys.FINISHED_SEQ]
finished_scores = finished_state[decoding_module.StateKeys.FINISHED_SCORES]
finished_flags = finished_state[decoding_module.StateKeys.FINISHED_FLAGS]
finished_cond = tf.reduce_any(finished_flags, 1, name="finished_cond")
if self.length_normalization_fn is not None:
length_norm = self.length_normalization_fn(self.max_decode_length + 1,
self.dtype)
alive_log_probs = alive_log_probs / length_norm
seq_cond = decoding_module.expand_to_same_rank(finished_cond, finished_seq)
score_cond = decoding_module.expand_to_same_rank(finished_cond,
finished_scores)
finished_seq = tf.where(seq_cond, finished_seq, alive_seq)
finished_scores = tf.where(score_cond, finished_scores, alive_log_probs)
if self.extra_cache_output:
return finished_seq, finished_scores, finished_state[
decoding_module.StateKeys.INITIAL_OUTPUT_CACHE]
return finished_seq, finished_scores
def _continue_search(self, state) -> tf.Tensor:
i = state[decoding_module.StateKeys.CUR_INDEX]
# Have we reached max decoding length?
not_at_end = tf.less(i, self.max_decode_length)
# Have all sampled sequences reached an EOS?
all_has_eos = tf.reduce_all(
state[decoding_module.StateKeys.FINISHED_FLAGS],
axis=None,
name="search_finish_cond")
return tf.logical_and(not_at_end, tf.logical_not(all_has_eos))
def _finished_flags(self, topk_ids, state) -> tf.Tensor:
new_finished_flags = tf.equal(topk_ids, self.eos_id)
new_finished_flags = tf.logical_or(
new_finished_flags, state[decoding_module.StateKeys.FINISHED_FLAGS])
return new_finished_flags
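# Minimal usage sketch. Assumptions: `decoding_module.DecodingModule` exposes a
# `generate(initial_ids, initial_cache)` entry point, `my_symbols_to_logits_fn`
# and `cache` follow the symbols_to_logits_fn contract used throughout these
# decoding modules, and all sizes below are hypothetical.
#   sampler = SamplingModule(
#       symbols_to_logits_fn=my_symbols_to_logits_fn,
#       vocab_size=32000,
#       max_decode_length=32,
#       eos_id=2,
#       padded_decode=False,
#       enable_greedy=False,
#       top_k=10,
#       sample_temperature=0.7)
#   ids, scores = sampler.generate(
#       initial_ids=tf.zeros([8], tf.int32), initial_cache=cache)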
| 19,240 | 39.592827 | 80 | py |
models | models-master/official/nlp/modeling/ops/beam_search.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam search to find the translated sequence with the highest probability."""
import numpy as np
import tensorflow as tf
def inf(dtype):
"""Returns a value close to infinity, but is still finite in `dtype`.
This is useful to get a very large value that is still zero when multiplied by
zero. The floating-point "Inf" value is NaN when multiplied by zero.
Args:
dtype: A dtype. The returned value will be finite when casted to this dtype.
Returns:
A very large value.
"""
if dtype == "float32" or dtype == "bfloat16":
return 1e7
elif dtype == "float16":
# Disable no-member lint error, as the linter thinks np.float16 does not
# exist for some reason.
return np.finfo(np.float16).max # pylint: disable=no-member
else:
raise AssertionError("Invalid dtype: %s" % dtype)
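# Example: the point of a large finite value instead of float("inf") is that
# masked terms drop out cleanly under multiplication.
#   inf("float32") * 0.0   # == 0.0
#   float("inf") * 0.0     # == nan, which would poison downstream scores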
class _StateKeys(object):
"""Keys to dictionary storing the state of the beam search loop."""
# Variable storing the loop index.
CUR_INDEX = "CUR_INDEX"
# Top sequences that are alive for each batch item. Alive sequences are ones
# that have not generated an EOS token. Sequences that reach EOS are marked as
# finished and moved to the FINISHED_SEQ tensor.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]
ALIVE_SEQ = "ALIVE_SEQ"
# Log probabilities of each alive sequence. Shape [batch_size, beam_size]
ALIVE_LOG_PROBS = "ALIVE_LOG_PROBS"
# Dictionary of cached values for each alive sequence. The cache stores
# the encoder output, attention bias, and the decoder attention output from
# the previous iteration.
ALIVE_CACHE = "ALIVE_CACHE"
# Top finished sequences for each batch item.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are
# shorter than CUR_INDEX + 1 are padded with 0s.
FINISHED_SEQ = "FINISHED_SEQ"
# Scores for each finished sequence. Score = log probability / length norm
# Shape [batch_size, beam_size]
FINISHED_SCORES = "FINISHED_SCORES"
# Flags indicating which sequences in the finished sequences are finished.
# At the beginning, all of the sequences in FINISHED_SEQ are filler values.
# True -> finished sequence, False -> filler. Shape [batch_size, beam_size]
FINISHED_FLAGS = "FINISHED_FLAGS"
def _expand_to_same_rank(tensor, target):
"""Expands a given tensor to target's rank to be broadcastable.
Args:
tensor: input tensor to tile. Shape: [b, d1, ..., da]
target: target tensor. Shape: [b, d1, ..., da, ..., dn]
Returns:
Tiled tensor of shape [b, d1, ..., da, 1, ..., 1] with same rank of target.
Raises:
    ValueError: if the shape rank of tensor or target is None.
"""
if tensor.shape.rank is None:
raise ValueError("Expect rank for tensor shape, but got None.")
if target.shape.rank is None:
raise ValueError("Expect rank for target shape, but got None.")
with tf.name_scope("expand_rank"):
diff_rank = target.shape.rank - tensor.shape.rank
for _ in range(diff_rank):
tensor = tf.expand_dims(tensor, -1)
return tensor
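# Example (illustrative shapes): a [batch, beam] condition expanded so that it
# broadcasts against [batch, beam, length] sequences inside tf.where.
#   cond = tf.zeros([4, 2])                   # rank 2
#   target = tf.zeros([4, 2, 16])             # rank 3
#   _expand_to_same_rank(cond, target).shape  # -> TensorShape([4, 2, 1])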
class SequenceBeamSearch(tf.Module):
"""Implementation of beam search loop."""
def __init__(
self,
symbols_to_logits_fn,
vocab_size,
beam_size,
alpha,
max_decode_length,
eos_id,
padded_decode,
dtype=tf.float32,
noise_multiplier: float = 0.0,
decoding_name=None,
):
"""Initialize sequence beam search.
Args:
symbols_to_logits_fn: A function to provide logits, which is the interface
to the Transformer model. The passed in arguments are: ids -> A tensor
with shape [batch_size * beam_size, index]. index -> A scalar. cache ->
A nested dictionary of tensors [batch_size * beam_size, ...]. The
function must return a tuple of logits and the updated cache: logits ->
A tensor with shape [batch * beam_size, vocab_size]. updated cache -> A
nested dictionary with the same structure as the input cache.
vocab_size: An integer, the size of the vocabulary, used for topk
computation.
beam_size: An integer, number of beams for beam search.
alpha: A float, defining the strength of length normalization.
max_decode_length: An integer, the maximum number of steps to decode a
sequence.
eos_id: An integer. ID of end of sentence token.
padded_decode: A bool, indicating if max_sequence_length padding is used
for beam search.
dtype: A tensorflow data type used for score computation. The default is
tf.float32.
      noise_multiplier: The scale of the Gumbel noise added to the logits;
        0.0 disables the noise.
decoding_name: an optional name for the decoding loop tensors.
"""
self.symbols_to_logits_fn = symbols_to_logits_fn
self.vocab_size = vocab_size
self.beam_size = beam_size
self.alpha = alpha
self.max_decode_length = max_decode_length
self.eos_id = eos_id
self.padded_decode = padded_decode
self.dtype = tf.as_dtype(dtype)
self.decoding_name = decoding_name
self.noise_multiplier = noise_multiplier
def search(self, initial_ids, initial_cache):
"""Beam search for sequences with highest scores.
Args:
initial_ids: initial ids to pass into the symbols_to_logits_fn. int tensor
with shape [batch_size, 1]
initial_cache: dictionary storing values to be passed into the
symbols_to_logits_fn.
Returns:
finished_seq and finished_scores.
"""
batch_size = (
initial_ids.shape.as_list()[0]
if self.padded_decode else tf.shape(initial_ids)[0])
state, state_shapes = self._create_initial_state(initial_ids, initial_cache,
batch_size)
def _grow_alive_seq(state):
"""Grow alive sequences by one token, collect top 2*beam_size sequences.
2*beam_size sequences are collected because some sequences may have
reached the EOS token. 2*beam_size ensures that at least beam_size
sequences are still alive.
Args:
state: A dictionary with the current loop state.
Returns:
Tuple of
(Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1],
Scores of returned sequences [batch_size, 2 * beam_size],
New alive cache, for each of the 2 * beam_size sequences)
"""
i = state[_StateKeys.CUR_INDEX]
alive_seq = state[_StateKeys.ALIVE_SEQ]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
alive_cache = state[_StateKeys.ALIVE_CACHE]
beams_to_keep = 2 * self.beam_size
# Get logits for the next candidate IDs for the alive sequences. Get the
# new cache values at the same time.
if self.padded_decode:
flat_ids = tf.reshape(
tf.slice(alive_seq, [0, 0, i], [batch_size, self.beam_size, 1]),
[batch_size * self.beam_size, -1])
else:
flat_ids = flatten_beam_dim(alive_seq) # [batch_size * beam_size]
flat_cache = tf.nest.map_structure(flatten_beam_dim, alive_cache)
flat_logits, flat_cache = self.symbols_to_logits_fn(
flat_ids, i, flat_cache)
if self.noise_multiplier > 0:
noise = tf.random.uniform(flat_logits.shape, dtype=flat_logits.dtype)
        # Transform the uniform samples into standard Gumbel(0, 1) noise.
noise = -tf.math.log(-tf.math.log(noise))
# NOMUTANTS -- may not impact final result.
flat_logits = flat_logits + noise * self.noise_multiplier
# Unflatten logits to shape [batch_size, beam_size, vocab_size]
logits = _unflatten_beam_dim(flat_logits, batch_size, self.beam_size)
new_cache = tf.nest.map_structure(
lambda t: _unflatten_beam_dim(t, batch_size, self.beam_size),
flat_cache)
# Convert logits to normalized log probs
candidate_log_probs = _log_prob_from_logits(logits)
      # Calculate new log probabilities as if each of the alive sequences
      # were extended by the candidate IDs.
# Shape [batch_size, beam_size, vocab_size]
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
# Each batch item has beam_size * vocab_size candidate sequences. For each
# batch item, get the k candidates with the highest log probabilities.
flat_log_probs = tf.reshape(log_probs,
[-1, self.beam_size * self.vocab_size])
topk_log_probs, topk_indices = tf.nn.top_k(
flat_log_probs, k=beams_to_keep)
# Extract the alive sequences that generate the highest log probabilities
# after being extended.
topk_beam_indices = topk_indices // self.vocab_size
topk_seq, new_cache = self._gather_beams([alive_seq, new_cache],
topk_beam_indices, batch_size,
beams_to_keep)
# Append the most probable IDs to the topk sequences
topk_ids = topk_indices % self.vocab_size
if self.padded_decode:
topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1])
        # TODO(b/145533236, hongkuny): Revert once TF fixes the validation.
topk_seq = tf.tensor_scatter_nd_update(topk_seq, [[i + 1]],
tf.expand_dims(topk_ids, axis=0))
topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0])
else:
topk_seq = tf.concat(
[topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2)
return topk_seq, topk_log_probs, topk_ids, new_cache
def _get_new_alive_state(new_seq, new_log_probs, new_finished_flags,
new_cache):
"""Gather the top k sequences that are still alive.
Args:
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1]
new_log_probs: Log probabilities of new sequences float32 tensor with
shape [batch_size, beam_size]
new_finished_flags: A boolean Tensor indicates which sequences are live
inside the beam.
new_cache: Dict of cached values for each sequence.
Returns:
Dictionary with alive keys from _StateKeys:
{Top beam_size sequences that are still alive (don't end with eos_id)
Log probabilities of top alive sequences
Dict cache storing decoder states for top alive sequences}
"""
# To prevent finished sequences from being considered, set log probs to
# -inf.
new_log_probs += tf.cast(new_finished_flags,
self.dtype) * -inf(self.dtype)
_, topk_indexes = tf.nn.top_k(new_log_probs, k=self.beam_size)
top_alive_seq, top_alive_log_probs, top_alive_cache = (
self._gather_beams([new_seq, new_log_probs, new_cache],
topk_indexes, batch_size, self.beam_size))
return {
_StateKeys.ALIVE_SEQ: top_alive_seq,
_StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs,
_StateKeys.ALIVE_CACHE: top_alive_cache
}
def _get_new_finished_state(state, new_seq, new_log_probs,
new_finished_flags):
"""Combine new and old finished sequences, and gather the top k sequences.
Args:
state: A dictionary with the current loop state.
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, beam_size, i + 1]
new_log_probs: Log probabilities of new sequences float32 tensor with
shape [batch_size, beam_size]
new_finished_flags: A boolean Tensor indicates which sequences are live
inside the beam.
Returns:
Dictionary with finished keys from _StateKeys:
{Top beam_size finished sequences based on score,
Scores of finished sequences,
Finished flags of finished sequences}
"""
i = state[_StateKeys.CUR_INDEX]
finished_seq = state[_StateKeys.FINISHED_SEQ]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
# First append a column of 0-ids to finished_seq to increment the length.
# New shape of finished_seq: [batch_size, beam_size, i + 1]
if not self.padded_decode:
finished_seq = tf.concat(
[finished_seq,
tf.zeros([batch_size, self.beam_size, 1], tf.int32)],
axis=2)
# Calculate new seq scores from log probabilities.
length_norm = _length_normalization(self.alpha, i + 1, dtype=self.dtype)
new_scores = new_log_probs / length_norm
# Set the scores of the still-alive seq in new_seq to large negative
# values.
new_scores += ((1. - tf.cast(new_finished_flags, self.dtype)) *
-inf(self.dtype))
# Combine sequences, scores, and flags.
finished_seq = tf.concat([finished_seq, new_seq], axis=1)
finished_scores = tf.concat([finished_scores, new_scores], axis=1)
finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)
# Return the finished sequences with the best scores.
_, topk_indexes = tf.nn.top_k(finished_scores, k=self.beam_size)
top_finished_seq, top_finished_scores, top_finished_flags = (
self._gather_beams([finished_seq, finished_scores, finished_flags],
topk_indexes, batch_size, self.beam_size))
return {
_StateKeys.FINISHED_SEQ: top_finished_seq,
_StateKeys.FINISHED_SCORES: top_finished_scores,
_StateKeys.FINISHED_FLAGS: top_finished_flags
}
def _search_step(state):
"""Beam search loop body.
Grow alive sequences by a single ID. Sequences that have reached the EOS
token are marked as finished. The alive and finished sequences with the
highest log probabilities and scores are returned.
      A sequence's finished score is calculated by dividing the log probability
by the length normalization factor. Without length normalization, the
search is more likely to return shorter sequences.
Args:
state: A dictionary with the current loop state.
Returns:
new state dictionary.
"""
# Grow alive sequences by one token.
new_seq, new_log_probs, topk_ids, new_cache = _grow_alive_seq(state)
new_finished_flags = tf.equal(topk_ids, self.eos_id)
# Collect top beam_size alive sequences
alive_state = _get_new_alive_state(new_seq, new_log_probs,
new_finished_flags, new_cache)
# Combine newly finished sequences with existing finished sequences, and
# collect the top k scoring sequences.
finished_state = _get_new_finished_state(state, new_seq, new_log_probs,
new_finished_flags)
# Increment loop index and create new state dictionary
new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1}
new_state.update(alive_state)
new_state.update(finished_state)
return [new_state]
finished_state = tf.nest.map_structure(
tf.stop_gradient,
tf.while_loop(
self._continue_search,
_search_step,
loop_vars=[state],
shape_invariants=[state_shapes],
parallel_iterations=1,
name=self.decoding_name))
finished_state = finished_state[0]
return self._process_finished_state(finished_state)
def _process_finished_state(self, finished_state):
alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]
# TF2 changes tf.where behavior. Should make parameters broadcastable.
finished_cond = tf.reduce_any(finished_flags, 1, name="finished_cond")
seq_cond = _expand_to_same_rank(finished_cond, finished_seq)
score_cond = _expand_to_same_rank(finished_cond, finished_scores)
# Account for corner case where there are no finished sequences for a
# particular batch item. In that case, return alive sequences for that batch
# item.
finished_seq = tf.where(seq_cond, finished_seq, alive_seq)
finished_scores = tf.where(score_cond, finished_scores, alive_log_probs)
return finished_seq, finished_scores
def _create_initial_state(self, initial_ids, initial_cache, batch_size):
"""Return initial state dictionary and its shape invariants."""
for key, value in initial_cache.items():
for inner_value in tf.nest.flatten(value):
if inner_value.dtype != self.dtype:
raise TypeError(
"initial_cache element for key '%s' has dtype %s that does not "
"match SequenceBeamSearch's dtype of %s. Value: %s" %
(key, inner_value.dtype.name, self.dtype.name, inner_value))
# Current loop index (starts at 0)
cur_index = tf.constant(0)
# Create alive sequence with shape [batch_size, beam_size, 1]
alive_seq = expand_to_beam_size(initial_ids, self.beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2)
if self.padded_decode:
alive_seq = tf.tile(alive_seq, [1, 1, self.max_decode_length + 1])
# Create tensor for storing initial log probabilities.
# Assume initial_ids are prob 1.0
initial_log_probs = tf.constant([[0.] + [-float("inf")] *
(self.beam_size - 1)],
dtype=self.dtype)
alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])
# Expand all values stored in the dictionary to the beam size, so that each
# beam has a separate cache.
alive_cache = tf.nest.map_structure(
lambda t: expand_to_beam_size(t, self.beam_size), initial_cache)
# Initialize tensor storing finished sequences with filler values.
finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)
# Set scores of the initial finished seqs to negative infinity.
finished_scores = tf.ones([batch_size, self.beam_size],
dtype=self.dtype) * -inf(self.dtype)
# Initialize finished flags with all False values.
finished_flags = tf.zeros([batch_size, self.beam_size], tf.bool)
# Create state dictionary
state = {
_StateKeys.CUR_INDEX: cur_index,
_StateKeys.ALIVE_SEQ: alive_seq,
_StateKeys.ALIVE_LOG_PROBS: alive_log_probs,
_StateKeys.ALIVE_CACHE: alive_cache,
_StateKeys.FINISHED_SEQ: finished_seq,
_StateKeys.FINISHED_SCORES: finished_scores,
_StateKeys.FINISHED_FLAGS: finished_flags
}
# Create state invariants for each value in the state dictionary. Each
# dimension must be a constant or None. A None dimension means either:
# 1) the dimension's value is a tensor that remains the same but may
# depend on the input sequence to the model (e.g. batch size).
# 2) the dimension may have different values on different iterations.
if self.padded_decode:
state_shape_invariants = {
_StateKeys.CUR_INDEX:
tf.TensorShape([]),
_StateKeys.ALIVE_SEQ:
tf.TensorShape(
[batch_size, self.beam_size, self.max_decode_length + 1]),
_StateKeys.ALIVE_LOG_PROBS:
tf.TensorShape([batch_size, self.beam_size]),
_StateKeys.ALIVE_CACHE:
tf.nest.map_structure(lambda state: state.get_shape(),
alive_cache),
_StateKeys.FINISHED_SEQ:
tf.TensorShape(
[batch_size, self.beam_size, self.max_decode_length + 1]),
_StateKeys.FINISHED_SCORES:
tf.TensorShape([batch_size, self.beam_size]),
_StateKeys.FINISHED_FLAGS:
tf.TensorShape([batch_size, self.beam_size])
}
else:
state_shape_invariants = {
_StateKeys.CUR_INDEX:
tf.TensorShape([]),
_StateKeys.ALIVE_SEQ:
tf.TensorShape([None, self.beam_size, None]),
_StateKeys.ALIVE_LOG_PROBS:
tf.TensorShape([None, self.beam_size]),
_StateKeys.ALIVE_CACHE:
tf.nest.map_structure(_get_shape_keep_last_dim, alive_cache),
_StateKeys.FINISHED_SEQ:
tf.TensorShape([None, self.beam_size, None]),
_StateKeys.FINISHED_SCORES:
tf.TensorShape([None, self.beam_size]),
_StateKeys.FINISHED_FLAGS:
tf.TensorShape([None, self.beam_size])
}
return state, state_shape_invariants
def _continue_search(self, state):
"""Return whether to continue the search loop.
    The loop should terminate when:
    1) the maximum decode length has been reached, or
    2) the worst score in the finished sequences is better than the best
       score in the alive sequences (i.e. the finished sequences are provably
       unchanging).
Args:
state: A dictionary with the current loop state.
Returns:
Bool tensor with value True if loop should continue, False if loop should
terminate.
"""
i = state[_StateKeys.CUR_INDEX]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
not_at_max_decode_length = tf.less(i, self.max_decode_length)
# Calculate largest length penalty (the larger penalty, the better score).
max_length_norm = _length_normalization(
self.alpha, self.max_decode_length, dtype=self.dtype)
# Get the best possible scores from alive sequences.
# This tf.slice/tf.squeeze is equivalent to alive_log_probs[:, 0] which
# emits a tf.strided_slice. tf.slice is easier to reason about as we aren't
    # actually taking a non-trivial stride.
best_alive_scores = tf.squeeze(tf.slice(alive_log_probs, [0, 0], [-1, 1]),
axis=1) / max_length_norm
# Compute worst score in finished sequences for each batch element
finished_scores *= tf.cast(finished_flags,
self.dtype) # set filler scores to zero
lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)
# If there are no finished sequences in a batch element, then set the lowest
# finished score to -INF for that element.
finished_batches = tf.reduce_any(finished_flags, 1)
lowest_finished_scores += ((1.0 - tf.cast(finished_batches, self.dtype)) *
-inf(self.dtype))
worst_finished_score_better_than_best_alive_score = tf.reduce_all(
tf.greater(lowest_finished_scores, best_alive_scores))
return tf.logical_and(
not_at_max_decode_length,
tf.logical_not(worst_finished_score_better_than_best_alive_score))
@staticmethod
def _gather_beams(nested, beam_indices, batch_size, new_beam_size):
"""Gather beams from nested structure of tensors.
Each tensor in nested represents a batch of beams, where beam refers to a
single search state (beam search involves searching through multiple states
in parallel).
This function is used to gather the top beams, specified by
beam_indices, from the nested tensors.
Args:
nested: Nested structure (tensor, list, tuple or dict) containing tensors
with shape [batch_size, beam_size, ...].
beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each
        value in beam_indices must be in [0, beam_size); values are not
        necessarily unique.
batch_size: int size of batch
new_beam_size: int number of beams to be pulled from the nested tensors.
Returns:
Nested structure containing tensors with shape
[batch_size, new_beam_size, ...]
"""
    # Computes the i'th coordinate that contains the batch index for gather_nd.
    # Batch pos is a tensor like [[0,0,0,0],[1,1,1,1],...].
batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size
batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])
# Create coordinates to be passed to tf.gather_nd. Stacking creates a tensor
# with shape [batch_size, beam_size, 2], where the last dimension contains
# the (i, j) gathering coordinates.
coordinates = tf.stack([batch_pos, beam_indices], axis=2)
return tf.nest.map_structure(lambda state: tf.gather_nd(state, coordinates),
nested)
def sequence_beam_search(
symbols_to_logits_fn,
initial_ids,
initial_cache,
vocab_size,
beam_size,
alpha,
max_decode_length,
eos_id,
padded_decode=False,
dtype="float32",
noise_multiplier: float = 0.0,
decoding_name=None,
):
"""Search for sequence of subtoken ids with the largest probability.
Args:
symbols_to_logits_fn: A function that takes in ids, index, and cache as
arguments. The passed in arguments will have shape: ids -> A tensor with
shape [batch_size * beam_size, index]. index -> A scalar. cache -> A
nested dictionary of tensors [batch_size * beam_size, ...]. The function
must return a tuple of logits and new cache: logits -> A tensor with shape
[batch * beam_size, vocab_size]. new cache -> A nested dictionary with the
    same shape/structure as the input cache.
initial_ids: An int32 tensor with shape [batch_size]. Starting ids for each
batch item.
initial_cache: A dictionary, containing starting decoder variables
information.
    vocab_size: An integer, the size of the vocabulary.
beam_size: An integer, the number of beams.
alpha: A float, defining the strength of length normalization.
    max_decode_length: An integer, the maximum length to decode a sequence.
eos_id: An integer, ID of eos token, used to determine when a sequence has
finished.
padded_decode: A bool, indicating if max_sequence_length padding is used for
beam search.
dtype: A tensorflow data type used for score computation. The default is
tf.float32.
    noise_multiplier: The scale of the Gumbel noise added to the logits; 0.0
      disables the noise.
decoding_name: an optional name for the decoding loop tensors.
Returns:
Top decoded sequences [batch_size, beam_size, max_decode_length]
sequence scores [batch_size, beam_size]
"""
sbs = SequenceBeamSearch(
symbols_to_logits_fn,
vocab_size,
beam_size,
alpha,
max_decode_length,
eos_id,
padded_decode,
dtype,
noise_multiplier,
decoding_name,
)
return sbs.search(initial_ids, initial_cache)
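# Minimal usage sketch (assumes `my_symbols_to_logits_fn` and `cache` follow
# the contract documented above; all sizes below are hypothetical):
#   seqs, scores = sequence_beam_search(
#       symbols_to_logits_fn=my_symbols_to_logits_fn,
#       initial_ids=tf.zeros([8], tf.int32),
#       initial_cache=cache,
#       vocab_size=32000,
#       beam_size=4,
#       alpha=0.6,
#       max_decode_length=32,
#       eos_id=1)
#   # seqs: [8, 4, <= 33] decoded ids; scores: [8, 4] length-normalized.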
def _log_prob_from_logits(logits):
return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True)
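# Example: the result is a log-softmax over the vocab axis, so exponentiating
# and summing along axis 2 gives 1.0 for every [batch, beam] entry.
#   logits = tf.random.normal([2, 3, 5])
#   tf.reduce_sum(tf.exp(_log_prob_from_logits(logits)), axis=2)  # ~= ones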
def _length_normalization(alpha, length, dtype=tf.float32):
"""Return length normalization factor."""
return tf.pow(((5. + tf.cast(length, dtype)) / 6.), alpha)
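# Worked example: with alpha = 0.6 and length = 5 the factor is
# ((5 + 5) / 6) ** 0.6 ~= 1.36, so longer sequences are divided by a larger
# factor; alpha = 0 makes the factor 1 and disables normalization.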
def expand_to_beam_size(tensor, beam_size):
"""Tiles a given tensor by beam_size.
Args:
tensor: tensor to tile [batch_size, ...]
beam_size: How much to tile the tensor by.
Returns:
Tiled tensor [batch_size, beam_size, ...]
"""
tensor = tf.expand_dims(tensor, axis=1)
tile_dims = [1] * tensor.shape.ndims
tile_dims[1] = beam_size
return tf.tile(tensor, tile_dims)
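# Example (illustrative shapes):
#   x = tf.zeros([2, 5])
#   expand_to_beam_size(x, 3).shape  # -> TensorShape([2, 3, 5])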
def flatten_beam_dim(tensor):
"""Reshapes first two dimensions into a single dimension.
Args:
tensor: Tensor to reshape of shape [A, B, ...]
Returns:
Reshaped tensor of shape [A*B, ...]
"""
shape = _shape_list(tensor)
shape[0] *= shape[1]
shape.pop(1) # Remove beam dim
return tf.reshape(tensor, shape)
def _shape_list(tensor):
"""Return a list of the tensor's shape, and ensure no None values in list."""
# Get statically known shape (may contain None's for unknown dimensions)
shape = tensor.get_shape().as_list()
# Ensure that the shape values are not None
dynamic_shape = tf.shape(tensor)
for i in range(len(shape)): # pylint: disable=consider-using-enumerate
if shape[i] is None:
shape[i] = dynamic_shape[i]
return shape
def _get_shape_keep_last_dim(tensor):
shape_list = _shape_list(tensor)
  # Keep only the last dimension static (when known); mark all leading
  # dimensions as None.
for i in range(len(shape_list) - 1):
shape_list[i] = None
if isinstance(shape_list[-1], tf.Tensor):
shape_list[-1] = None
return tf.TensorShape(shape_list)
def _unflatten_beam_dim(tensor, batch_size, beam_size):
"""Reshapes first dimension back to [batch_size, beam_size].
Args:
tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
batch_size: Tensor, original batch size.
beam_size: int, original beam size.
Returns:
Reshaped tensor of shape [batch_size, beam_size, ...]
"""
shape = _shape_list(tensor)
new_shape = [batch_size, beam_size] + shape[1:]
return tf.reshape(tensor, new_shape)
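# Example: flatten_beam_dim and _unflatten_beam_dim are inverses, used to move
# between the per-beam layout and the flat layout the model consumes.
#   x = tf.zeros([2, 3, 5])
#   flat = flatten_beam_dim(x)                            # shape [6, 5]
#   _unflatten_beam_dim(flat, batch_size=2, beam_size=3)  # shape [2, 3, 5]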
| 29,534 | 39.129076 | 80 | py |
models | models-master/official/nlp/modeling/ops/decoding_module_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test decoding utility methods."""
import abc
import tensorflow as tf
from official.nlp.modeling.ops import decoding_module
def length_normalization(length, dtype):
"""Return length normalization factor."""
return tf.pow(((5. + tf.cast(length, dtype)) / 6.), 0.0)
class TestSubclass(decoding_module.DecodingModule, metaclass=abc.ABCMeta):
def __init__(self,
length_normalization_fn=length_normalization,
extra_cache_output=True,
dtype=tf.float32):
super(TestSubclass, self).__init__(
        length_normalization_fn=length_normalization_fn, dtype=dtype)
def _create_initial_state(self, initial_ids, initial_cache, batch_size):
pass
def _grow_alive_seq(self, state, batch_size):
pass
def _process_finished_state(self, finished_state):
pass
def _get_new_finished_state(self, state, new_seq, new_log_probs,
new_finished_flags, batch_size):
pass
def _finished_flags(self, topk_ids, state):
pass
def _continue_search(self, state):
pass
def _get_new_alive_state(self, new_seq, new_log_probs, new_finished_flags,
new_cache):
pass
class DecodingModuleTest(tf.test.TestCase):
def test_get_shape_keep_last_dim(self):
y = tf.constant(4.0)
x = tf.ones([7, tf.cast(tf.sqrt(y), tf.int32), 2, 5])
shape = decoding_module.get_shape_keep_last_dim(x)
self.assertAllEqual([None, None, None, 5], shape.as_list())
def test_shape_list(self):
x = tf.ones([7, 1])
shape = decoding_module.shape_list(x)
self.assertAllEqual([7, 1], shape)
def test_inf(self):
d = TestSubclass()
inf_value = d.inf()
self.assertAllEqual(inf_value, tf.constant(10000000., tf.float32))
def test_length_normalization(self):
d = TestSubclass()
normalized_length = d.length_normalization_fn(32, tf.float32)
self.assertAllEqual(normalized_length, tf.constant(1.0, tf.float32))
if __name__ == '__main__':
tf.test.main()
| 2,613 | 29.395349 | 76 | py |
models | models-master/official/nlp/modeling/ops/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops package definition."""
from official.nlp.modeling.ops.beam_search import sequence_beam_search
from official.nlp.modeling.ops.beam_search import SequenceBeamSearch
from official.nlp.modeling.ops.sampling_module import SamplingModule
from official.nlp.modeling.ops.segment_extractor import get_next_sentence_labels
from official.nlp.modeling.ops.segment_extractor import get_sentence_order_labels
| 1,011 | 47.190476 | 81 | py |
models | models-master/official/nlp/modeling/ops/beam_search_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test beam search helper methods."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling.ops import beam_search
class BeamSearchTests(tf.test.TestCase, parameterized.TestCase):
def test_expand_to_beam_size(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search.expand_to_beam_size(x, 3)
shape = tf.shape(x)
self.assertAllEqual([7, 3, 4, 2, 5], shape)
def test_get_shape_keep_last_dim(self):
y = tf.constant(4.0)
x = tf.ones([7, tf.cast(tf.sqrt(y), tf.int32), 2, 5])
shape = beam_search._get_shape_keep_last_dim(x)
self.assertAllEqual([None, None, None, 5], shape.as_list())
def test_flatten_beam_dim(self):
x = tf.ones([7, 4, 2, 5])
x = beam_search.flatten_beam_dim(x)
self.assertAllEqual([28, 2, 5], tf.shape(x))
def test_unflatten_beam_dim(self):
x = tf.ones([28, 2, 5])
x = beam_search._unflatten_beam_dim(x, 7, 4)
self.assertAllEqual([7, 4, 2, 5], tf.shape(x))
def test_gather_beams(self):
x = tf.reshape(tf.range(24), [2, 3, 4])
# x looks like: [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
y = beam_search.SequenceBeamSearch._gather_beams(x, [[1, 2], [0, 2]], 2, 2)
self.assertAllEqual(
[[[4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [20, 21, 22, 23]]],
y)
@parameterized.named_parameters([
('padded_decode_true_with_name', True, 0.0, 'decoding'),
('padded_decode_false_with_name', False, 0.0, 'decoding'),
('padded_decode_true_without_name', True, 0.0, None),
('padded_decode_false_without_name', False, 0.0, None),
('padded_decode_false_with_noise', False, 0.5, 'decoding'),
])
def test_sequence_beam_search(self, padded_decode, noise_multiplier, name):
# batch_size*beam_size, max_decode_length, vocab_size
probabilities = tf.constant([[[0.2, 0.7, 0.1], [0.5, 0.3, 0.2],
[0.1, 0.8, 0.1]],
[[0.1, 0.8, 0.1], [0.3, 0.4, 0.3],
[0.2, 0.1, 0.7]]])
# batch_size, max_decode_length, num_heads, embed_size per head
x = tf.zeros([1, 3, 2, 32], dtype=tf.float32)
cache = {'layer_%d' % layer: {'k': x, 'v': x} for layer in range(2)}
def _get_test_symbols_to_logits_fn():
"""Test function that returns logits for next token."""
def symbols_to_logits_fn(_, i, cache):
logits = tf.cast(probabilities[:, i, :], tf.float32)
return logits, cache
return symbols_to_logits_fn
predictions, _ = beam_search.sequence_beam_search(
symbols_to_logits_fn=_get_test_symbols_to_logits_fn(),
initial_ids=tf.zeros([1], dtype=tf.int32),
initial_cache=cache,
vocab_size=3,
beam_size=2,
alpha=0.6,
max_decode_length=3,
eos_id=9,
padded_decode=padded_decode,
dtype=tf.float32,
noise_multiplier=noise_multiplier,
decoding_name=name,
)
if noise_multiplier > 0:
self.assertAllEqual([[[0, 1, 0, 1], [0, 0, 2, 2]]], predictions)
else:
self.assertAllEqual([[[0, 1, 0, 1], [0, 1, 1, 2]]], predictions)
if __name__ == '__main__':
tf.test.main()
| 3,960 | 35.33945 | 79 | py |
models | models-master/official/nlp/modeling/ops/segment_extractor_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
"""Tests for sentence prediction labels."""
import functools
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling.ops import segment_extractor
class NextSentencePredictionTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters([
dict(
test_description="all random",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[[
b"Who let the dogs out?", b"Who?.", b"Who let the dogs out?"
], [b"Hello there.", b"Hello there."]],
expected_labels=[
[False, False, False],
[False, False],
],
random_threshold=0.0,
),
dict(
test_description="all next",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[
[b"La la la.", b"Such is life.", b"Who let the dogs out?"],
[b"Who?.", b"Hello there."],
],
expected_labels=[
[True, True, False],
[True, False],
],
random_threshold=1.0,
),
])
def testNextSentencePrediction(self,
sentences,
expected_segment,
expected_labels,
random_threshold=0.5,
test_description=""):
sentences = tf.ragged.constant(sentences)
    # Use a stateless uniform random function with a fixed seed so that we
    # get consistent and deterministic results.
extracted_segment, actual_labels = (
segment_extractor.get_next_sentence_labels(
sentences,
random_threshold,
random_fn=functools.partial(
tf.random.stateless_uniform, seed=(2, 3))))
self.assertAllEqual(expected_segment, extracted_segment)
self.assertAllEqual(expected_labels, actual_labels)
class SentenceOrderLabelsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters([
dict(
test_description="all random",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[[
b"Who let the dogs out?", b"Who?.", b"Who let the dogs out?"
], [b"Hello there.", b"Hello there."]],
expected_labels=[[True, True, True], [True, True]],
random_threshold=0.0,
random_next_threshold=0.0,
),
dict(
test_description="all next",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[[
b"La la la.", b"Such is life.", b"Who let the dogs out?"
], [b"Who?.", b"Hello there."]],
expected_labels=[[True, True, True], [True, True]],
random_threshold=1.0,
random_next_threshold=0.0,
),
dict(
test_description="all preceeding",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[
[b"La la la.", b"Hello there.", b"Hello there."],
[b"Who?.", b"Who let the dogs out?"],
],
expected_labels=[
[True, False, False],
[True, False],
],
random_threshold=1.0,
random_next_threshold=1.0,
),
])
def testSentenceOrderPrediction(self,
sentences,
expected_segment,
expected_labels,
random_threshold=0.5,
random_next_threshold=0.5,
test_description=""):
sentences = tf.ragged.constant(sentences)
    # Use a stateless uniform random function with a fixed seed so that we
    # get consistent and deterministic results.
extracted_segment, actual_labels = (
segment_extractor.get_sentence_order_labels(
sentences,
random_threshold=random_threshold,
random_next_threshold=random_next_threshold,
random_fn=functools.partial(
tf.random.stateless_uniform, seed=(2, 3))))
self.assertAllEqual(expected_segment, extracted_segment)
self.assertAllEqual(expected_labels, actual_labels)
if __name__ == "__main__":
tf.test.main()
| 5,398 | 37.841727 | 79 | py |
models | models-master/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for masked LM loss."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy
class ClassificationLossTest(tf.test.TestCase):
def create_lm_model(self,
vocab_size,
sequence_length,
hidden_size,
num_predictions,
output="predictions"):
# First, create a transformer stack that we can use to get the LM's
# vocabulary weight.
xformer_stack = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=1,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_attention_heads=4,
)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
_ = xformer_stack([word_ids, mask, type_ids])
# Create a maskedLM from the transformer stack.
test_layer = layers.MaskedLM(
embedding_table=xformer_stack.get_embedding_table(), output=output)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions=masked_lm_positions)
return tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
def test_loss_3d_input(self):
"""Test overall loss with a 3-dimensional input, from a masked LM."""
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
model = self.create_lm_model(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Get the output of the masked LM.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
output_data = model.predict([lm_input_data, masked_position_data])
# Calculate loss.
labels = np.random.randint(vocab_size, size=(batch_size, num_predictions))
weights = np.random.randint(2, size=(batch_size, num_predictions))
per_example_loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=weights)
# Total loss data should have one value, and that value shouldn't be zero
# in this case (as we're using random data).
expected_shape = [] # Scalar
self.assertEqual(expected_shape, per_example_loss_data.shape.as_list())
self.assertNotAllClose(
tf.zeros_like(per_example_loss_data), per_example_loss_data)
def test_loss_weights_3d_input(self):
"""Test masked loss with a 3-dimensional input, from a masked LM."""
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
model = self.create_lm_model(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Get the output of the masked LM.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
output_data = model.predict([lm_input_data, masked_position_data])
# Calculate a fully masked weight tensor. This should give a loss of zero.
labels = np.random.randint(vocab_size, size=(batch_size, num_predictions))
null_weights = np.zeros((batch_size, num_predictions))
weighted_loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=null_weights)
# Because the tensor is fully masked, the loss should be 0.
self.assertAllClose(0, weighted_loss_data)
def test_mismatched_predictions_and_labels_ranks_squeezes(self):
"""Test that the loss asserts when rank(predictions)-1 != rank(labels)."""
batch_size = 3
output_data = np.random.random_sample((batch_size, 10))
labels = np.random.randint(10, size=(batch_size, 1))
# All that this test tests is that the squeeze is successful.
_ = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels)
def test_mismatched_weights_and_labels_ranks_fail(self):
"""Test that the loss asserts when rank(predictions) != rank(labels)."""
batch_size = 3
output_data = np.random.random_sample((batch_size, 10, 15))
labels = np.random.randint(10, size=(batch_size, 10))
weights = np.random.randint(2, size=(batch_size))
with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"):
_ = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=weights)
def test_tf_tensor_inputs(self):
"""Test that tf.Tensors can be used as inputs to the loss function."""
batch_size = 3
output_data = tf.convert_to_tensor(
np.random.random_sample((batch_size, 10, 15)))
labels = tf.convert_to_tensor(np.random.randint(10, size=(batch_size, 10)))
weights = tf.convert_to_tensor(np.random.randint(2, size=(batch_size, 10)))
# We're not trying to validate numerical correctness, just ensure that
# we can in fact pass tensors to these functions without causing runtime
# errors from the shape checking code.
_ = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data, labels=labels, weights=weights)
def test_legacy_lm_loss_compatibility(self):
"""Test to validate computational correctness during refactors."""
# This is the empirical output of a masked LM with the following parameters:
# batch_size = 3
# vocab_size = 5
# sequence_length = 4
# num_predictions = 2
output_data = np.array(
[[[-2.5286622, -1.0963473, -1.4925185, -2.4451098, -1.2923571],
[-2.7117882, -1.1205841, -4.02187, -0.9966936, -1.5119683]],
[[-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741],
[-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741]],
[[-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509],
[-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509]]])
labels = np.array([[4, 0], [2, 2], [2, 1]])
# Validate that overall loss calculations are the same.
weights = np.array([[1, 0], [0, 0], [0, 0]])
loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data,
labels=labels,
weights=weights,
from_logits=True)
expected_loss_data = 1.2923441
self.assertAllClose(expected_loss_data, loss_data, rtol=1e-3)
def test_legacy_classification_loss_compatibility(self):
"""Test to validate computational correctness during refactors."""
# This is the empirical output of a classifier with the following params:
# batch_size = 2
# num_classes = 3
output_data = np.array([[-1.6094601e-03, -1.0966038e+01, -6.4434357e+00],
[-1.6975292e-03, -6.4009643e+00, -1.0226612e+01]])
labels = np.array([2, 1])
# Validate that overall loss calculations are the same.
weights = None
loss_data = weighted_sparse_categorical_crossentropy.loss(
predictions=output_data,
labels=labels,
weights=weights,
from_logits=True)
expected_loss_data = 6.4222
self.assertAllClose(expected_loss_data, loss_data, rtol=1e-3)
if __name__ == "__main__":
tf.test.main()
| 8,464 | 40.699507 | 81 | py |
models | models-master/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Weighted sparse categorical cross-entropy losses."""
import tensorflow as tf
def _adjust_labels(labels, predictions):
"""Adjust the 'labels' tensor by squeezing it if needed."""
labels = tf.cast(labels, tf.int32)
if len(predictions.shape) == len(labels.shape):
labels = tf.squeeze(labels, [-1])
return labels, predictions
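# Example: the Keras core API may pass labels of shape [batch, 1] alongside
# predictions of shape [batch, num_classes]; the spurious inner dimension is
# squeezed away.
#   labels = tf.constant([[2], [1]])  # shape [2, 1]
#   preds = tf.zeros([2, 3])          # shape [2, 3]
#   _adjust_labels(labels, preds)[0].shape  # -> TensorShape([2])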
def _validate_rank(labels, predictions, weights):
if weights is not None and len(weights.shape) != len(labels.shape):
raise RuntimeError(
("Weight and label tensors were not of the same rank. weights.shape "
"was %s, and labels.shape was %s.") %
        (weights.shape, labels.shape))
if (len(predictions.shape) - 1) != len(labels.shape):
raise RuntimeError(
("Weighted sparse categorical crossentropy expects `labels` to have a "
"rank of one less than `predictions`. labels.shape was %s, and "
"predictions.shape was %s.") % (labels.shape, predictions.shape))
def loss(labels, predictions, weights=None, from_logits=False):
"""Calculate a per-batch sparse categorical crossentropy loss.
  This loss function assumes that the predictions are post-softmax
  probabilities unless `from_logits=True`.
Args:
labels: The labels to evaluate against. Should be a set of integer indices
ranging from 0 to (vocab_size-1).
predictions: The network predictions. Should have softmax already applied.
weights: An optional weight array of the same shape as the 'labels' array.
If None, all examples will be used.
from_logits: Whether the input predictions are logits.
Returns:
A loss scalar.
Raises:
RuntimeError if the passed tensors do not have the same rank.
"""
# When using these functions with the Keras core API, we will need to squeeze
# the labels tensor - Keras adds a spurious inner dimension.
labels, predictions = _adjust_labels(labels, predictions)
_validate_rank(labels, predictions, weights)
example_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions, from_logits=from_logits)
if weights is None:
return tf.reduce_mean(example_losses)
weights = tf.cast(weights, predictions.dtype)
return tf.math.divide_no_nan(
tf.reduce_sum(example_losses * weights), tf.reduce_sum(weights))
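# Worked example (hypothetical values): only weighted examples contribute, and
# the result is the weighted mean of the per-example cross-entropies.
#   labels = tf.constant([0, 1])
#   predictions = tf.constant([[0.9, 0.1], [0.2, 0.8]])
#   weights = tf.constant([1.0, 0.0])
#   loss(labels, predictions, weights)  # ~= -log(0.9) ~= 0.105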
| 2,859 | 38.722222 | 79 | py |
models | models-master/official/nlp/modeling/losses/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses contains common loss computation used in NLP (subject to change)."""
from official.nlp.modeling.losses.weighted_sparse_categorical_crossentropy import loss as weighted_sparse_categorical_crossentropy_loss
| 824 | 47.529412 | 135 | py |
models | models-master/official/nlp/tasks/electra_task_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.electra_task."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import electra
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.tasks import electra_task
class ElectraPretrainTaskTest(tf.test.TestCase):
def test_task(self):
config = electra_task.ElectraPretrainConfig(
model=electra.ElectraPretrainerConfig(
generator_encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
discriminator_encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
num_masked_tokens=20,
sequence_length=128,
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path="dummy",
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
task = electra_task.ElectraPretrainTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
if __name__ == "__main__":
tf.test.main()
| 2,282 | 36.42623 | 74 | py |
models | models-master/official/nlp/tasks/masked_lm.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked language task."""
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class MaskedLMConfig(cfg.TaskConfig):
"""The model config."""
model: bert.PretrainerConfig = dataclasses.field(
default_factory=lambda: bert.PretrainerConfig( # pylint: disable=g-long-lambda
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence',
)
]
)
)
# TODO(b/154564893): Mathematically, scale_loss should be True.
# However, it works better with scale_loss being False.
scale_loss: bool = False
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
@task_factory.register_task_cls(MaskedLMConfig)
class MaskedLMTask(base_task.Task):
"""Task object for Mask language modeling."""
def _build_encoder(self, encoder_cfg):
return encoders.build_encoder(encoder_cfg)
def build_model(self, params=None):
config = params or self.task_config.model
encoder_cfg = config.encoder
encoder_network = self._build_encoder(encoder_cfg)
cls_heads = [
layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
] if config.cls_heads else []
return models.BertPretrainerV2(
mlm_activation=tf_utils.get_activation(config.mlm_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=config.mlm_initializer_range),
encoder_network=encoder_network,
classification_heads=cls_heads)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> tf.Tensor:
with tf.name_scope('MaskedLMTask/losses'):
metrics = dict([(metric.name, metric) for metric in metrics])
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['mlm_logits'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses *
lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
model_outputs['next_sentence'], dtype=tf.float32)
sentence_loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
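  # A minimal numeric sketch of the weighting above (hypothetical values):
  # with per-position losses [2.0, 4.0] and `masked_lm_weights` [1.0, 0.0]
  # (one real prediction plus one padding slot), the MLM loss is
  # (2.0 * 1.0 + 4.0 * 0.0) / (1.0 + 0.0) = 2.0, so padding slots never
  # dilute the average.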
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='lm_example_loss')
]
# TODO(hongkuny): rethink how to manage metrics creation with heads.
if self.task_config.train_data.use_next_sentence_label:
metrics.append(
tf.keras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
with tf.name_scope('MaskedLMTask/process_metrics'):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(
labels['masked_lm_ids'], model_outputs['mlm_logits'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'], model_outputs['next_sentence'])
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
if self.task_config.scale_loss:
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
if self.task_config.scale_loss:
grads = tape.gradient(scaled_loss, tvars)
else:
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = self.inference_step(inputs, model)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
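# Usage sketch (illustrative only; mirrors the dummy-data pattern used by the
# unit tests in this repo and assumes
# `from official.nlp.data import pretrain_dataloader`):
#
#   config = MaskedLMConfig(
#       train_data=pretrain_dataloader.BertPretrainDataConfig(
#           input_path='dummy', seq_length=128,
#           max_predictions_per_seq=20, global_batch_size=1))
#   task = MaskedLMTask(config)
#   model = task.build_model()
#   metrics = task.build_metrics()
#   dataset = task.build_inputs(config.train_data)
#   task.train_step(next(iter(dataset)), model,
#                   tf.keras.optimizers.SGD(learning_rate=0.1), metrics)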
| 7,977 | 36.810427 | 85 | py |
models | models-master/official/nlp/tasks/translation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the translation task."""
import dataclasses
import os
from typing import Optional
from absl import logging
import sacrebleu
import tensorflow as tf
import tensorflow_text as tftxt
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.nlp.data import data_loader_factory
from official.nlp.metrics import bleu
from official.nlp.modeling import models
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
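# Illustrative sketch (hypothetical shapes): both tensors come back with the
# longer of the two lengths, zero-padded on the right.
#
#   logits = tf.zeros([2, 3, 8])  # [batch, length_logits, vocab]
#   labels = tf.zeros([2, 5])     # [batch, length_labels]
#   logits, labels = _pad_tensors_to_same_length(logits, labels)
#   # logits.shape == [2, 5, 8]; labels.shape == [2, 5]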
def _padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.cast(vocab_size - 1, tf.float32)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.math.log(confidence) + tf.cast(vocab_size - 1, tf.float32)
* low_confidence * tf.math.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
return xentropy * weights, weights
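# A worked sketch of the smoothing above (hypothetical numbers): with
# vocab_size=4 and smoothing=0.1, the soft target puts confidence 0.9 on the
# true class and (1 - 0.9) / (4 - 1) ~= 0.0333 on each of the other three;
# positions whose label is 0 (padding) get weight 0 and drop out of the loss.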
@dataclasses.dataclass
class EncDecoder(base_config.Config):
"""Configurations for Encoder/Decoder."""
num_layers: int = 6
num_attention_heads: int = 8
intermediate_size: int = 2048
activation: str = "relu"
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
intermediate_dropout: float = 0.1
use_bias: bool = False
norm_first: bool = True
norm_epsilon: float = 1e-6
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base Seq2Seq model configuration."""
encoder: EncDecoder = dataclasses.field(default_factory=EncDecoder)
decoder: EncDecoder = dataclasses.field(default_factory=EncDecoder)
embedding_width: int = 512
dropout_rate: float = 0.1
# Decoding.
padded_decode: bool = False
decode_max_length: Optional[int] = None
beam_size: int = 4
alpha: float = 0.6
# Training.
label_smoothing: float = 0.1
@dataclasses.dataclass
class TranslationConfig(cfg.TaskConfig):
"""The translation task config."""
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
# Tokenization
sentencepiece_model_path: str = ""
# Evaluation.
print_translations: Optional[bool] = None
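# Illustrative config sketch (all paths are placeholders): a small
# Transformer with shallow encoder/decoder stacks.
#
#   config = TranslationConfig(
#       model=ModelConfig(encoder=EncDecoder(num_layers=2),
#                         decoder=EncDecoder(num_layers=2),
#                         beam_size=1),
#       sentencepiece_model_path='/path/to/spm.model',
#       train_data=cfg.DataConfig(input_path='/path/to/train*'))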
def write_test_record(params, model_dir):
"""Writes the test input to a tfrecord."""
# Get raw data from tfds.
params = params.replace(transform_and_batch=False)
dataset = data_loader_factory.get_data_loader(params).load()
references = []
total_samples = 0
output_file = os.path.join(model_dir, "eval.tf_record")
writer = tf.io.TFRecordWriter(output_file)
for d in dataset:
references.append(d[params.tgt_lang].numpy().decode())
example = tf.train.Example(
features=tf.train.Features(
feature={
"unique_id": tf.train.Feature(
int64_list=tf.train.Int64List(value=[total_samples])),
params.src_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[d[params.src_lang].numpy()])),
params.tgt_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[d[params.tgt_lang].numpy()])),
}))
writer.write(example.SerializeToString())
total_samples += 1
batch_size = params.global_batch_size
num_dummy_example = batch_size - total_samples % batch_size
for i in range(num_dummy_example):
example = tf.train.Example(
features=tf.train.Features(
feature={
"unique_id": tf.train.Feature(
int64_list=tf.train.Int64List(value=[total_samples + i])),
params.src_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b""])),
params.tgt_lang: tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b""])),
}))
writer.write(example.SerializeToString())
writer.close()
return references, output_file
@task_factory.register_task_cls(TranslationConfig)
class TranslationTask(base_task.Task):
"""A single-replica view of training procedure.
Tasks provide artifacts for training/evalution procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss
and customized metrics with reduction.
"""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
self._sentencepiece_model_path = params.sentencepiece_model_path
if params.sentencepiece_model_path:
self._sp_tokenizer = tftxt.SentencepieceTokenizer(
model=tf.io.gfile.GFile(params.sentencepiece_model_path, "rb").read(),
add_eos=True)
try:
empty_str_tokenized = self._sp_tokenizer.tokenize("").numpy()
except tf.errors.InternalError:
raise ValueError(
"EOS token not in tokenizer vocab."
"Please make sure the tokenizer generates a single token for an "
"empty string.")
self._eos_id = empty_str_tokenized.item()
self._vocab_size = self._sp_tokenizer.vocab_size().numpy()
else:
raise ValueError("Setencepiece model path not provided.")
if (params.validation_data.input_path or
params.validation_data.tfds_name) and self._logging_dir:
self._references, self._tf_record_input_path = write_test_record(
params.validation_data, self.logging_dir)
def build_model(self) -> tf.keras.Model:
"""Creates model architecture.
Returns:
A model instance.
"""
model_cfg = self.task_config.model
encoder_kwargs = model_cfg.encoder.as_dict()
encoder_layer = models.TransformerEncoder(**encoder_kwargs)
decoder_kwargs = model_cfg.decoder.as_dict()
decoder_layer = models.TransformerDecoder(**decoder_kwargs)
return models.Seq2SeqTransformer(
vocab_size=self._vocab_size,
embedding_width=model_cfg.embedding_width,
dropout_rate=model_cfg.dropout_rate,
padded_decode=model_cfg.padded_decode,
decode_max_length=model_cfg.decode_max_length,
beam_size=model_cfg.beam_size,
alpha=model_cfg.alpha,
encoder_layer=encoder_layer,
decoder_layer=decoder_layer,
eos_id=self._eos_id)
def build_inputs(self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a dataset."""
if params.is_training:
dataloader_params = params
else:
input_path = self._tf_record_input_path
# Read from padded tf records instead.
dataloader_params = params.replace(
input_path=input_path,
tfds_name="",
tfds_split="",
has_unique_id=True)
dataloader_params = dataloader_params.replace(
sentencepiece_model_path=self._sentencepiece_model_path)
return data_loader_factory.get_data_loader(dataloader_params).load(
input_context)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
"""Standard interface to compute losses.
Args:
labels: optional label tensors.
model_outputs: a nested structure of output tensors.
aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
del aux_losses
smoothing = self.task_config.model.label_smoothing
xentropy, weights = _padded_cross_entropy_loss(model_outputs, labels,
smoothing, self._vocab_size)
return tf.reduce_sum(xentropy) / tf.reduce_sum(weights)
def train_step(self,
inputs,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics=None):
"""Does forward and backward.
With distribution strategies, this method runs on devices.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(labels=inputs["targets"], model_outputs=outputs)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
# For mixed precision, when a LossScaleOptimizer is used, the loss is
# scaled to avoid numeric underflow.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, inputs["targets"], outputs)
return logs
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
unique_ids = inputs.pop("unique_id")
# Validation loss
outputs = model(inputs, training=False)
# Computes per-replica loss to help understand if we are overfitting.
loss = self.build_losses(labels=inputs["targets"], model_outputs=outputs)
inputs.pop("targets")
# Beam search to calculate metrics.
model_outputs = model(inputs, training=False)
outputs = model_outputs
logs = {
self.loss: loss,
"inputs": inputs["inputs"],
"unique_ids": unique_ids,
}
logs.update(outputs)
return logs
def aggregate_logs(self, state=None, step_outputs=None):
"""Aggregates over logs returned from a validation step."""
if state is None:
state = {}
for in_token_ids, out_token_ids, unique_ids in zip(
step_outputs["inputs"],
step_outputs["outputs"],
step_outputs["unique_ids"]):
for in_ids, out_ids, u_id in zip(
in_token_ids.numpy(), out_token_ids.numpy(), unique_ids.numpy()):
state[u_id] = (in_ids, out_ids)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
def _decode(ids):
return self._sp_tokenizer.detokenize(ids).numpy().decode()
def _trim_and_decode(ids):
"""Trim EOS and PAD tokens from ids, and decode to return a string."""
try:
index = list(ids).index(self._eos_id)
return _decode(ids[:index])
except ValueError: # No EOS found in sequence
return _decode(ids)
translations = []
for u_id in sorted(aggregated_logs):
if u_id >= len(self._references):
continue
src = _trim_and_decode(aggregated_logs[u_id][0])
translation = _trim_and_decode(aggregated_logs[u_id][1])
translations.append(translation)
if self.task_config.print_translations:
        # Decoding the in_ids to reflect what the model sees.
logging.info("Translating:\n\tInput: %s\n\tOutput: %s\n\tReference: %s",
src, translation, self._references[u_id])
sacrebleu_score = sacrebleu.corpus_bleu(
translations, [self._references]).score
bleu_score = bleu.bleu_on_list(self._references, translations)
return {"sacrebleu_score": sacrebleu_score,
"bleu_score": bleu_score}
| 13,556 | 35.640541 | 80 | py |
models | models-master/official/nlp/tasks/electra_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ELECTRA pretraining task (Joint Masked LM and Replaced Token Detection)."""
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.configs import bert
from official.nlp.configs import electra
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class ElectraPretrainConfig(cfg.TaskConfig):
"""The model config."""
model: electra.ElectraPretrainerConfig = dataclasses.field(
default_factory=lambda: electra.ElectraPretrainerConfig( # pylint: disable=g-long-lambda
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence',
)
]
)
)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
def _build_pretrainer(
config: electra.ElectraPretrainerConfig) -> models.ElectraPretrainer:
"""Instantiates ElectraPretrainer from the config."""
generator_encoder_cfg = config.generator_encoder
discriminator_encoder_cfg = config.discriminator_encoder
  # Share the discriminator's embeddings with the generator (when
  # `tie_embeddings` is set) for easier model serialization.
discriminator_network = encoders.build_encoder(discriminator_encoder_cfg)
if config.tie_embeddings:
embedding_layer = discriminator_network.get_embedding_layer()
generator_network = encoders.build_encoder(
generator_encoder_cfg, embedding_layer=embedding_layer)
else:
generator_network = encoders.build_encoder(generator_encoder_cfg)
generator_encoder_cfg = generator_encoder_cfg.get()
return models.ElectraPretrainer(
generator_network=generator_network,
discriminator_network=discriminator_network,
vocab_size=generator_encoder_cfg.vocab_size,
num_classes=config.num_classes,
sequence_length=config.sequence_length,
num_token_predictions=config.num_masked_tokens,
mlm_activation=tf_utils.get_activation(
generator_encoder_cfg.hidden_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=generator_encoder_cfg.initializer_range),
classification_heads=[
layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
],
disallow_correct=config.disallow_correct)
@task_factory.register_task_cls(ElectraPretrainConfig)
class ElectraPretrainTask(base_task.Task):
"""ELECTRA Pretrain Task (Masked LM + Replaced Token Detection)."""
def build_model(self):
return _build_pretrainer(self.task_config.model)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> tf.Tensor:
metrics = dict([(metric.name, metric) for metric in metrics])
# generator lm and (optional) nsp loss.
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['lm_outputs'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
model_outputs['sentence_outputs'], dtype=tf.float32)
      sentence_loss = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(
              sentence_labels, sentence_outputs, from_logits=True))
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
# discriminator replaced token detection (rtd) loss.
rtd_logits = model_outputs['disc_logits']
rtd_labels = tf.cast(model_outputs['disc_label'], tf.float32)
input_mask = tf.cast(labels['input_mask'], tf.float32)
rtd_ind_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=rtd_logits, labels=rtd_labels)
rtd_numerator = tf.reduce_sum(input_mask * rtd_ind_loss)
rtd_denominator = tf.reduce_sum(input_mask)
rtd_loss = tf.math.divide_no_nan(rtd_numerator, rtd_denominator)
metrics['discriminator_loss'].update_state(rtd_loss)
    total_loss = (
        total_loss +
        self.task_config.model.discriminator_loss_weight * rtd_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
metrics['total_loss'].update_state(total_loss)
return total_loss
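  # A minimal numeric sketch of the RTD term above (hypothetical values):
  # with disc_logits [2.0, -1.0], disc_label [1, 0] and input_mask [1, 1],
  # rtd_loss is the mean sigmoid cross-entropy over the two real positions;
  # any position with input_mask 0 is excluded from both sums.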
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return pretrain_dataloader.BertPretrainDataLoader(params).load(
input_context)
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='lm_example_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='discriminator_accuracy'),
]
if self.task_config.train_data.use_next_sentence_label:
metrics.append(
tf.keras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))
metrics.append(tf.keras.metrics.Mean(name='discriminator_loss'))
metrics.append(tf.keras.metrics.Mean(name='total_loss'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(labels['masked_lm_ids'],
model_outputs['lm_outputs'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'], model_outputs['sentence_outputs'])
if 'discriminator_accuracy' in metrics:
disc_logits_expanded = tf.expand_dims(model_outputs['disc_logits'], -1)
discrim_full_logits = tf.concat(
[-1.0 * disc_logits_expanded, disc_logits_expanded], -1)
metrics['discriminator_accuracy'].update_state(
model_outputs['disc_label'], discrim_full_logits,
labels['input_mask'])
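    # Note on the expansion above (hypothetical value): a single discriminator
    # logit of 2.0 becomes the two-class logits [-2.0, 2.0], which lets
    # SparseCategoricalAccuracy consume the binary replaced-token labels.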
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = model(inputs, training=False)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
| 9,850 | 38.562249 | 95 | py |
models | models-master/official/nlp/tasks/tagging.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tagging (e.g., NER/POS) task."""
from typing import List, Optional, Tuple
import dataclasses
import orbit
from seqeval import metrics as seqeval_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base span labeler configuration."""
encoder: encoders.EncoderConfig = dataclasses.field(default_factory=encoders.EncoderConfig)
head_dropout: float = 0.1
head_initializer_range: float = 0.02
@dataclasses.dataclass
class TaggingConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
# The real class names, the order of which should match real label id.
# Note that a word may be tokenized into multiple word_pieces tokens, and
# we asssume the real label id (non-negative) is assigned to the first token
# of the word, and a negative label id is assigned to the remaining tokens.
# The negative label id will not contribute to loss and metrics.
class_names: Optional[List[str]] = None
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
def _masked_labels_and_weights(y_true):
"""Masks negative values from token level labels.
Args:
y_true: Token labels, typically shape (batch_size, seq_len), where tokens
with negative labels should be ignored during loss/accuracy calculation.
Returns:
(masked_y_true, masked_weights) where `masked_y_true` is the input
with each negative label replaced with zero and `masked_weights` is 0.0
where negative labels were replaced and 1.0 for original labels.
"""
# Ignore the classes of tokens with negative values.
mask = tf.greater_equal(y_true, 0)
# Replace negative labels, which are out of bounds for some loss functions,
# with zero.
masked_y_true = tf.where(mask, y_true, 0)
return masked_y_true, tf.cast(mask, tf.float32)
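# Illustrative sketch (hypothetical values): subword continuation tokens
# carry negative labels and are masked out of the loss.
#
#   y_true = tf.constant([[3, -1, 0, -1]])
#   masked_y_true, masked_weights = _masked_labels_and_weights(y_true)
#   # masked_y_true  == [[3, 0, 0, 0]]
#   # masked_weights == [[1., 0., 1., 0.]]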
@task_factory.register_task_cls(TaggingConfig)
class TaggingTask(base_task.Task):
"""Task object for tagging (e.g., NER or POS)."""
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
return models.BertTokenClassifier(
network=encoder_network,
num_classes=len(self.task_config.class_names),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self.task_config.model.head_initializer_range),
dropout_rate=self.task_config.model.head_dropout,
output='logits',
output_encoder_outputs=True)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
logits = tf.cast(model_outputs['logits'], tf.float32)
masked_labels, masked_weights = _masked_labels_and_weights(labels)
loss = tf.keras.losses.sparse_categorical_crossentropy(
masked_labels, logits, from_logits=True)
numerator_loss = tf.reduce_sum(loss * masked_weights)
denominator_loss = tf.reduce_sum(masked_weights)
loss = tf.math.divide_no_nan(numerator_loss, denominator_loss)
return loss
def build_inputs(self, params: cfg.DataConfig, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
# Include some label_id as -1, which will be ignored in loss/metrics.
y = tf.random.uniform(
shape=(1, params.seq_length),
minval=-1,
maxval=len(self.task_config.class_names),
dtype=tf.dtypes.int32)
return (x, y)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def inference_step(self, inputs, model: tf.keras.Model):
"""Performs the forward step."""
logits = model(inputs, training=False)['logits']
return {'logits': logits,
'predict_ids': tf.argmax(logits, axis=-1, output_type=tf.int32)}
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(labels=labels, model_outputs=outputs)
# Negative label ids are padding labels which should be ignored.
real_label_index = tf.where(tf.greater_equal(labels, 0))
predict_ids = tf.gather_nd(outputs['predict_ids'], real_label_index)
label_ids = tf.gather_nd(labels, real_label_index)
return {
self.loss: loss,
'predict_ids': predict_ids,
'label_ids': label_ids,
}
def aggregate_logs(self, state=None, step_outputs=None):
"""Aggregates over logs returned from a validation step."""
if state is None:
state = {'predict_class': [], 'label_class': []}
def id_to_class_name(batched_ids):
class_names = []
for per_example_ids in batched_ids:
class_names.append([])
for per_token_id in per_example_ids.numpy().tolist():
class_names[-1].append(self.task_config.class_names[per_token_id])
return class_names
# Convert id to class names, because `seqeval_metrics` relies on the class
# name to decide IOB tags.
state['predict_class'].extend(id_to_class_name(step_outputs['predict_ids']))
state['label_class'].extend(id_to_class_name(step_outputs['label_ids']))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
"""Reduces aggregated logs over validation steps."""
label_class = aggregated_logs['label_class']
predict_class = aggregated_logs['predict_class']
return {
'f1':
seqeval_metrics.f1_score(label_class, predict_class),
'precision':
seqeval_metrics.precision_score(label_class, predict_class),
'recall':
seqeval_metrics.recall_score(label_class, predict_class),
'accuracy':
seqeval_metrics.accuracy_score(label_class, predict_class),
}
def predict(task: TaggingTask,
params: cfg.DataConfig,
model: tf.keras.Model) -> List[Tuple[int, int, List[int]]]:
"""Predicts on the input data.
Args:
task: A `TaggingTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
Returns:
A list of tuple. Each tuple contains `sentence_id`, `sub_sentence_id` and
a list of predicted ids.
"""
def predict_step(inputs):
"""Replicated prediction calculation."""
x, y = inputs
sentence_ids = x.pop('sentence_id')
sub_sentence_ids = x.pop('sub_sentence_id')
outputs = task.inference_step(x, model)
predict_ids = outputs['predict_ids']
label_mask = tf.greater_equal(y, 0)
return dict(
predict_ids=predict_ids,
label_mask=label_mask,
sentence_ids=sentence_ids,
sub_sentence_ids=sub_sentence_ids)
def aggregate_fn(state, outputs):
"""Concatenates model's outputs."""
if state is None:
state = []
for (batch_predict_ids, batch_label_mask, batch_sentence_ids,
batch_sub_sentence_ids) in zip(outputs['predict_ids'],
outputs['label_mask'],
outputs['sentence_ids'],
outputs['sub_sentence_ids']):
for (tmp_predict_ids, tmp_label_mask, tmp_sentence_id,
tmp_sub_sentence_id) in zip(batch_predict_ids.numpy(),
batch_label_mask.numpy(),
batch_sentence_ids.numpy(),
batch_sub_sentence_ids.numpy()):
real_predict_ids = []
assert len(tmp_predict_ids) == len(tmp_label_mask)
for i in range(len(tmp_predict_ids)):
# Skip the padding label.
if tmp_label_mask[i]:
real_predict_ids.append(tmp_predict_ids[i])
state.append((tmp_sentence_id, tmp_sub_sentence_id, real_predict_ids))
return state
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs, params)
outputs = utils.predict(predict_step, aggregate_fn, dataset)
return sorted(outputs, key=lambda x: (x[0], x[1]))
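# Usage sketch (illustrative): the returned tuples are sorted by sentence id
# and then sub-sentence id.
#
#   results = predict(task, task.task_config.validation_data, model)
#   for sentence_id, sub_sentence_id, predict_ids in results:
#     print(sentence_id, sub_sentence_id, predict_ids)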
| 10,220 | 37.424812 | 93 | py |
models | models-master/official/nlp/tasks/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utils for tasks."""
from typing import Any, Callable
import orbit
import tensorflow as tf
import tensorflow_hub as hub
def get_encoder_from_hub(hub_model_path: str) -> tf.keras.Model:
"""Gets an encoder from hub.
Args:
hub_model_path: The path to the tfhub model.
Returns:
A tf.keras.Model.
"""
input_word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
input_mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
input_type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
hub_layer = hub.KerasLayer(hub_model_path, trainable=True)
dict_input = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
output_dict = hub_layer(dict_input)
return tf.keras.Model(inputs=dict_input, outputs=output_dict)
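# Usage sketch (the hub handle below is a placeholder): the returned model
# consumes the standard three-key BERT input dict.
#
#   encoder = get_encoder_from_hub('/path/or/handle/to/bert_encoder')
#   outputs = encoder(dict(
#       input_word_ids=tf.ones((1, 8), tf.int32),
#       input_mask=tf.ones((1, 8), tf.int32),
#       input_type_ids=tf.zeros((1, 8), tf.int32)))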
def predict(predict_step_fn: Callable[[Any], Any],
aggregate_fn: Callable[[Any, Any], Any], dataset: tf.data.Dataset):
"""Runs prediction.
Args:
predict_step_fn: A callable such as `def predict_step(inputs)`, where
`inputs` are input tensors.
aggregate_fn: A callable such as `def aggregate_fn(state, value)`, where
`value` is the outputs from `predict_step_fn`.
dataset: A `tf.data.Dataset` object.
Returns:
The aggregated predictions.
"""
@tf.function
def predict_step(iterator):
"""Predicts on distributed devices."""
outputs = tf.distribute.get_strategy().run(
predict_step_fn, args=(next(iterator),))
return tf.nest.map_structure(
tf.distribute.get_strategy().experimental_local_results, outputs)
loop_fn = orbit.utils.create_loop_fn(predict_step)
# Set `num_steps` to -1 to exhaust the dataset.
outputs = loop_fn(
iter(dataset), num_steps=-1, state=None, reduce_fn=aggregate_fn) # pytype: disable=wrong-arg-types
return outputs
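# Minimal sketch of the two callbacks (illustrative; assumes `model` and a
# distributed `dataset` already exist):
#
#   def predict_step(inputs):
#     return model(inputs, training=False)
#   def aggregate_fn(state, value):
#     state = state or []
#     state.append(value)
#     return state
#   results = predict(predict_step, aggregate_fn, dataset)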
| 2,575 | 32.454545 | 105 | py |
models | models-master/official/nlp/tasks/question_answering.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Question answering task."""
import dataclasses
import functools
import json
import os
from typing import List, Optional
from absl import logging
import orbit
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.data import squad_lib as squad_lib_wp
from official.nlp.data import squad_lib_sp
from official.nlp.modeling import models
from official.nlp.tasks import utils
from official.nlp.tools import squad_evaluate_v1_1
from official.nlp.tools import squad_evaluate_v2_0
from official.nlp.tools import tokenization
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base span labeler configuration."""
encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
@dataclasses.dataclass
class QuestionAnsweringConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
n_best_size: int = 20
max_answer_length: int = 30
null_score_diff_threshold: float = 0.0
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
@dataclasses.dataclass
class RawAggregatedResult:
"""Raw representation for SQuAD predictions."""
unique_id: int
start_logits: List[float]
end_logits: List[float]
start_indexes: Optional[List[int]] = None
end_indexes: Optional[List[int]] = None
class_logits: Optional[float] = None
@task_factory.register_task_cls(QuestionAnsweringConfig)
class QuestionAnsweringTask(base_task.Task):
"""Task object for question answering."""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
if params.validation_data is None:
return
if params.validation_data.tokenization == 'WordPiece':
self.squad_lib = squad_lib_wp
elif params.validation_data.tokenization == 'SentencePiece':
self.squad_lib = squad_lib_sp
else:
raise ValueError('Unsupported tokenization method: {}'.format(
params.validation_data.tokenization))
if params.validation_data.input_path:
self._tf_record_input_path, self._eval_examples, self._eval_features = (
self._preprocess_eval_data(params.validation_data))
def set_preprocessed_eval_input_path(self, eval_input_path):
"""Sets the path to the preprocessed eval data."""
self._tf_record_input_path = eval_input_path
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
return models.BertSpanLabeler(
network=encoder_network,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range))
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
start_positions = labels['start_positions']
end_positions = labels['end_positions']
start_logits, end_logits = model_outputs
start_loss = tf.keras.losses.sparse_categorical_crossentropy(
start_positions,
tf.cast(start_logits, dtype=tf.float32),
from_logits=True)
end_loss = tf.keras.losses.sparse_categorical_crossentropy(
end_positions, tf.cast(end_logits, dtype=tf.float32), from_logits=True)
loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
return loss
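  # A worked sketch of the averaging above (hypothetical values): with a mean
  # start loss of 1.2 and a mean end loss of 0.8, the span loss is
  # (1.2 + 0.8) / 2 = 1.0, weighting both endpoints equally.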
def _preprocess_eval_data(self, params):
eval_examples = self.squad_lib.read_squad_examples(
input_file=params.input_path,
is_training=False,
version_2_with_negative=params.version_2_with_negative)
temp_file_path = params.input_preprocessed_data_path or self.logging_dir
if not temp_file_path:
raise ValueError('You must specify a temporary directory, either in '
'params.input_preprocessed_data_path or logging_dir to '
'store intermediate evaluation TFRecord data.')
eval_writer = self.squad_lib.FeatureWriter(
filename=os.path.join(temp_file_path, 'eval.tf_record'),
is_training=False)
eval_features = []
def _append_feature(feature, is_padding):
if not is_padding:
eval_features.append(feature)
eval_writer.process_feature(feature)
# XLNet preprocesses SQuAD examples in a P, Q, class order whereas
# BERT preprocesses in a class, Q, P order.
xlnet_ordering = self.task_config.model.encoder.type == 'xlnet'
kwargs = dict(
examples=eval_examples,
max_seq_length=params.seq_length,
doc_stride=params.doc_stride,
max_query_length=params.query_length,
is_training=False,
output_fn=_append_feature,
batch_size=params.global_batch_size,
xlnet_format=xlnet_ordering)
if params.tokenization == 'SentencePiece':
# squad_lib_sp requires one more argument 'do_lower_case'.
kwargs['do_lower_case'] = params.do_lower_case
kwargs['tokenizer'] = tokenization.FullSentencePieceTokenizer(
sp_model_file=params.vocab_file)
elif params.tokenization == 'WordPiece':
kwargs['tokenizer'] = tokenization.FullTokenizer(
vocab_file=params.vocab_file, do_lower_case=params.do_lower_case)
else:
raise ValueError('Unexpected tokenization: %s' % params.tokenization)
eval_dataset_size = self.squad_lib.convert_examples_to_features(**kwargs)
eval_writer.close()
logging.info('***** Evaluation input stats *****')
logging.info(' Num orig examples = %d', len(eval_examples))
logging.info(' Num split examples = %d', len(eval_features))
logging.info(' Batch size = %d', params.global_batch_size)
logging.info(' Dataset size = %d', eval_dataset_size)
return eval_writer.filename, eval_examples, eval_features
def _dummy_data(self, params, _):
"""Returns dummy data."""
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
y = dict(
start_positions=tf.constant(0, dtype=tf.int32),
end_positions=tf.constant(1, dtype=tf.int32),
is_impossible=tf.constant(0, dtype=tf.int32))
return x, y
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dummy_data = functools.partial(self._dummy_data, params)
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
if params.is_training:
dataloader_params = params
else:
input_path = self._tf_record_input_path
dataloader_params = params.replace(input_path=input_path)
return data_loader_factory.get_data_loader(dataloader_params).load(
input_context)
def build_metrics(self, training=None):
if not training:
# We cannot compute start/end_position_accuracy because start/end_position
# labels are not available in the validation dataset (b/173794928).
return []
# TODO(lehou): a list of metrics doesn't work the same as in compile/fit.
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(
name='start_position_accuracy'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='end_position_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
metrics = dict([(metric.name, metric) for metric in metrics])
start_logits, end_logits = model_outputs
metrics['start_position_accuracy'].update_state(labels['start_positions'],
start_logits)
metrics['end_position_accuracy'].update_state(labels['end_positions'],
end_logits)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
start_logits, end_logits = model_outputs
compiled_metrics.update_state(
y_true=labels, # labels has keys 'start_positions' and 'end_positions'.
y_pred={
'start_positions': start_logits,
'end_positions': end_logits
})
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, _ = inputs
unique_ids = features.pop('unique_ids')
model_outputs = self.inference_step(features, model)
start_logits, end_logits = model_outputs
# We cannot compute validation_loss here, because start/end_position
# labels are not available in the validation dataset (b/173794928).
logs = {
'unique_ids': unique_ids,
'start_logits': start_logits,
'end_logits': end_logits,
}
return logs
def aggregate_logs(self, state=None, step_outputs=None):
assert step_outputs is not None, 'Got no logs from self.validation_step.'
if state is None:
state = []
for outputs in zip(step_outputs['unique_ids'],
step_outputs['start_logits'],
step_outputs['end_logits']):
numpy_values = [
output.numpy() for output in outputs if output is not None]
for values in zip(*numpy_values):
state.append(RawAggregatedResult(
unique_id=values[0],
start_logits=values[1],
end_logits=values[2]))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
all_predictions, _, scores_diff = (
self.squad_lib.postprocess_output(
self._eval_examples,
self._eval_features,
aggregated_logs,
self.task_config.n_best_size,
self.task_config.max_answer_length,
self.task_config.validation_data.do_lower_case,
version_2_with_negative=(
self.task_config.validation_data.version_2_with_negative),
null_score_diff_threshold=(
self.task_config.null_score_diff_threshold),
xlnet_format=self.task_config.validation_data.xlnet_format,
verbose=False))
with tf.io.gfile.GFile(self.task_config.validation_data.input_path,
'r') as reader:
dataset_json = json.load(reader)
pred_dataset = dataset_json['data']
if self.task_config.validation_data.version_2_with_negative:
eval_metrics = squad_evaluate_v2_0.evaluate(pred_dataset, all_predictions,
scores_diff)
eval_metrics = {
'exact_match': eval_metrics['final_exact'],
'exact_match_threshold': eval_metrics['final_exact_thresh'],
'final_f1': eval_metrics['final_f1'] / 100.0, # scale back to [0, 1].
'f1_threshold': eval_metrics['final_f1_thresh'],
'has_answer_exact_match': eval_metrics['HasAns_exact'],
'has_answer_f1': eval_metrics['HasAns_f1']
}
else:
eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions)
eval_metrics = {
'exact_match': eval_metrics['exact_match'],
'final_f1': eval_metrics['final_f1']
}
return eval_metrics
@dataclasses.dataclass
class XLNetQuestionAnsweringConfig(QuestionAnsweringConfig):
"""The config for the XLNet variation of QuestionAnswering."""
pass
@task_factory.register_task_cls(XLNetQuestionAnsweringConfig)
class XLNetQuestionAnsweringTask(QuestionAnsweringTask):
"""XLNet variant of the Question Answering Task.
The main differences include:
- The encoder is an `XLNetBase` class.
- The `SpanLabeling` head is an instance of `XLNetSpanLabeling` which
predicts start/end positions and impossibility score. During inference,
it predicts the top N scores and indexes.
"""
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
return models.XLNetSpanLabeler(
network=encoder_network,
start_n_top=self.task_config.n_best_size,
end_n_top=self.task_config.n_best_size,
initializer=tf.keras.initializers.RandomNormal(
stddev=encoder_cfg.initializer_range))
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
start_positions = labels['start_positions']
end_positions = labels['end_positions']
is_impossible = labels['is_impossible']
is_impossible = tf.cast(tf.reshape(is_impossible, [-1]), tf.float32)
start_logits = model_outputs['start_logits']
end_logits = model_outputs['end_logits']
class_logits = model_outputs['class_logits']
start_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
start_positions, start_logits)
end_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
end_positions, end_logits)
is_impossible_loss = tf.keras.losses.binary_crossentropy(
is_impossible, class_logits, from_logits=True)
loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
loss += tf.reduce_mean(is_impossible_loss) / 2
return loss
def process_metrics(self, metrics, labels, model_outputs):
metrics = dict([(metric.name, metric) for metric in metrics])
start_logits = model_outputs['start_logits']
end_logits = model_outputs['end_logits']
metrics['start_position_accuracy'].update_state(labels['start_positions'],
start_logits)
metrics['end_position_accuracy'].update_state(labels['end_positions'],
end_logits)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
start_logits = model_outputs['start_logits']
end_logits = model_outputs['end_logits']
compiled_metrics.update_state(
y_true=labels, # labels has keys 'start_positions' and 'end_positions'.
y_pred={
'start_positions': start_logits,
'end_positions': end_logits,
})
def _dummy_data(self, params, _):
"""Returns dummy data."""
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
zero = tf.constant(0, dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
class_index=zero,
is_impossible=zero,
paragraph_mask=dummy_ids,
start_positions=tf.zeros((1), dtype=tf.int32))
y = dict(
start_positions=tf.zeros((1), dtype=tf.int32),
end_positions=tf.ones((1), dtype=tf.int32),
is_impossible=zero)
return x, y
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, _ = inputs
unique_ids = features.pop('unique_ids')
model_outputs = self.inference_step(features, model)
start_top_predictions = model_outputs['start_top_predictions']
end_top_predictions = model_outputs['end_top_predictions']
start_indexes = model_outputs['start_top_index']
end_indexes = model_outputs['end_top_index']
class_logits = model_outputs['class_logits']
logs = {
'unique_ids': unique_ids,
'start_top_predictions': start_top_predictions,
'end_top_predictions': end_top_predictions,
'start_indexes': start_indexes,
'end_indexes': end_indexes,
'class_logits': class_logits,
}
return logs
def aggregate_logs(self, state=None, step_outputs=None):
assert step_outputs is not None, 'Got no logs from self.validation_step.'
if state is None:
state = []
for outputs in zip(step_outputs['unique_ids'],
step_outputs['start_top_predictions'],
step_outputs['end_top_predictions'],
step_outputs['start_indexes'],
step_outputs['end_indexes'],
step_outputs['class_logits']):
numpy_values = [
output.numpy() for output in outputs]
for (unique_id, start_top_predictions, end_top_predictions, start_indexes,
end_indexes, class_logits) in zip(*numpy_values):
state.append(RawAggregatedResult(
unique_id=unique_id,
start_logits=start_top_predictions.tolist(),
end_logits=end_top_predictions.tolist(),
start_indexes=start_indexes.tolist(),
end_indexes=end_indexes.tolist(),
class_logits=class_logits))
return state
def predict(task: QuestionAnsweringTask, params: cfg.DataConfig,
model: tf.keras.Model):
"""Predicts on the input data.
Args:
task: A `QuestionAnsweringTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
Returns:
    A tuple of `all_predictions`, `all_nbest` and `scores_diff`, each a dict
    that can be written to a JSON file (the prediction, nbest and null_odds
    files, respectively).
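  Example:
    A minimal usage sketch (the names below are illustrative):
      task = QuestionAnsweringTask(task_config)
      model = task.build_model()
      all_predictions, all_nbest, scores_diff = predict(
          task, task_config.validation_data, model)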
"""
tf_record_input_path, eval_examples, eval_features = (
task._preprocess_eval_data(params)) # pylint: disable=protected-access
  # `tf_record_input_path` will overwrite `params.input_path`
  # when `task.build_inputs()` is called.
task.set_preprocessed_eval_input_path(tf_record_input_path)
def predict_step(inputs):
"""Replicated prediction calculation."""
return task.validation_step(inputs, model)
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs, params)
aggregated_outputs = utils.predict(predict_step, task.aggregate_logs, dataset)
all_predictions, all_nbest, scores_diff = (
task.squad_lib.postprocess_output(
eval_examples,
eval_features,
aggregated_outputs,
task.task_config.n_best_size,
task.task_config.max_answer_length,
task.task_config.validation_data.do_lower_case,
version_2_with_negative=(params.version_2_with_negative),
null_score_diff_threshold=task.task_config.null_score_diff_threshold,
xlnet_format=task.task_config.validation_data.xlnet_format,
verbose=False))
return all_predictions, all_nbest, scores_diff
| 19,969 | 38.701789 | 80 | py |
models | models-master/official/nlp/tasks/dual_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual encoder (retrieval) task."""
from typing import Mapping, Tuple
# Import libraries
from absl import logging
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A dual encoder (retrieval) configuration."""
# Normalize input embeddings if set to True.
normalize: bool = True
# Maximum input sequence length.
max_sequence_length: int = 64
# Parameters for training a dual encoder model with additive margin, see
# https://www.ijcai.org/Proceedings/2019/0746.pdf for more details.
logit_scale: float = 1
logit_margin: float = 0
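  # Illustratively (an assumption about models.DualEncoder, not verified
  # here): positive-pair logits are reduced by `logit_margin` and all logits
  # are multiplied by `logit_scale` before the softmax ranking loss.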
bidirectional: bool = False
  # Defines the k values for the recall@k metrics.
eval_top_k: Tuple[int, ...] = (1, 3, 10)
encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
@dataclasses.dataclass
class DualEncoderConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can
# be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
# Defines the concrete model config at instantiation time.
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
@task_factory.register_task_cls(DualEncoderConfig)
class DualEncoderTask(base_task.Task):
"""Task object for dual encoder."""
def build_model(self):
"""Interface to build model. Refer to base_task.Task.build_model."""
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
    # Currently, we only support BERT-style dual encoders.
return models.DualEncoder(
network=encoder_network,
max_seq_length=self.task_config.model.max_sequence_length,
normalize=self.task_config.model.normalize,
logit_scale=self.task_config.model.logit_scale,
logit_margin=self.task_config.model.logit_margin,
output='logits')
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
"""Interface to compute losses. Refer to base_task.Task.build_losses."""
del labels
left_logits = model_outputs['left_logits']
right_logits = model_outputs['right_logits']
batch_size = tf_utils.get_shape_list(left_logits, name='batch_size')[0]
ranking_labels = tf.range(batch_size)
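    # In-batch negatives: row i of the left-vs-right similarity matrix treats
    # the i-th right example as the positive and all other columns as
    # negatives, so the target class for row i is simply i.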
loss = tf_utils.safe_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=ranking_labels,
logits=left_logits))
if self.task_config.model.bidirectional:
right_rank_loss = tf_utils.safe_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=ranking_labels,
logits=right_logits))
loss += right_rank_loss
return tf.reduce_mean(loss)
def build_inputs(self, params, input_context=None) -> tf.data.Dataset:
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path != 'dummy':
return data_loader_factory.get_data_loader(params).load(input_context)
def dummy_data(_):
dummy_ids = tf.zeros((10, params.seq_length), dtype=tf.int32)
x = dict(
left_word_ids=dummy_ids,
left_mask=dummy_ids,
left_type_ids=dummy_ids,
right_word_ids=dummy_ids,
right_mask=dummy_ids,
right_type_ids=dummy_ids)
return x
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def build_metrics(self, training=None):
del training
metrics = [tf.keras.metrics.Mean(name='batch_size_per_core')]
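    # recall@k is sparse top-k accuracy over the in-batch candidates: the
    # fraction of rows whose true (diagonal) pair ranks within the top k
    # logits.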
for k in self.task_config.model.eval_top_k:
metrics.append(tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name=f'left_recall_at_{k}'))
if self.task_config.model.bidirectional:
metrics.append(tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name=f'right_recall_at_{k}'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
del labels
metrics = dict([(metric.name, metric) for metric in metrics])
left_logits = model_outputs['left_logits']
right_logits = model_outputs['right_logits']
batch_size = tf_utils.get_shape_list(
left_logits, name='sequence_output_tensor')[0]
ranking_labels = tf.range(batch_size)
for k in self.task_config.model.eval_top_k:
metrics[f'left_recall_at_{k}'].update_state(ranking_labels, left_logits)
if self.task_config.model.bidirectional:
metrics[f'right_recall_at_{k}'].update_state(ranking_labels,
right_logits)
metrics['batch_size_per_core'].update_state(batch_size)
def validation_step(self,
inputs,
model: tf.keras.Model,
metrics=None) -> Mapping[str, tf.Tensor]:
outputs = model(inputs)
loss = self.build_losses(
labels=None, model_outputs=outputs, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, None, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, None, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def initialize(self, model):
"""Load a pretrained checkpoint (if exists) and then train from iter 0."""
ckpt_dir_or_file = self.task_config.init_checkpoint
logging.info('Trying to load pretrained checkpoint from %s',
ckpt_dir_or_file)
if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
logging.info('No checkpoint file found from %s. Will not load.',
ckpt_dir_or_file)
return
pretrain2finetune_mapping = {
'encoder': model.checkpoint_items['encoder'],
}
ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
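# A minimal usage sketch (illustrative only; the default configs are
# assumptions and a real setup would point `train_data` at a registered
# data loader):
#
#   config = DualEncoderConfig(model=ModelConfig(max_sequence_length=32))
#   task = DualEncoderTask(config)
#   model = task.build_model()
#   metrics = task.build_metrics()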
| 7,777 | 35.862559 | 80 | py |
models | models-master/official/nlp/tasks/masked_lm_determinism_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that masked LM models are deterministic when determinism is enabled."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.tasks import masked_lm
class MLMTaskTest(tf.test.TestCase):
def _build_dataset(self, params, vocab_size):
def dummy_data(_):
dummy_ids = tf.random.uniform((1, params.seq_length), maxval=vocab_size,
dtype=tf.int32)
dummy_mask = tf.ones((1, params.seq_length), dtype=tf.int32)
dummy_type_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_mask,
input_type_ids=dummy_type_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def _build_and_run_model(self, config, num_steps=5):
task = masked_lm.MaskedLMTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = self._build_dataset(config.train_data,
config.model.encoder.get().vocab_size)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
# Run training
for _ in range(num_steps):
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
for metric in metrics:
logs[metric.name] = metric.result()
# Run validation
validation_logs = task.validation_step(next(iterator), model,
metrics=metrics)
for metric in metrics:
validation_logs[metric.name] = metric.result()
return logs, validation_logs, model.weights
def test_task_determinism(self):
config = masked_lm.MaskedLMConfig(
init_checkpoint=self.get_temp_dir(),
scale_loss=True,
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
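    # With identical seeds and op determinism enabled (see __main__ below),
    # both runs should produce bit-identical losses, validation logs and
    # weights.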
tf.keras.utils.set_random_seed(1)
logs1, validation_logs1, weights1 = self._build_and_run_model(config)
tf.keras.utils.set_random_seed(1)
logs2, validation_logs2, weights2 = self._build_and_run_model(config)
self.assertEqual(logs1["loss"], logs2["loss"])
self.assertEqual(validation_logs1["loss"], validation_logs2["loss"])
for weight1, weight2 in zip(weights1, weights2):
self.assertAllEqual(weight1, weight2)
if __name__ == "__main__":
tf.config.experimental.enable_op_determinism()
tf.test.main()
| 3,922 | 36.721154 | 80 | py |
models | models-master/official/nlp/tasks/sentence_prediction_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.sentence_prediction."""
import functools
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import masked_lm
from official.nlp.tasks import sentence_prediction
def _create_fake_dataset(output_path, seq_length, num_classes, num_examples):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(values)))
def create_float_feature(values):
return tf.train.Feature(
float_list=tf.train.FloatList(value=np.ravel(values)))
for i in range(num_examples):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["example_id"] = create_int_feature([i])
if num_classes == 1:
features["label_ids"] = create_float_feature([np.random.random()])
else:
features["label_ids"] = create_int_feature(
[np.random.random_integers(0, num_classes - 1, size=())])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(SentencePredictionTaskTest, self).setUp()
self._train_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path="dummy", seq_length=128, global_batch_size=1))
def get_model_config(self, num_classes):
return sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)),
num_classes=num_classes)
def _run_task(self, config):
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = strategy.distribute_datasets_from_function(
functools.partial(task.build_inputs, config.train_data))
iterator = iter(dataset)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
return task.validation_step(next(iterator), model, metrics=metrics)
@parameterized.named_parameters(
("init_cls_pooler", True),
("init_encoder", False),
)
def test_task(self, init_cls_pooler):
# Saves a checkpoint.
pretrain_cfg = bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768, num_classes=2, name="next_sentence")
])
pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg)
# The model variables will be created after the forward call.
_ = pretrain_model(pretrain_model.inputs)
ckpt = tf.train.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
init_path = ckpt.save(self.get_temp_dir())
# Creates the task.
config = sentence_prediction.SentencePredictionConfig(
init_checkpoint=init_path,
model=self.get_model_config(num_classes=2),
train_data=self._train_data_config,
init_cls_pooler=init_cls_pooler)
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.initialize(model)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
@parameterized.named_parameters(
{
"testcase_name": "regression",
"num_classes": 1,
},
{
"testcase_name": "classification",
"num_classes": 2,
},
)
def test_metrics_and_losses(self, num_classes):
config = sentence_prediction.SentencePredictionConfig(
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(num_classes),
train_data=self._train_data_config)
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
if num_classes == 1:
self.assertIsInstance(metrics[0], tf.keras.metrics.MeanSquaredError)
else:
self.assertIsInstance(metrics[0],
tf.keras.metrics.SparseCategoricalAccuracy)
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
logs = task.validation_step(next(iterator), model, metrics=metrics)
loss = logs["loss"].numpy()
if num_classes == 1:
self.assertGreater(loss, 1.0)
else:
self.assertLess(loss, 1.0)
@parameterized.parameters(("matthews_corrcoef", 2),
("pearson_spearman_corr", 1),
("f1", 2))
def test_np_metrics(self, metric_type, num_classes):
config = sentence_prediction.SentencePredictionConfig(
metric_type=metric_type,
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(num_classes),
train_data=self._train_data_config)
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
strategy = tf.distribute.get_strategy()
distributed_outputs = strategy.run(
functools.partial(task.validation_step, model=model),
args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
aggregated = task.aggregate_logs(step_outputs=outputs)
aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
self.assertIn(metric_type, task.reduce_aggregated_logs(aggregated))
def test_np_metrics_cola_partial_batch(self):
train_data_path = os.path.join(self.get_temp_dir(), "train.tf_record")
num_examples = 5
global_batch_size = 8
seq_length = 16
_create_fake_dataset(
train_data_path,
seq_length=seq_length,
num_classes=2,
num_examples=num_examples)
train_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=train_data_path,
seq_length=seq_length,
is_training=True,
label_type="int",
global_batch_size=global_batch_size,
drop_remainder=False,
include_example_id=True))
config = sentence_prediction.SentencePredictionConfig(
metric_type="matthews_corrcoef",
model=self.get_model_config(2),
train_data=train_data_config)
outputs = self._run_task(config)
self.assertEqual(outputs["sentence_prediction"].shape.as_list(), [8, 1])
def _export_bert_tfhub(self):
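    """Exports a small BERT encoder as a SavedModel usable as a hub module."""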
encoder = encoders.build_encoder(
encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
hub_destination = os.path.join(self.get_temp_dir(), "hub")
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
return hub_destination
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = sentence_prediction.SentencePredictionConfig(
hub_module_url=hub_module_url,
model=self.get_model_config(2),
train_data=self._train_data_config)
self._run_task(config)
@parameterized.named_parameters(("classification", 5), ("regression", 1))
def test_prediction(self, num_classes):
task_config = sentence_prediction.SentencePredictionConfig(
model=self.get_model_config(num_classes=num_classes),
train_data=self._train_data_config)
task = sentence_prediction.SentencePredictionTask(task_config)
model = task.build_model()
test_data_path = os.path.join(self.get_temp_dir(), "test.tf_record")
seq_length = 16
num_examples = 100
_create_fake_dataset(
test_data_path,
seq_length=seq_length,
num_classes=num_classes,
num_examples=num_examples)
test_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=test_data_path,
seq_length=seq_length,
is_training=False,
label_type="int" if num_classes > 1 else "float",
global_batch_size=16,
drop_remainder=False,
include_example_id=True))
predictions = sentence_prediction.predict(task, test_data_config, model)
self.assertLen(predictions, num_examples)
for prediction in predictions:
self.assertEqual(prediction.dtype,
tf.int64 if num_classes > 1 else tf.float32)
if __name__ == "__main__":
tf.test.main()
| 10,430 | 37.349265 | 79 | py |
models | models-master/official/nlp/tasks/tagging_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.tagging."""
import functools
import os
import numpy as np
import tensorflow as tf
from official.nlp.configs import encoders
from official.nlp.data import tagging_dataloader
from official.nlp.tasks import tagging
def _create_fake_dataset(output_path, seq_length, num_labels, num_examples):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
for i in range(num_examples):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["label_ids"] = create_int_feature(
np.random.random_integers(-1, num_labels - 1, size=(seq_length)))
features["sentence_id"] = create_int_feature([i])
features["sub_sentence_id"] = create_int_feature([0])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class TaggingTest(tf.test.TestCase):
def setUp(self):
super(TaggingTest, self).setUp()
self._encoder_config = encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1))
self._train_data_config = tagging_dataloader.TaggingDataConfig(
input_path="dummy", seq_length=128, global_batch_size=1)
def _run_task(self, config):
task = tagging.TaggingTask(config)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = strategy.distribute_datasets_from_function(
functools.partial(task.build_inputs, config.train_data))
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
def test_task(self):
# Saves a checkpoint.
encoder = encoders.build_encoder(self._encoder_config)
ckpt = tf.train.Checkpoint(encoder=encoder)
saved_path = ckpt.save(self.get_temp_dir())
config = tagging.TaggingConfig(
init_checkpoint=saved_path,
model=tagging.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
class_names=["O", "B-PER", "I-PER"])
task = tagging.TaggingTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
task.initialize(model)
def _export_bert_tfhub(self):
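    """Exports a small BERT encoder as a SavedModel usable as a hub module."""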
encoder = encoders.build_encoder(
encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
hub_destination = os.path.join(self.get_temp_dir(), "hub")
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
return hub_destination
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = tagging.TaggingConfig(
hub_module_url=hub_module_url,
class_names=["O", "B-PER", "I-PER"],
train_data=self._train_data_config)
self._run_task(config)
def test_seqeval_metrics(self):
config = tagging.TaggingConfig(
model=tagging.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
class_names=["O", "B-PER", "I-PER"])
task = tagging.TaggingTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
strategy = tf.distribute.get_strategy()
distributed_outputs = strategy.run(
functools.partial(task.validation_step, model=model),
args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
aggregated = task.aggregate_logs(step_outputs=outputs)
aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
self.assertCountEqual({"f1", "precision", "recall", "accuracy"},
task.reduce_aggregated_logs(aggregated).keys())
def test_predict(self):
task_config = tagging.TaggingConfig(
model=tagging.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
class_names=["O", "B-PER", "I-PER"])
task = tagging.TaggingTask(task_config)
model = task.build_model()
test_data_path = os.path.join(self.get_temp_dir(), "test.tf_record")
seq_length = 16
num_examples = 100
_create_fake_dataset(
test_data_path,
seq_length=seq_length,
num_labels=len(task_config.class_names),
num_examples=num_examples)
test_data_config = tagging_dataloader.TaggingDataConfig(
input_path=test_data_path,
seq_length=seq_length,
is_training=False,
global_batch_size=16,
drop_remainder=False,
include_sentence_id=True)
results = tagging.predict(task, test_data_config, model)
self.assertLen(results, num_examples)
self.assertLen(results[0], 3)
if __name__ == "__main__":
tf.test.main()
| 6,407 | 36.91716 | 79 | py |
models | models-master/official/nlp/tasks/dual_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.sentence_prediction."""
import functools
import os
from absl.testing import parameterized
import tensorflow as tf
from official.legacy.bert import configs
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import dual_encoder_dataloader
from official.nlp.tasks import dual_encoder
from official.nlp.tasks import masked_lm
from official.nlp.tools import export_tfhub_lib
class DualEncoderTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(DualEncoderTaskTest, self).setUp()
self._train_data_config = (
dual_encoder_dataloader.DualEncoderDataConfig(
input_path="dummy", seq_length=32))
def get_model_config(self):
return dual_encoder.ModelConfig(
max_sequence_length=32,
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
def _run_task(self, config):
task = dual_encoder.DualEncoderTask(config)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = strategy.distribute_datasets_from_function(
functools.partial(task.build_inputs, config.train_data))
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
def test_task(self):
config = dual_encoder.DualEncoderConfig(
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(),
train_data=self._train_data_config)
task = dual_encoder.DualEncoderTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
# Saves a checkpoint.
pretrain_cfg = bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg)
ckpt = tf.train.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
ckpt.save(config.init_checkpoint)
task.initialize(model)
def _export_bert_tfhub(self):
bert_config = configs.BertConfig(
vocab_size=30522,
hidden_size=16,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_hidden_layers=4)
encoder = export_tfhub_lib.get_bert_encoder(bert_config)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = tf.train.Checkpoint(encoder=encoder)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt")
with tf.io.gfile.GFile(vocab_file, "w") as f:
f.write("dummy content")
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path,
bert_config=bert_config,
encoder_config=None,
model_checkpoint_path=model_checkpoint_path,
vocab_file=vocab_file,
do_lower_case=True,
with_mlm=False)
return export_path
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = dual_encoder.DualEncoderConfig(
hub_module_url=hub_module_url,
model=self.get_model_config(),
train_data=self._train_data_config)
self._run_task(config)
if __name__ == "__main__":
tf.test.main()
| 4,561 | 34.92126 | 77 | py |
models | models-master/official/nlp/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Models NLP Tasks."""
# pylint: disable=g-multiple-import
from official.nlp.tasks.electra_task import ElectraPretrainConfig, ElectraPretrainTask
from official.nlp.tasks.masked_lm import MaskedLMConfig, MaskedLMTask
from official.nlp.tasks.question_answering import QuestionAnsweringConfig, QuestionAnsweringTask
from official.nlp.tasks.sentence_prediction import SentencePredictionConfig, SentencePredictionTask
from official.nlp.tasks.tagging import TaggingConfig, TaggingTask
from official.nlp.tasks.translation import TranslationConfig, TranslationTask
| 1,178 | 50.26087 | 99 | py |
models | models-master/official/nlp/tasks/sentence_prediction.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sentence prediction (classification) task."""
import dataclasses
from typing import List, Union, Optional
from absl import logging
import numpy as np
import orbit
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils
METRIC_TYPES = frozenset(
['accuracy', 'f1', 'matthews_corrcoef', 'pearson_spearman_corr'])
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A classifier/regressor configuration."""
num_classes: int = 0
use_encoder_pooler: bool = False
  encoder: encoders.EncoderConfig = dataclasses.field(
      default_factory=encoders.EncoderConfig)
@dataclasses.dataclass
class SentencePredictionConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can
# be specified.
init_checkpoint: str = ''
init_cls_pooler: bool = False
hub_module_url: str = ''
metric_type: str = 'accuracy'
# Defines the concrete model config at instantiation time.
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
  train_data: cfg.DataConfig = dataclasses.field(
      default_factory=cfg.DataConfig)
  validation_data: cfg.DataConfig = dataclasses.field(
      default_factory=cfg.DataConfig)
@task_factory.register_task_cls(SentencePredictionConfig)
class SentencePredictionTask(base_task.Task):
"""Task object for sentence_prediction."""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
if params.metric_type not in METRIC_TYPES:
raise ValueError('Invalid metric_type: {}'.format(params.metric_type))
self.metric_type = params.metric_type
if hasattr(params.train_data, 'label_field'):
self.label_field = params.train_data.label_field
else:
self.label_field = 'label_ids'
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
if self.task_config.model.encoder.type == 'xlnet':
return models.XLNetClassifier(
network=encoder_network,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.RandomNormal(
stddev=encoder_cfg.initializer_range))
else:
return models.BertClassifier(
network=encoder_network,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
use_encoder_pooler=self.task_config.model.use_encoder_pooler)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
label_ids = labels[self.label_field]
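    # num_classes == 1 denotes a regression head (MSE on the raw outputs);
    # otherwise the outputs are unnormalized logits for sparse cross-entropy.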
if self.task_config.model.num_classes == 1:
loss = tf.keras.losses.mean_squared_error(label_ids, model_outputs)
else:
loss = tf.keras.losses.sparse_categorical_crossentropy(
label_ids, tf.cast(model_outputs, tf.float32), from_logits=True)
if aux_losses:
loss += tf.add_n(aux_losses)
return tf_utils.safe_mean(loss)
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
if self.task_config.model.num_classes == 1:
y = tf.zeros((1,), dtype=tf.float32)
else:
y = tf.zeros((1, 1), dtype=tf.int32)
x[self.label_field] = y
return x
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
if self.task_config.model.num_classes == 1:
metrics = [tf.keras.metrics.MeanSquaredError()]
elif self.task_config.model.num_classes == 2:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
tf.keras.metrics.AUC(name='auc', curve='PR'),
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
for metric in metrics:
if metric.name == 'auc':
        # Convert logits to probabilities and take the probability of the
        # positive class.
metric.update_state(
labels[self.label_field],
tf.expand_dims(tf.nn.softmax(model_outputs)[:, 1], axis=1))
if metric.name == 'cls_accuracy':
metric.update_state(labels[self.label_field], model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
compiled_metrics.update_state(labels[self.label_field], model_outputs)
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
if self.metric_type == 'matthews_corrcoef':
logs.update({
'sentence_prediction': # Ensure one prediction along batch dimension.
tf.expand_dims(tf.math.argmax(outputs, axis=1), axis=1),
'labels':
labels[self.label_field],
})
else:
logs.update({
'sentence_prediction': outputs,
'labels': labels[self.label_field],
})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self.metric_type == 'accuracy':
return None
if state is None:
state = {'sentence_prediction': [], 'labels': []}
state['sentence_prediction'].append(
np.concatenate([v.numpy() for v in step_outputs['sentence_prediction']],
axis=0))
state['labels'].append(
np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.metric_type == 'accuracy':
return None
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
if self.metric_type == 'f1':
preds = np.argmax(preds, axis=1)
return {self.metric_type: sklearn_metrics.f1_score(labels, preds)}
elif self.metric_type == 'matthews_corrcoef':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
return {
self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
}
elif self.metric_type == 'pearson_spearman_corr':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
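      # Report the mean of the Pearson and Spearman correlations, a common
      # convention for STS-B-style evaluation.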
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
corr_metric = (pearson_corr + spearman_corr) / 2
return {self.metric_type: corr_metric}
def initialize(self, model):
"""Load a pretrained checkpoint (if exists) and then train from iter 0."""
ckpt_dir_or_file = self.task_config.init_checkpoint
logging.info('Trying to load pretrained checkpoint from %s',
ckpt_dir_or_file)
if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
logging.info('No checkpoint file found from %s. Will not load.',
ckpt_dir_or_file)
return
pretrain2finetune_mapping = {
'encoder': model.checkpoint_items['encoder'],
}
if self.task_config.init_cls_pooler:
# This option is valid when use_encoder_pooler is false.
pretrain2finetune_mapping[
'next_sentence.pooler_dense'] = model.checkpoint_items[
'sentence_prediction.pooler_dense']
ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def predict(task: SentencePredictionTask,
params: cfg.DataConfig,
model: tf.keras.Model,
params_aug: Optional[cfg.DataConfig] = None,
test_time_aug_wgt: float = 0.3) -> List[Union[int, float]]:
"""Predicts on the input data.
Args:
task: A `SentencePredictionTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
params_aug: A `cfg.DataConfig` object for augmented data.
    test_time_aug_wgt: Test time augmentation weight. The final prediction
      score is (1. - test_time_aug_wgt) * original prediction +
      test_time_aug_wgt * augmented prediction.
Returns:
A list of predictions with length of `num_examples`. For regression task,
each element in the list is the predicted score; for classification task,
each element is the predicted class id.
"""
def predict_step(inputs):
"""Replicated prediction calculation."""
x = inputs
example_id = x.pop('example_id')
outputs = task.inference_step(x, model)
return dict(example_id=example_id, predictions=outputs)
def aggregate_fn(state, outputs):
"""Concatenates model's outputs."""
if state is None:
state = []
for per_replica_example_id, per_replica_batch_predictions in zip(
outputs['example_id'], outputs['predictions']):
state.extend(zip(per_replica_example_id, per_replica_batch_predictions))
return state
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs, params)
outputs = utils.predict(predict_step, aggregate_fn, dataset)
# When running on TPU POD, the order of output cannot be maintained,
# so we need to sort by example_id.
outputs = sorted(outputs, key=lambda x: x[0])
is_regression = task.task_config.model.num_classes == 1
if params_aug is not None:
dataset_aug = orbit.utils.make_distributed_dataset(
tf.distribute.get_strategy(), task.build_inputs, params_aug)
outputs_aug = utils.predict(predict_step, aggregate_fn, dataset_aug)
outputs_aug = sorted(outputs_aug, key=lambda x: x[0])
if is_regression:
return [(1. - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1]
for x, y in zip(outputs, outputs_aug)]
else:
return [
tf.argmax(
(1. - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1],
axis=-1) for x, y in zip(outputs, outputs_aug)
]
if is_regression:
return [x[1] for x in outputs]
else:
return [tf.argmax(x[1], axis=-1) for x in outputs]
| 12,638 | 38.496875 | 93 | py |
models | models-master/official/nlp/tasks/masked_lm_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.masked_lm."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.tasks import masked_lm
class MLMTaskTest(tf.test.TestCase):
def test_task(self):
config = masked_lm.MaskedLMConfig(
init_checkpoint=self.get_temp_dir(),
scale_loss=True,
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path="dummy",
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
task = masked_lm.MaskedLMTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
# Saves a checkpoint.
ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
ckpt.save(config.init_checkpoint)
task.initialize(model)
if __name__ == "__main__":
tf.test.main()
| 2,160 | 33.854839 | 74 | py |
models | models-master/official/nlp/tasks/translation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.translation."""
import functools
import os
import orbit
import tensorflow as tf
from sentencepiece import SentencePieceTrainer
from official.nlp.data import wmt_dataloader
from official.nlp.tasks import translation
def _generate_line_file(filepath, lines):
with tf.io.gfile.GFile(filepath, "w") as f:
for l in lines:
f.write("{}\n".format(l))
def _generate_record_file(filepath, src_lines, tgt_lines):
writer = tf.io.TFRecordWriter(filepath)
for src, tgt in zip(src_lines, tgt_lines):
example = tf.train.Example(
features=tf.train.Features(
feature={
"en": tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[src.encode()])),
"reverse_en": tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tgt.encode()])),
}))
writer.write(example.SerializeToString())
writer.close()
def _train_sentencepiece(input_path, vocab_size, model_path, eos_id=1):
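  """Trains a small sentencepiece BPE model with prefix `model_path`."""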
argstr = " ".join([
f"--input={input_path}", f"--vocab_size={vocab_size}",
"--character_coverage=0.995",
f"--model_prefix={model_path}", "--model_type=bpe",
"--bos_id=-1", "--pad_id=0", f"--eos_id={eos_id}", "--unk_id=2"
])
SentencePieceTrainer.Train(argstr)
class TranslationTaskTest(tf.test.TestCase):
def setUp(self):
super(TranslationTaskTest, self).setUp()
self._temp_dir = self.get_temp_dir()
src_lines = [
"abc ede fg",
"bbcd ef a g",
"de f a a g"
]
tgt_lines = [
"dd cc a ef g",
"bcd ef a g",
"gef cd ba"
]
self._record_input_path = os.path.join(self._temp_dir, "inputs.record")
_generate_record_file(self._record_input_path, src_lines, tgt_lines)
    self._sentencepiece_input_path = os.path.join(self._temp_dir, "inputs.txt")
    _generate_line_file(self._sentencepiece_input_path, src_lines + tgt_lines)
    sentencepiece_model_prefix = os.path.join(self._temp_dir, "sp")
    _train_sentencepiece(self._sentencepiece_input_path, 11,
                         sentencepiece_model_prefix)
    self._sentencepiece_model_path = "{}.model".format(
        sentencepiece_model_prefix)
def test_task(self):
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1)),
train_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path,
src_lang="en", tgt_lang="reverse_en",
is_training=True, static_batch=True, global_batch_size=24,
max_seq_length=12),
        sentencepiece_model_path=self._sentencepiece_model_path)
task = translation.TranslationTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer)
def test_no_sentencepiece_path(self):
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1)),
train_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path,
src_lang="en", tgt_lang="reverse_en",
is_training=True, static_batch=True, global_batch_size=4,
max_seq_length=4),
sentencepiece_model_path=None)
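    # The regex below matches the library's error message verbatim, including
    # its spelling.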
with self.assertRaisesRegex(
ValueError,
"Setencepiece model path not provided."):
translation.TranslationTask(config)
def test_sentencepiece_no_eos(self):
    sentencepiece_model_prefix = os.path.join(self._temp_dir, "sp_no_eos")
    _train_sentencepiece(self._sentencepiece_input_path, 20,
                         sentencepiece_model_prefix, eos_id=-1)
    sentencepiece_model_path = "{}.model".format(
        sentencepiece_model_prefix)
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1)),
train_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path,
src_lang="en", tgt_lang="reverse_en",
is_training=True, static_batch=True, global_batch_size=4,
max_seq_length=4),
        sentencepiece_model_path=sentencepiece_model_path)
with self.assertRaisesRegex(
ValueError,
"EOS token not in tokenizer vocab.*"):
translation.TranslationTask(config)
def test_evaluation(self):
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=translation.EncDecoder(num_layers=1),
decoder=translation.EncDecoder(num_layers=1),
padded_decode=False,
decode_max_length=64),
validation_data=wmt_dataloader.WMTDataConfig(
input_path=self._record_input_path, src_lang="en",
tgt_lang="reverse_en", static_batch=True, global_batch_size=4),
        sentencepiece_model_path=self._sentencepiece_model_path)
logging_dir = self.get_temp_dir()
task = translation.TranslationTask(config, logging_dir=logging_dir)
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs,
config.validation_data)
model = task.build_model()
strategy = tf.distribute.get_strategy()
aggregated = None
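    # Run distributed evaluation over the full dataset, gathering per-replica
    # outputs and folding them into the task's aggregated state.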
for data in dataset:
distributed_outputs = strategy.run(
functools.partial(task.validation_step, model=model),
args=(data,))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
metrics = task.reduce_aggregated_logs(aggregated)
self.assertIn("sacrebleu_score", metrics)
self.assertIn("bleu_score", metrics)
if __name__ == "__main__":
tf.test.main()
| 6,871 | 38.953488 | 80 | py |
models | models-master/official/nlp/tasks/question_answering_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.question_answering."""
import itertools
import json
import os
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import question_answering_dataloader
from official.nlp.tasks import masked_lm
from official.nlp.tasks import question_answering
class QuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(QuestionAnsweringTaskTest, self).setUp()
self._encoder_config = encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1))
self._train_data_config = question_answering_dataloader.QADataConfig(
input_path="dummy", seq_length=128, global_batch_size=1)
val_data = {
"version":
"1.1",
"data": [{
"paragraphs": [{
"context":
"Sky is blue.",
"qas": [{
"question":
"What is blue?",
"id":
"1234",
"answers": [{
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}]
}]
}]
}]
}
self._val_input_path = os.path.join(self.get_temp_dir(), "val_data.json")
with tf.io.gfile.GFile(self._val_input_path, "w") as writer:
writer.write(json.dumps(val_data, indent=4) + "\n")
self._test_vocab = os.path.join(self.get_temp_dir(), "vocab.txt")
with tf.io.gfile.GFile(self._test_vocab, "w") as writer:
writer.write("[PAD]\n[UNK]\n[CLS]\n[SEP]\n[MASK]\nsky\nis\nblue\n")
def _get_validation_data_config(self, version_2_with_negative=False):
return question_answering_dataloader.QADataConfig(
is_training=False,
input_path=self._val_input_path,
input_preprocessed_data_path=self.get_temp_dir(),
seq_length=128,
global_batch_size=1,
version_2_with_negative=version_2_with_negative,
vocab_file=self._test_vocab,
tokenization="WordPiece",
do_lower_case=True)
def _run_task(self, config):
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
metrics = task.build_metrics()
task.initialize(model)
train_dataset = task.build_inputs(config.train_data)
train_iterator = iter(train_dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(train_iterator), model, optimizer, metrics=metrics)
val_dataset = task.build_inputs(config.validation_data)
val_iterator = iter(val_dataset)
logs = task.validation_step(next(val_iterator), model, metrics=metrics)
# Mock that `logs` is from one replica.
logs = {x: (logs[x],) for x in logs}
logs = task.aggregate_logs(step_outputs=logs)
metrics = task.reduce_aggregated_logs(logs)
self.assertIn("final_f1", metrics)
model.save(os.path.join(self.get_temp_dir(), "saved_model"))
@parameterized.parameters(
itertools.product(
(False, True),
("WordPiece", "SentencePiece"),
))
def test_task(self, version_2_with_negative, tokenization):
# Saves a checkpoint.
pretrain_cfg = bert.PretrainerConfig(
encoder=self._encoder_config,
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=3, name="next_sentence")
])
pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg)
ckpt = tf.train.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
saved_path = ckpt.save(self.get_temp_dir())
config = question_answering.QuestionAnsweringConfig(
init_checkpoint=saved_path,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config(
version_2_with_negative))
self._run_task(config)
def _export_bert_tfhub(self):
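    """Exports a small BERT encoder as a SavedModel usable as a hub module."""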
encoder = encoders.build_encoder(
encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)))
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
hub_destination = os.path.join(self.get_temp_dir(), "hub")
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
return hub_destination
def test_task_with_hub(self):
hub_module_url = self._export_bert_tfhub()
config = question_answering.QuestionAnsweringConfig(
hub_module_url=hub_module_url,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config())
self._run_task(config)
@parameterized.named_parameters(("squad1", False), ("squad2", True))
def test_predict(self, version_2_with_negative):
validation_data = self._get_validation_data_config(
version_2_with_negative=version_2_with_negative)
config = question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=validation_data)
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
all_predictions, all_nbest, scores_diff = question_answering.predict(
task, validation_data, model)
self.assertLen(all_predictions, 1)
self.assertLen(all_nbest, 1)
if version_2_with_negative:
self.assertLen(scores_diff, 1)
else:
self.assertEmpty(scores_diff)
class XLNetQuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(XLNetQuestionAnsweringTaskTest, self).setUp()
self._encoder_config = encoders.EncoderConfig(
type="xlnet",
xlnet=encoders.XLNetEncoderConfig(vocab_size=30522, num_layers=1))
self._train_data_config = question_answering_dataloader.QADataConfig(
input_path="dummy", seq_length=128,
global_batch_size=2, xlnet_format=True)
val_data = {
"version":
"2.0",
"data": [{
"paragraphs": [{
"context":
"Sky is blue.",
"qas": [{
"question":
"What is blue?",
"id":
"1234",
"answers": [{
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}]
}]
}]
}]
}
self._val_input_path = os.path.join(self.get_temp_dir(), "val_data.json")
with tf.io.gfile.GFile(self._val_input_path, "w") as writer:
writer.write(json.dumps(val_data, indent=4) + "\n")
self._test_vocab = os.path.join(self.get_temp_dir(), "vocab.txt")
with tf.io.gfile.GFile(self._test_vocab, "w") as writer:
writer.write("[PAD]\n[UNK]\n[CLS]\n[SEP]\n[MASK]\nsky\nis\nblue\n")
def _get_validation_data_config(self):
return question_answering_dataloader.QADataConfig(
is_training=False,
input_path=self._val_input_path,
input_preprocessed_data_path=self.get_temp_dir(),
seq_length=128,
global_batch_size=2,
version_2_with_negative=True,
vocab_file=self._test_vocab,
tokenization="WordPiece",
do_lower_case=True,
xlnet_format=True)
def _run_task(self, config):
task = question_answering.XLNetQuestionAnsweringTask(config)
model = task.build_model()
metrics = task.build_metrics()
task.initialize(model)
train_dataset = task.build_inputs(config.train_data)
train_iterator = iter(train_dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(train_iterator), model, optimizer, metrics=metrics)
val_dataset = task.build_inputs(config.validation_data)
val_iterator = iter(val_dataset)
logs = task.validation_step(next(val_iterator), model, metrics=metrics)
# Mock that `logs` is from one replica.
logs = {x: (logs[x],) for x in logs}
logs = task.aggregate_logs(step_outputs=logs)
metrics = task.reduce_aggregated_logs(logs)
self.assertIn("final_f1", metrics)
self.assertNotIn("loss", metrics)
def test_task(self):
config = question_answering.XLNetQuestionAnsweringConfig(
init_checkpoint="",
n_best_size=5,
model=question_answering.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=self._get_validation_data_config())
self._run_task(config)
if __name__ == "__main__":
tf.test.main()
| 9,939 | 36.368421 | 79 | py |
models | models-master/official/projects/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/bigbird/experiment_configs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bigbird experiment configurations."""
# pylint: disable=g-doc-return-or-yield,line-too-long
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.data import question_answering_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
@exp_factory.register_config_factory('bigbird/glue')
def bigbird_glue() -> cfg.ExperimentConfig:
r"""BigBird GLUE."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
config.task.model.encoder.type = 'bigbird'
return config
@exp_factory.register_config_factory('bigbird/squad')
def bigbird_squad() -> cfg.ExperimentConfig:
r"""BigBird Squad V1/V2."""
config = cfg.ExperimentConfig(
task=question_answering.QuestionAnsweringConfig(
train_data=question_answering_dataloader.QADataConfig(),
validation_data=question_answering_dataloader.QADataConfig()),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 8e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
config.task.model.encoder.type = 'bigbird'
return config
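# A minimal retrieval sketch (an illustrative addition; the dataset paths are
# hypothetical): registered experiments are looked up through `exp_factory`,
# the empty data fields are filled in, and the `restrictions` declared above
# are enforced by `validate()`.
def _example_bigbird_squad_config() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('bigbird/squad')
  config.task.train_data.input_path = '/tmp/squad_train.tf_record'
  config.task.train_data.global_batch_size = 32
  config.task.validation_data.input_path = '/tmp/squad_dev.json'
  config.task.validation_data.is_training = False
  config.validate()
  return config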
| 3,774 | 36.376238 | 74 | py |
models | models-master/official/projects/bigbird/recomputing_dropout.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras dropout layer that is aware of `RecomputeContext`."""
import numpy as np
import tensorflow as tf
from official.projects.bigbird import recompute_grad as recompute_grad_lib
from official.projects.bigbird import stateless_dropout as stateless_dropout_lib
# Reimplements internal function
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/smart_cond.py.
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
  Args:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError('`true_fn` must be callable.')
if not callable(false_fn):
raise TypeError('`false_fn` must be callable.')
pred_value = tf.get_static_value(pred)
if isinstance(pred, tf.Variable) or pred_value is None:
return tf.cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
if pred_value:
return true_fn()
else:
return false_fn()
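# A small usage sketch (an addition for illustration, not part of the original
# module): a predicate with a known static value dispatches directly, while a
# `tf.Variable` (or any tensor without a static value) falls back to `tf.cond`.
def _smart_cond_example():
  static = smart_cond(True, lambda: tf.ones([]), lambda: tf.zeros([]))
  dynamic = smart_cond(
      tf.Variable(False), lambda: tf.ones([]), lambda: tf.zeros([]))
  return static, dynamic  # (1.0, 0.0)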
# See https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout.
class RecomputingDropout(tf.keras.layers.Layer):
"""`tf.keras.layers.Dropout` that supports `recompute_grad`."""
def __init__(self,
rate,
noise_shape=None,
seed=None,
force_recomputation=False,
**kwargs):
"""Initializes `RecomputingDropout`.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the binary
dropout mask that will be multiplied with the input. For instance, if
inputs have shape `(batch_size, timesteps, features)` and you want the
dropout mask to be the same for all timesteps, you can use
`noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
force_recomputation: If `True`, then raises an error if called outside a
recompute context.
**kwargs: Keyword arguments for `tf.keras.layers.Layer`.
"""
super(RecomputingDropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.force_recomputation = force_recomputation
self.supports_masking = True
# Create a layer-specific seed to combine with the global recompute seed.
self._recompute_seed = (
np.random.randint(-2**31, 2**31, dtype=np.int32)
if seed is None else seed)
def _get_noise_shape(self, inputs):
# Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
# which will override `self.noise_shape`, and allows for custom noise
# shapes with dynamically sized inputs.
if self.noise_shape is None:
return None
concrete_inputs_shape = tf.shape(inputs)
noise_shape = []
for i, value in enumerate(self.noise_shape):
noise_shape.append(concrete_inputs_shape[i] if value is None else value)
return tf.convert_to_tensor(noise_shape)
def call(self, inputs, training=None):
"""Builds computation graph.
Args:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
`inputs` masked according to layer configuration.
Raises:
ValueError: If `force_recomputation` is `True` and called outside a
a recompute context.
"""
if training is None:
training = tf.keras.backend.learning_phase()
def dropped_inputs():
"""Randomly drops elements of `inputs` when `training=True`."""
recompute_context = recompute_grad_lib.get_recompute_context()
if recompute_context is None:
if self.force_recomputation:
raise ValueError(
'RecomputeContext is required when force_recomputation=True.')
return tf.nn.dropout(
inputs,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed,
rate=self.rate)
seed = tf.stack([recompute_context.seed, self._recompute_seed])
return stateless_dropout_lib.stateless_dropout(
inputs,
rate=self.rate,
seed=seed,
noise_shape=self._get_noise_shape(inputs))
output = smart_cond(training, dropped_inputs, lambda: tf.identity(inputs))
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed,
'force_recomputation': self.force_recomputation,
}
base_config = super(RecomputingDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
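# Hedged usage sketch (illustrative only): outside a recompute context the
# layer behaves like a plain `tf.keras.layers.Dropout`; inside
# `recompute_grad` the mask is re-derived from the shared recompute seed, so
# the recomputed forward pass drops the same units as the original one.
def _recomputing_dropout_example():
  layer = RecomputingDropout(rate=0.1, seed=0)
  x = tf.ones([2, 4])
  return layer(x, training=True)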
| 5,942 | 36.14375 | 97 | py |
models | models-master/official/projects/bigbird/encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based text encoder network."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
from official.modeling import activations
from official.modeling import tf_utils
from official.nlp import modeling
from official.nlp.modeling import layers
from official.projects.bigbird import recompute_grad
from official.projects.bigbird import recomputing_dropout
_MAX_SEQ_LEN = 4096
class RecomputeTransformerLayer(layers.TransformerScaffold):
"""Transformer layer that recomputes the forward pass during backpropagation."""
def call(self, inputs, training=None):
emb, mask = inputs
def f(*args):
      # recompute_grad can only handle tensor inputs, so we enumerate the
# nested input [emb, mask] as follows:
# args[0]: emb
# args[1]: mask[0] = band_mask
# args[2]: mask[1] = encoder_from_mask
# args[3]: mask[2] = encoder_to_mask
# args[4]: mask[3] = blocked_encoder_mask
x = super(RecomputeTransformerLayer,
self).call([args[0], [args[1], args[2], args[3], args[4]]],
training=training)
return x
f = recompute_grad.recompute_grad(f)
return f(emb, *mask)
@tf.keras.utils.register_keras_serializable(package='Text')
class BigBirdEncoder(tf.keras.Model):
"""Transformer-based encoder network with BigBird attentions.
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_position_embeddings: The maximum length of position embeddings that this
encoder can consume. If None, max_position_embeddings uses the value from
sequence length. This determines the variable shape for positional
embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
intermediate_size: The intermediate size for the transformer layers.
block_size: int. A BigBird Attention parameter: size of block in from/to
sequences.
num_rand_blocks: int. A BigBird Attention parameter: number of random chunks
per row.
activation: The activation to use for the transformer layers.
dropout_rate: The dropout rate to use for the transformer layers.
attention_dropout_rate: The dropout rate to use for the attention layers
within the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
use_gradient_checkpointing: Use gradient checkpointing to trade-off compute
for memory.
"""
def __init__(self,
vocab_size,
hidden_size=768,
num_layers=12,
num_attention_heads=12,
max_position_embeddings=_MAX_SEQ_LEN,
type_vocab_size=16,
intermediate_size=3072,
block_size=64,
num_rand_blocks=3,
activation=activations.gelu,
dropout_rate=0.1,
attention_dropout_rate=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
embedding_width=None,
use_gradient_checkpointing=False,
**kwargs):
activation = tf.keras.activations.get(activation)
initializer = tf.keras.initializers.get(initializer)
    if use_gradient_checkpointing:
      # Globally monkey-patches Keras dropout so that every dropout layer
      # created below re-derives its mask deterministically when the forward
      # pass is recomputed during backprop.
      tf.keras.layers.Dropout = recomputing_dropout.RecomputingDropout
      layer_cls = RecomputeTransformerLayer
else:
layer_cls = layers.TransformerScaffold
self._self_setattr_tracking = False
self._config_dict = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_position_embeddings': max_position_embeddings,
'type_vocab_size': type_vocab_size,
'intermediate_size': intermediate_size,
'block_size': block_size,
'num_rand_blocks': num_rand_blocks,
'activation': tf_utils.serialize_activation(
activation, use_legacy_format=True
),
'dropout_rate': dropout_rate,
'attention_dropout_rate': attention_dropout_rate,
'initializer': tf_utils.serialize_initializer(
initializer, use_legacy_format=True
),
'embedding_width': embedding_width,
}
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
if embedding_width is None:
embedding_width = hidden_size
self._embedding_layer = modeling.layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings')
word_embeddings = self._embedding_layer(word_ids)
# Always uses dynamic slicing for simplicity.
self._position_embedding_layer = modeling.layers.PositionEmbedding(
initializer=initializer,
max_length=max_position_embeddings,
name='position_embedding')
position_embeddings = self._position_embedding_layer(word_embeddings)
self._type_embedding_layer = modeling.layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings')
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
embeddings = self._embedding_norm_layer(embeddings)
embeddings = tf.keras.layers.Dropout(rate=dropout_rate)(embeddings)
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection')
embeddings = self._embedding_projection(embeddings)
self._transformer_layers = []
data = embeddings
masks = layers.BigBirdMasks(block_size=block_size)(
data, mask)
encoder_outputs = []
attn_head_dim = hidden_size // num_attention_heads
for i in range(num_layers):
layer = layer_cls(
num_attention_heads,
intermediate_size,
activation,
attention_cls=layers.BigBirdAttention,
attention_cfg=dict(
num_heads=num_attention_heads,
key_dim=attn_head_dim,
kernel_initializer=initializer,
from_block_size=block_size,
to_block_size=block_size,
num_rand_blocks=num_rand_blocks,
max_rand_mask_length=max_position_embeddings,
seed=i),
dropout_rate=dropout_rate,
          attention_dropout_rate=attention_dropout_rate,
kernel_initializer=initializer)
self._transformer_layers.append(layer)
data = layer([data, masks])
encoder_outputs.append(data)
outputs = dict(
sequence_output=encoder_outputs[-1], encoder_outputs=encoder_outputs)
super().__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return self._config_dict
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
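# Minimal construction sketch (illustrative; the hyperparameters are
# arbitrary). As wired up in `__init__` above, the encoder consumes
# `input_word_ids`, `input_mask` and `input_type_ids` and returns a dict with
# `sequence_output` plus the per-layer `encoder_outputs`.
def _bigbird_encoder_example():
  network = BigBirdEncoder(
      vocab_size=1024, num_layers=1, max_position_embeddings=4096)
  word_ids = tf.ones([2, 1024], dtype=tf.int32)
  mask = tf.ones([2, 1024], dtype=tf.int32)
  type_ids = tf.zeros([2, 1024], dtype=tf.int32)
  return network([word_ids, mask, type_ids])['sequence_output']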
| 9,349 | 37.319672 | 82 | py |
models | models-master/official/projects/bigbird/recompute_grad.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for rematerialization.
Incubates a version of tf.recompute_grad that is XLA compatible.
"""
import collections
import os
import threading
from typing import Deque, List, NamedTuple, Optional, Sequence
from absl import logging
import numpy as np
import tensorflow as tf
class RecomputeContext(
NamedTuple('RecomputeContext', [
('is_recomputing', bool),
('seed', tf.Tensor),
('children', Deque['RecomputeContext']),
])):
"""Context for recomputation.
Attributes:
is_recomputing: Whether we are in a recomputation phase.
seed: Scalar integer tensor that should be used with stateless random ops
for deterministic behavior and correct computation of the gradient.
children: Nested `RecomputeContext` instances. Used internally by
`recompute_grad` to track nested instances of `RecomputeContext`.
"""
def __enter__(self):
return _context_stack.push(self)
def __exit__(self, exc_type, exc_value, traceback):
_context_stack.pop(self)
# Simplified version of `_DefaultStack` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/ops.py.
class _ContextStack(threading.local):
"""A thread-local stack for providing implicit recompute contexts."""
def __init__(self):
super(_ContextStack, self).__init__()
self._stack = []
def top(self) -> Optional[RecomputeContext]:
return self._stack[-1] if self._stack else None
def push(self, context: RecomputeContext):
self._stack.append(context)
return context
def pop(self, context: RecomputeContext):
if self._stack[-1] is not context:
raise AssertionError('Nesting violated for RecomputeContext.')
self._stack.pop()
_context_stack = _ContextStack()
def get_recompute_context() -> Optional[RecomputeContext]:
"""Returns the current recomputing context if it exists."""
return _context_stack.top()
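# Illustrative sketch (not part of the original module): `RecomputeContext`
# is a context manager, so entering one makes it visible via
# `get_recompute_context()` to any code executed underneath it.
def _recompute_context_example():
  with RecomputeContext(
      is_recomputing=False,
      seed=tf.constant(42, tf.int32),
      children=collections.deque()) as context:
    assert get_recompute_context() is context
  assert get_recompute_context() is None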
# Adapted from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/control_flow_util.py.
def _get_containing_xla_context(graph: tf.Graph) -> Optional[object]:
"""Returns the first ancestor `XLAControlFlowContext` in the `graph`."""
ctxt = graph._get_control_flow_context() # pylint: disable=protected-access
while ctxt:
if ctxt.IsXLAContext():
return ctxt
ctxt = ctxt.outer_context
return None
def _in_xla_context(graph: Optional[tf.Graph] = None) -> bool:
"""Detects whether we are in an XLA context."""
if '--tf_xla_auto_jit=2' in os.environ.get('TF_XLA_FLAGS', ''):
return True
graph = tf.compat.v1.get_default_graph() if graph is None else graph
while True:
if _get_containing_xla_context(graph) is not None:
return True
try:
graph = graph.outer_graph
except AttributeError:
return False
def _force_data_dependency(
first_compute: Sequence[tf.Tensor],
then_compute: Sequence[tf.Tensor]) -> List[tf.Tensor]:
"""Force all of `then_compute` to depend on all of `first_compute`.
Uses a dummy data dependency, which is useful when running on TPUs because
XLA ignores control dependencies. Only supports float arguments.
Args:
first_compute: Sequence of `Tensor`s to be executed before `then_compute`.
    then_compute: Sequence of `Tensor`s to be executed after `first_compute`.
Returns:
Sequence of `Tensor`s with same length of `then_compute`.
Raises:
ValueError: if ranks are unknown or types are not floating.
"""
def _first_element(x):
if x.shape.ndims is None:
raise ValueError('Rank of Tensor %s must be known' % x)
ndims = x.shape.ndims
begin = tf.zeros(ndims, dtype=tf.int32)
size = tf.ones(ndims, dtype=tf.int32)
return tf.reshape(tf.slice(x, begin, size), [])
first_compute_sum = tf.add_n(
[_first_element(x) for x in first_compute if x is not None])
dtype = first_compute_sum.dtype
if not dtype.is_floating:
raise ValueError('_force_data_dependency only supports floating dtypes.')
zero = np.finfo(dtype.as_numpy_dtype).tiny * first_compute_sum
return [
x + tf.cast(zero, x.dtype) if x is not None else None
for x in then_compute
]
def _make_seed_if_none(seed: Optional[tf.Tensor]) -> tf.Tensor:
"""Uses the global generator to make a seed if necessary."""
if seed is not None:
return seed
generator = tf.random.experimental.get_global_generator()
# The two seeds for stateless random ops don't have individual semantics and
# are scrambled together, so providing one seed is fine. This makes it easier
# for users to provide a local seed without worrying about integer overflow.
# See `make_seeds` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/stateful_random_ops.py.
try:
return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed')
except (RuntimeError, TypeError, ValueError, tf.errors.NotFoundError) as e:
# For a number of reasons, the above operation can fail like using multiple
# graphs or toggling between eager and graph modes. Reset the generator.
    logging.warning('Resetting the generator. %s: %s', type(e), e)
tf.random.experimental.set_global_generator(None)
generator = tf.random.experimental.get_global_generator()
return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed')
def recompute_grad(f, seed=None):
"""An eager-compatible version of recompute_grad.
  For f(*args, **kwargs), this supports gradients with respect to args, or
  with respect to any variables residing in the kwarg 'variables'.
Note that for keras layer and model objects, this is handled automatically.
Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not
be able to access the member variables of that object, because `g` returns
through the wrapper function `inner`. When recomputing gradients through
objects that inherit from keras, we suggest keeping a reference to the
underlying object around for the purpose of accessing these variables.
Args:
f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.
    seed: Optional seed for random ops. `seed` should be an integer scalar
`Tensor`. When compiling to XLA, `seed` must have dtype `tf.int32`. If
`seed` is not provided one will be generated.
Returns:
A function `g` that wraps `f`, but which recomputes `f` on the backwards
pass of a gradient call.
"""
@tf.custom_gradient
def inner(*args, **kwargs):
"""Inner function closure for calculating gradients."""
# Detect when we're nested and in the backwards pass, so we don't generate
# an additional seed.
parent_context = get_recompute_context()
if parent_context is not None and parent_context.is_recomputing:
# Use the cached context in the recomputation phase.
with parent_context.children.popleft()._replace(
is_recomputing=True) as context:
result = f(*args, **kwargs)
else:
with RecomputeContext(
is_recomputing=False,
seed=_make_seed_if_none(seed),
children=collections.deque()) as context:
result = f(*args, **kwargs)
# In the forward pass, build up a tree of recomputation contexts.
if parent_context is not None and not parent_context.is_recomputing:
parent_context.children.append(context)
def grad(*dresult, **grad_kwargs):
"""Gradient function calculation for inner function."""
variables = grad_kwargs.pop('variables', None)
if grad_kwargs:
        raise ValueError('Found unexpected kwargs for `grad`: %s' %
                         list(grad_kwargs.keys()))
inputs, seed = list(args), context.seed
if _in_xla_context():
inputs = _force_data_dependency(
tf.nest.flatten(dresult), inputs + [seed])
seed = inputs.pop()
with tf.GradientTape() as tape:
tape.watch(inputs)
if variables is not None:
tape.watch(variables)
with tf.control_dependencies(dresult):
with context._replace(is_recomputing=True, seed=seed):
result = f(*inputs, **kwargs)
kw_vars = []
if variables is not None:
kw_vars = list(variables)
grads = tape.gradient(
result, list(inputs) + kw_vars, output_gradients=dresult)
return grads[:len(inputs)], grads[len(inputs):]
return result, grad
return inner
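# Usage sketch (illustrative only): wrapping a function trades memory for
# compute by re-running its forward pass inside the gradient computation.
# The layer is built before wrapping because `tf.custom_gradient` cannot
# handle variables created inside the decorated function.
def _recompute_grad_example():
  dense = tf.keras.layers.Dense(4)
  dense.build([2, 4])  # Create the variables before wrapping.
  f = recompute_grad(lambda x: dense(x) ** 2)
  x = tf.ones([2, 4])
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.reduce_sum(f(x))
  return tape.gradient(y, x)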
| 9,027 | 36.460581 | 102 | py |
models | models-master/official/projects/bigbird/encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.bigbird.encoder."""
import numpy as np
import tensorflow as tf
from official.projects.bigbird import encoder
class BigBirdEncoderTest(tf.test.TestCase):
def test_encoder(self):
sequence_length = 1024
batch_size = 2
vocab_size = 1024
network = encoder.BigBirdEncoder(
num_layers=1, vocab_size=1024, max_position_embeddings=4096)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
outputs = network([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs["sequence_output"].shape,
(batch_size, sequence_length, 768))
def test_save_restore(self):
sequence_length = 1024
batch_size = 2
vocab_size = 1024
network = encoder.BigBirdEncoder(
num_layers=1, vocab_size=1024, max_position_embeddings=4096)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
inputs = dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data)
ref_outputs = network(inputs)
model_path = self.get_temp_dir() + "/model"
network.save(model_path)
loaded = tf.keras.models.load_model(model_path)
outputs = loaded(inputs)
self.assertAllClose(outputs["sequence_output"],
ref_outputs["sequence_output"])
if __name__ == "__main__":
tf.test.main()
| 2,340 | 35.578125 | 75 | py |
models | models-master/official/projects/bigbird/stateless_dropout.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A replacement for tf.nn.dropout that uses stateless random ops."""
import numbers
from typing import Optional, Sequence, Text, Union
from absl import logging
import tensorflow as tf
def _as_shape(shape: Union[Sequence[int], tf.TensorShape]) -> tf.TensorShape:
"""Converts the given object to a TensorShape."""
return shape if isinstance(shape, tf.TensorShape) else tf.TensorShape(shape)
def _get_noise_shape(
x: tf.Tensor, noise_shape: Union[Sequence[int], tf.TensorShape]
) -> Union[tf.Tensor, tf.TensorShape, Sequence[int]]:
"""Computes the shape of the binary mask for dropout."""
  # If noise_shape is None, return immediately.
if noise_shape is None:
return tf.shape(x)
try:
# Best effort to figure out the intended shape.
    # If not possible, let the op handle it.
    # In eager mode, an exception will show up.
noise_shape_ = _as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tf.TensorShape(new_dims)
return noise_shape
def stateless_dropout(x: tf.Tensor,
rate: float,
seed: tf.Tensor,
noise_shape: Optional[Union[Sequence[int],
tf.TensorShape]] = None,
name: Optional[Text] = None) -> tf.Tensor:
"""Computes dropout: randomly sets elements to zero to prevent overfitting.
See https://www.tensorflow.org/api_docs/python/tf/nn/dropout.
This version differs in that the seed is required if the rate is nonzero.
Args:
x: A floating point tensor.
rate: A scalar `Tensor` with the same type as x. The probability that each
element is dropped. For example, setting rate=0.1 would drop 10% of input
elements.
seed: A shape [2] integer Tensor of seeds to the random number generator.
Must have dtype `tf.int32` when compiling to XLA.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated keep/drop flags.
name: A name for this operation (optional).
Returns:
A `Tensor` of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
tensor. `rate=1` is disallowed, because the output would be all zeros,
which is likely not what was intended.
"""
with tf.name_scope(name or 'stateless_dropout') as name:
x = tf.convert_to_tensor(x, name='x')
if not x.dtype.is_floating:
      raise ValueError('x has to be a floating point tensor since it\'s going '
                       'to be scaled. Got a %s tensor instead.' % x.dtype)
if isinstance(rate, numbers.Real):
if not (rate >= 0 and rate < 1):
raise ValueError('rate must be a scalar tensor or a float in the '
'range [0, 1), got %g' % rate)
if rate > 0.5:
logging.log_first_n(
            logging.WARN, 'Large dropout rate: %g (>0.5). In TensorFlow '
            '2.x, dropout() uses dropout rate instead of keep_prob. '
'Please ensure that this is intended.', 5, rate)
# Early return if nothing needs to be dropped.
if tf.get_static_value(rate) == 0:
return x
rate = tf.convert_to_tensor(rate, dtype=x.dtype, name='rate')
rate.shape.assert_has_rank(0)
noise_shape = _get_noise_shape(x, noise_shape)
# Sample a uniform distribution on [0.0, 1.0) and select values larger than
# rate.
#
    # NOTE: Random uniform can only generate 2^23 distinct floats on
    # [1.0, 2.0), from which 1.0 is then subtracted.
random_tensor = tf.random.stateless_uniform(
noise_shape, seed=seed, dtype=x.dtype)
keep_prob = 1 - rate
scale = 1 / keep_prob
# NOTE: if (1.0 + rate) - 1 is equal to rate, then we want to consider that
# float to be selected, hence we use a >= comparison.
keep_mask = random_tensor >= rate
ret = x * scale * tf.cast(keep_mask, x.dtype)
if not tf.executing_eagerly():
ret.set_shape(x.get_shape())
return ret
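# Determinism sketch (illustrative only): unlike `tf.nn.dropout`, reusing the
# same `[2]`-shaped seed reproduces the exact same mask, which is what makes
# recomputing a dropped-out forward pass possible.
def _stateless_dropout_example():
  x = tf.ones([2, 8])
  seed = tf.constant([1, 2], tf.int32)
  a = stateless_dropout(x, rate=0.5, seed=seed)
  b = stateless_dropout(x, rate=0.5, seed=seed)
  return tf.reduce_all(tf.equal(a, b))  # Always True.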
| 4,921 | 38.376 | 79 | py |
models | models-master/official/projects/bigbird/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/lra/lra_dual_encoder_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for the similarity comparison (classification) task."""
import dataclasses
from typing import Mapping, Optional, Tuple
import tensorflow as tf
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
LABEL_TYPES_MAP = {'int': tf.int64, 'float': tf.float32}
@dataclasses.dataclass
class DualEncoderDataConfig(cfg.DataConfig):
"""Data config for similarity comparison task."""
input_path: str = ''
global_batch_size: int = 32
is_training: bool = True
seq_length: int = 128
label_type: str = 'int'
# Whether to include the example id number.
include_example_id: bool = False
label_field: str = 'label_ids'
# Maps the key in TfExample to feature name.
# E.g 'label_ids' to 'next_sentence_labels'
label_name: Optional[Tuple[str, str]] = None
# Either tfrecord, sstable, or recordio.
file_type: str = 'tfrecord'
@data_loader_factory.register_data_loader_cls(DualEncoderDataConfig)
class DualEncoderDataLoader(data_loader.DataLoader):
"""A class to load dataset for similarity comparison (classification) task."""
def __init__(self, params):
self._params = params
self._seq_length = params.seq_length
self._include_example_id = params.include_example_id
self._label_field = params.label_field
if params.label_name:
self._label_name_mapping = dict([params.label_name])
else:
self._label_name_mapping = dict()
def name_to_features_spec(self):
"""Defines features to decode. Subclass may override to append features."""
label_type = LABEL_TYPES_MAP[self._params.label_type]
name_to_features = {
'left_word_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'left_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'right_word_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'right_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),
self._label_field: tf.io.FixedLenFeature([], label_type),
}
if self._include_example_id:
name_to_features['example_id'] = tf.io.FixedLenFeature([], tf.int64)
return name_to_features
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
example = tf.io.parse_single_example(record, self.name_to_features_spec())
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
key_mapping = {
'left_ids': 'left_word_ids',
'left_mask': 'left_mask',
'right_ids': 'right_word_ids',
'right_mask': 'right_mask',
}
ret = {}
for record_key in record:
if record_key in key_mapping:
ret[key_mapping[record_key]] = record[record_key]
else:
ret[record_key] = record[record_key]
if self._label_field in self._label_name_mapping:
ret[self._label_name_mapping[self._label_field]] = record[
self._label_field
]
return ret
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
params=self._params,
decoder_fn=self._decode,
parser_fn=self._parse,
)
return reader.read(input_context)
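# Configuration sketch (the path below is hypothetical): the registered
# config class is all that is needed to construct this loader through the
# data loader factory.
def _dual_encoder_loader_example():
  config = DualEncoderDataConfig(
      input_path='/tmp/dual_encoder_train.tf_record',
      seq_length=128,
      global_batch_size=32,
      is_training=True)
  loader = data_loader_factory.get_data_loader(config)
  return loader.load()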
| 4,355 | 33.848 | 80 | py |
models | models-master/official/projects/lra/linformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based LinformerEncoder block layer."""
from typing import Any, Optional
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package="Text")
class LinformerEncoderBlock(tf.keras.layers.Layer):
"""LinformerEncoderBlock layer.
This layer implements the Linformer Encoder from
"Linformer: Self-Attention with Linear Complexity".
(https://arxiv.org/abs/2006.04768)
References:
[Linformer: Self-Attention with Linear Complexity]
(https://arxiv.org/abs/2006.04768)
[Long Range Arena: A Benchmark for Efficient Transformers]
(https://arxiv.org/abs/2011.04006)
"""
def __init__(
self,
num_attention_heads,
inner_dim,
inner_activation,
low_rank_features,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
use_query_residual=True,
key_dim=None,
value_dim=None,
output_last_dim=None,
diff_q_kv_att_layer_norm=False,
return_attention_scores=False,
**kwargs
):
"""Initializes `LinformerEncoder`.
Note: If `output_last_dim` is used and `use_query_residual` is `True`, the
`output_last_dim`'s value must equal the first input's last dimension for
the query residual connection to work. This is because the residual
connection after the multi-head-attention requires their dimensions to
match. If `use_query_residual` is `False`, the `output_last_dim` dictactes
the last dimension of the output of this module and the
multi-head-attention.
E.g. let's say input dims are `[batch_size, seq_dim, input_last_dim]`.
Scenario 1: If `output_last_dim` is not `None`, then the output dims of this
module would be `[batch_size, seq_dim, output_last_dim]`. Note `key_dim` is
    overridden by `output_last_dim`.
Scenario 2: If `output_last_dim` is `None` and `key_dim` is not `None`, then
the output dims of this module would be `[batch_size, seq_dim, key_dim]`.
Scenario 3: If the `output_last_dim` and `key_dim` are both `None`, the
output dims would be `[batch_size, seq_dim, input_last_dim]`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
low_rank_features: The number of dimensions for low-rank projection.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
      attention_initializer: Initializer for kernels of attention layers. If
        set to `None`, attention layers use `kernel_initializer` as the
        kernel initializer.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
use_query_residual: Toggle to execute residual connection after attention.
key_dim: `key_dim` for the `tf.keras.layers.MultiHeadAttention`. If
`None`, we use the first `input_shape`'s last dim.
value_dim: `value_dim` for the `tf.keras.layers.MultiHeadAttention`.
output_last_dim: Final dimension of the output of this module. This also
dictates the value for the final dimension of the multi-head-attention.
When it's `None`, we use, in order of decreasing precedence, `key_dim` *
`num_heads` or the first `input_shape`'s last dim as the output's last
dim.
diff_q_kv_att_layer_norm: If `True`, create a separate attention layer
norm layer for query and key-value if `norm_first` is `True`. Invalid to
set to `True` if `norm_first` is `False`.
return_attention_scores: If `True`, the output of this layer will be a
tuple and additionally contain the attention scores in the shape of
`[batch_size, num_attention_heads, seq_dim, seq_dim]`.
**kwargs: keyword arguments.
"""
tfm.nlp.layers.util.filter_kwargs(kwargs)
super().__init__(**kwargs)
self._num_heads = num_attention_heads
self._low_rank_features = low_rank_features
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout_rate = attention_dropout
self._output_dropout_rate = output_dropout
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._use_query_residual = use_query_residual
self._key_dim = key_dim
self._value_dim = value_dim
self._output_last_dim = output_last_dim
self._diff_q_kv_att_layer_norm = diff_q_kv_att_layer_norm
self._return_attention_scores = return_attention_scores
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer
)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer
)
self._attention_axes = attention_axes
if self._diff_q_kv_att_layer_norm and not self._norm_first:
raise ValueError(
"Setting `diff_q_and_kv_attention_layer_norm` to True"
"when `norm_first` is False is invalid."
)
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s"
% type(input_shape)
)
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
logging.warning(
(
"The input size (%d) is not a multiple of the number of attention"
" heads (%d)"
),
hidden_size,
self._num_heads,
)
if self._key_dim is None:
self._key_dim = int(hidden_size // self._num_heads)
if self._output_last_dim is None:
last_output_shape = hidden_size
else:
last_output_shape = self._output_last_dim
common_kwargs = dict(
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
)
self._key_projection = tf.keras.layers.Dense(
self._low_rank_features,
activation=None,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="key_low_rank_projection",
**common_kwargs
)
self._value_projection = tf.keras.layers.Dense(
self._low_rank_features,
activation=None,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="value_low_rank_projection",
**common_kwargs
)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._low_rank_features,
value_dim=self._low_rank_features,
dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
attention_axes=self._attention_axes,
output_shape=self._output_last_dim,
name="self_attention",
**common_kwargs
)
self._attention_dropout = tf.keras.layers.Dropout(
rate=self._attention_dropout_rate
)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32,
)
self._attention_layer_norm_kv = self._attention_layer_norm
if self._diff_q_kv_att_layer_norm:
self._attention_layer_norm_kv = tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm_kv",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32,
)
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="intermediate",
**common_kwargs
)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy
)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout
)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, last_output_shape),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs
)
self._output_dropout = tf.keras.layers.Dropout(
rate=self._output_dropout_rate
)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32,
)
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self._num_heads,
"low_rank_features": self._low_rank_features,
"inner_dim": self._inner_dim,
"inner_activation": self._inner_activation,
"output_dropout": self._output_dropout_rate,
"attention_dropout": self._attention_dropout_rate,
"kernel_initializer": tf.keras.initializers.serialize(
self._kernel_initializer
),
"bias_initializer": tf.keras.initializers.serialize(
self._bias_initializer
),
"kernel_regularizer": tf.keras.regularizers.serialize(
self._kernel_regularizer
),
"bias_regularizer": tf.keras.regularizers.serialize(
self._bias_regularizer
),
"activity_regularizer": tf.keras.regularizers.serialize(
self._activity_regularizer
),
"kernel_constraint": tf.keras.constraints.serialize(
self._kernel_constraint
),
"bias_constraint": tf.keras.constraints.serialize(
self._bias_constraint
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"inner_dropout": self._inner_dropout,
"attention_initializer": tf.keras.initializers.serialize(
self._attention_initializer
),
"attention_axes": self._attention_axes,
"use_query_residual": self._use_query_residual,
"key_dim": self._key_dim,
"value_dim": self._value_dim,
"output_last_dim": self._output_last_dim,
"diff_q_kv_att_layer_norm": self._diff_q_kv_att_layer_norm,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: Any, output_range: Optional[tf.Tensor] = None) -> Any:
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced. If you
would like to have no change to the model training, it is better to only
set the `output_range` for serving.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError(
"Unexpected inputs to %s with length at %d"
% (self.__class__, len(inputs))
)
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor[:, 0:output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
## Low Rank Projection Here
key = self._key_projection(key_value)
value = self._value_projection(input_tensor)
## Low Rank Projection Done
if self._return_attention_scores:
attention_output, attention_scores = self._attention_layer(
query=target_tensor,
key=key,
value=value,
attention_mask=attention_mask,
return_attention_scores=True,
)
else:
attention_output = self._attention_layer(
query=target_tensor,
key=key,
value=value,
attention_mask=attention_mask,
)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
# Important to not combine `self._norm_first` and
# `self._use_query_residual` into one if clause because else is only for
# `_norm_first == False`.
if self._use_query_residual:
attention_output = source_tensor + attention_output
else:
if self._use_query_residual:
attention_output = target_tensor + attention_output
attention_output = self._attention_layer_norm(attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
# During mixed precision training, layer norm output is always fp32 for
# now. Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(layer_output + attention_output)
if self._return_attention_scores:
return layer_output, attention_scores
else:
return layer_output
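# Shape sketch (illustrative; the hyperparameters are arbitrary): the block
# maps [batch, seq, hidden] to [batch, seq, hidden], with keys and values
# first projected to `low_rank_features` dimensions by the dense layers
# created in `build` before the multi-head attention.
def _linformer_block_example():
  block = LinformerEncoderBlock(
      num_attention_heads=4,
      inner_dim=256,
      inner_activation='relu',
      low_rank_features=32)
  x = tf.ones([2, 512, 64])
  return block(x)  # -> shape [2, 512, 64]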
| 18,587 | 39.496732 | 80 | py |
models | models-master/official/projects/lra/moving_average_gated_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based MegaEncoder block layer."""
from typing import Any
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.lra.exponential_moving_average import MultiHeadEMA
def get_activation_fn(activation):
  """Returns the activation function matching `activation`."""
  if activation == "silu":
    return tf.nn.silu
  elif activation == "softmax":
    return tf.nn.softmax
  else:
    raise NotImplementedError(
        "Unsupported activation: %r. Expected 'silu' or 'softmax'." % activation
    )
class RelativePositionBias(tf.keras.layers.Layer):
"""Relative position embedding layer with bias."""
def __init__(self, max_positions):
super().__init__()
self.max_positions = max_positions
def build(self, input_shape):
gauss_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
self.rel_pos_bias = tf.Variable(
gauss_init(shape=[2 * self.max_positions - 1], dtype=tf.float32),
trainable=True,
)
def call(self, seq_len):
if seq_len is None:
seq_len = self.max_positions
seq_len = tf.get_static_value(seq_len)
# seq_len * 2 -1
b = self.rel_pos_bias[
(self.max_positions - seq_len) : (self.max_positions + seq_len - 1)
]
# seq_len * 3 - 1
t = tf.pad(b, paddings=tf.constant([[0, seq_len]]))
# (seq_len * 3 - 1) * seq_len
t = tf.tile(t, (seq_len,))
t = t[:-seq_len]
# seq_len x (3 * seq_len - 2)
t = tf.reshape(t, shape=(seq_len, 3 * seq_len - 2))
r = (2 * seq_len - 1) // 2
start = r
end = t.shape[1] - r
t = t[:, start:end]
return t
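

# Illustrative shape walkthrough (not library code): for seq_len = L, the
# slice `b` above has length 2L - 1; padding, tiling, and reshaping turn it
# into an (L, 3L - 2) matrix, and the final slice keeps the centered (L, L)
# band of relative-position biases. For example:
#
#   bias_layer = RelativePositionBias(max_positions=8)
#   t = bias_layer(4)  # -> shape (4, 4)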
class MovingAverageGatedAttention(tf.keras.layers.Layer):
"""MegaEncoderBlock layer.
This layer implements the Mega Encoder from
"Mega: Moving Average Equipped Gated Attention".
(https://arxiv.org/abs/2209.10655)
"""
def __init__(
self,
embed_dim,
zdim,
hdim,
ndim,
intermediate_size,
inner_activation=None,
dropout=0.0,
attention_dropout=0.0,
hidden_dropout=0.0,
activation="silu",
bidirectional=False,
truncation=None,
prenorm=True,
max_positions=1024,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
attention_initializer=None,
attention_axes=None,
return_attention_scores=False,
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
):
self.embed_dim = embed_dim
self.hdim = hdim
self.zdim = zdim
self.ndim = ndim
self.inner_dim = intermediate_size
self.activation = get_activation_fn(activation=activation)
self.inner_activation = inner_activation
self.scaling = self.zdim**-0.5
self.dropout = tf.keras.layers.Dropout(rate=dropout)
self.hidden_dropout = tf.keras.layers.Dropout(rate=hidden_dropout)
self.attention_dropout_rate = attention_dropout
self.attention_dropout = tf.keras.layers.Dropout(rate=attention_dropout)
self.ffn_intermediate_dropout = tf.keras.layers.Dropout(rate=hidden_dropout)
self.output_dropout = tf.keras.layers.Dropout(rate=hidden_dropout)
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer
)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer
)
self._attention_axes = attention_axes
self._use_bias = use_bias
self.return_attention_scores = return_attention_scores
self.prenorm = prenorm
self.norm = tf.keras.layers.LayerNormalization(axis=-1)
self.ffn_norm = tf.keras.layers.LayerNormalization(axis=-1)
self.move = MultiHeadEMA(
embed_dim, ndim=ndim, bidirectional=bidirectional, truncation=truncation
)
self.max_positions = max_positions
super().__init__()
def build(self, input_shape):
gauss_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
zero_init = tf.keras.initializers.Zeros()
self.v_proj = tf.keras.layers.Dense(
self.hdim,
activation=None,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(gauss_init),
bias_initializer=tf_utils.clone_initializer(zero_init),
name="v_proj",
)
self.mx_proj = tf.keras.layers.Dense(
self.zdim + self.hdim + 2 * self.embed_dim,
activation=None,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(gauss_init),
bias_initializer=tf_utils.clone_initializer(zero_init),
name="mx_proj",
)
self.h_proj = tf.keras.layers.Dense(
self.embed_dim,
activation=None,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(gauss_init),
bias_initializer=tf_utils.clone_initializer(zero_init),
name="h_proj",
)
self._intermediate_dense = tf.keras.layers.Dense(
self.inner_dim, use_bias=True
)
self._output_dense = tf.keras.layers.Dense(self.embed_dim, use_bias=True)
policy = tf.keras.mixed_precision.global_policy()
self._intermediate_activation_layer = tf.keras.layers.Activation(
self.inner_activation, dtype=policy
)
self.gamma = tf.Variable(
gauss_init(shape=[2, self.zdim], dtype=tf.float32), trainable=True
)
self.beta = tf.Variable(
zero_init(shape=[2, self.zdim], dtype=tf.float32), trainable=True
)
self.rel_pos_bias = RelativePositionBias(max_positions=self.max_positions)
super().build(input_shape)
def get_config(self):
base_config = super().get_config()
base_config.update({
"embed_dim": self.embed_dim,
"zdim": self.zdim,
"hdim": self.hdim,
"dropout": self.dropout,
"attention_dropout": self.attention_dropout_rate,
"kernel_initializer": tf.keras.initializers.serialize(
self._kernel_initializer
),
"bias_initializer": tf.keras.initializers.serialize(
self._bias_initializer
),
"use_bias": self._use_bias,
"prenorm": self.prenorm,
"max_positions": self.max_positions,
"attention_initializer": tf.keras.initializers.serialize(
self._attention_initializer
),
"attention_axes": self._attention_axes,
"return_attention_scores": self.return_attention_scores,
})
return base_config
def _softmax_attention(self, q, k):
    slen = k.shape[1]
    if slen is None:
      # Fallback when the static sequence length is unknown at trace time.
      slen = 2
    # C x C
    bias = self.rel_pos_bias(slen)
# scaled attention
q = q * self.scaling
# B x K x C x C
qk = tf.matmul(q, tf.transpose(k, perm=(0, 2, 1))) + bias
attn_weights = tf.nn.softmax(qk, axis=-1)
return attn_weights
def call(self, inputs: Any) -> Any:
"""MEGA encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor`
as the single sequence of embeddings. [`input tensor`,
`attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
(input_tensor, attention_mask) = inputs
key_value = None
elif len(inputs) == 3:
(input_tensor, key_value, attention_mask) = inputs
else:
raise ValueError(
"Unexpected inputs to %s with length at %d"
% (self.__class__, len(inputs))
)
else:
(input_tensor, key_value, attention_mask) = (inputs, None, None)
if self.prenorm:
input_tensor = self.norm(input_tensor)
if key_value is not None:
key_value = self.norm(key_value)
    # B x L x D -> L x B x D
    x = tf.transpose(input_tensor, perm=[1, 0, 2])
residual = x
seq_len, bsz, _ = x.shape
# L x B x E
v = self.activation(self.v_proj(x))
    # Multi-dimensional damped EMA: L x B x D
    mx = self.move(x, attention_mask)
mx = self.dropout(mx)
# L x B x D -> L x B x (2*D+S+E)
base = self.mx_proj(mx)
u, zr, hx = tf.split(
base, [self.embed_dim, self.zdim + self.hdim, self.embed_dim], axis=-1
)
# L x B x D
u = tf.math.sigmoid(u)
# L x B x (E+S)
z, r = tf.split(tf.nn.silu(zr), [self.zdim, self.hdim], axis=-1)
# L x B x S -> L x B x 1 x S -> L x B x 2 x S
z = tf.expand_dims(z, axis=2) * self.gamma + self.beta
# L x B x 2 x S -> L x B x S
q, k = tf.unstack(z, axis=2)
# L x B x D -> B x L x D
q = tf.transpose(q, perm=(1, 0, 2))
k = tf.transpose(k, perm=(1, 0, 2))
# L x B x E -> B x L x E
v = tf.transpose(v, perm=(1, 0, 2))
attn_weights = self._softmax_attention(q, k)
v = self.hidden_dropout(v)
kernel = tf.squeeze(self.attention_dropout(attn_weights))
# B x K x C x E -> B x L x E -> L x B x E
h = tf.transpose(
tf.reshape(
tf.linalg.matmul(kernel, v), shape=(bsz, seq_len, self.hdim)
),
perm=(1, 0, 2),
)
# L x B x E -> L x B x D
h = self.activation(hx + self.h_proj(h * r))
h = self.dropout(h)
# L x B x D
out = residual + tf.math.multiply(u, h - residual)
if not self.prenorm:
out = self.norm(out)
out = tf.transpose(out, perm=(1, 0, 2))
if self.prenorm:
out = self.ffn_norm(out)
inner_output = self._intermediate_dense(out)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self.ffn_intermediate_dropout(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self.output_dropout(layer_output) + out
if not self.prenorm:
layer_output = self.ffn_norm(layer_output)
return layer_output
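

# A minimal usage sketch (illustrative, not part of the library): the layer
# maps a (batch, seq_len, embed_dim) tensor to a tensor of the same shape.
# All dimension choices below are arbitrary.
if __name__ == "__main__":
  layer = MovingAverageGatedAttention(
      embed_dim=32,
      zdim=16,
      hdim=64,
      ndim=2,
      intermediate_size=64,
      inner_activation="relu",
      max_positions=128,
  )
  x = tf.random.normal((2, 16, 32))  # (batch, seq_len, embed_dim)
  print(layer(x).shape)  # -> (2, 16, 32)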
# File: models-master/official/projects/lra/transformer_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
layers = tfm.nlp.layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class TransformerEncoder(tf.keras.layers.Layer):
"""TransformerEncoder.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: Callable[..., Any] = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02
),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
**kwargs
):
super().__init__(**kwargs)
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings',
)
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding',
)
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings',
)
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32
)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout'
)
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection',
)
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask'
)
for i in range(num_layers):
layer = layers.TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
return_attention_scores=False,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % i,
)
self._transformer_layers.append(layer)
self._num_layers = num_layers
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform',
)
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
  def call(self, inputs):
    word_embeddings = None
    if isinstance(inputs, dict):
      # Fetch these up front so they are also defined for the left/right
      # dual-encoder branches below.
      word_embeddings = inputs.get('input_word_embeddings', None)
      type_ids = inputs.get('input_type_ids', None)
      if 'input_word_ids' in inputs.keys():
        word_ids = inputs.get('input_word_ids')
        mask = inputs.get('input_mask')
elif 'left_word_ids' in inputs.keys():
word_ids = inputs.get('left_word_ids')
mask = inputs.get('left_mask')
elif 'right_word_ids' in inputs.keys():
word_ids = inputs.get('right_word_ids')
mask = inputs.get('right_mask')
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
elif isinstance(inputs, list):
## Dual Encoder Tasks
word_ids, mask = inputs
type_ids = None
dense_inputs, dense_mask, dense_type_ids = None, None, None
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if type_ids is None:
type_ids = tf.zeros_like(mask)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
mask = tf.concat([mask, dense_mask], axis=1)
embeddings = self._get_embeddings(
word_ids, type_ids, word_embeddings, dense_inputs, dense_type_ids
)
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
for layer in self._transformer_layers:
x = layer([x, attention_mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs,
)
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.'
)
print('WARNING: ' + warn_string)
      logging.warning(warn_string)
return cls(**config)
def _get_embeddings(
self,
word_ids: tf.Tensor,
type_ids: tf.Tensor,
word_embeddings: Optional[tf.Tensor],
dense_inputs: Optional[tf.Tensor],
dense_type_ids: Optional[tf.Tensor],
) -> tf.Tensor:
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
type_embeddings = self._type_embedding_layer(type_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
return word_embeddings + position_embeddings + type_embeddings
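

# A minimal usage sketch (illustrative only), mirroring the pattern used by
# this project's encoder tests: feed dummy ids and read the output dict.
if __name__ == '__main__':
  encoder = TransformerEncoder(
      vocab_size=100, hidden_size=32, num_layers=2, num_attention_heads=4,
      inner_dim=64)
  ids = tf.ones((2, 16), dtype=tf.int32)
  outputs = encoder({
      'input_word_ids': ids,
      'input_mask': tf.ones_like(ids),
      'input_type_ids': tf.zeros_like(ids),
  })
  print(outputs['sequence_output'].shape)  # -> (2, 16, 32)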
# File: models-master/official/projects/lra/lra_dual_encoder_task.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for dual encoder style models."""
# pylint: disable=g-classes-have-attributes
import dataclasses
from typing import List, Union, Optional
from absl import logging
import numpy as np
import orbit
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.tasks import utils
from official.projects.lra import lra_dual_encoder
METRIC_TYPES = frozenset(
['accuracy', 'f1', 'matthews_corrcoef', 'pearson_spearman_corr']
)
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A classifier/regressor configuration."""
num_classes: int = 2
use_encoder_pooler: bool = False
encoder: encoders.EncoderConfig = encoders.EncoderConfig()
max_seq_length: int = 512
@dataclasses.dataclass
class DualEncoderConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can
# be specified.
init_checkpoint: str = ''
init_cls_pooler: bool = False
hub_module_url: str = ''
metric_type: str = 'accuracy'
# Defines the concrete model config at instantiation time.
model: ModelConfig = ModelConfig()
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
@task_factory.register_task_cls(DualEncoderConfig)
class DualEncoderTask(base_task.Task):
"""Task object for DualEncoderTask."""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
if params.metric_type not in METRIC_TYPES:
raise ValueError('Invalid metric_type: {}'.format(params.metric_type))
self.metric_type = params.metric_type
if hasattr(params.train_data, 'label_field'):
self.label_field = params.train_data.label_field
else:
self.label_field = 'label_ids'
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError(
'At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.'
)
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url
)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
return lra_dual_encoder.LRADualEncoder(
network=encoder_network,
max_seq_length=self.task_config.model.max_seq_length,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
use_encoder_pooler=self.task_config.model.use_encoder_pooler,
inner_dim=encoder_cfg.hidden_size * 2,
)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
label_ids = labels[self.label_field]
if self.task_config.model.num_classes == 1:
loss = tf.keras.losses.mean_squared_error(label_ids, model_outputs)
else:
loss = tf.keras.losses.sparse_categorical_crossentropy(
label_ids, tf.cast(model_outputs, tf.float32), from_logits=True
)
if aux_losses:
loss += tf.add_n(aux_losses)
return tf_utils.safe_mean(loss)
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
left_word_ids=dummy_ids,
left_mask=dummy_ids,
right_word_ids=dummy_ids,
right_mask=dummy_ids,
)
if self.task_config.model.num_classes == 1:
y = tf.zeros((1,), dtype=tf.float32)
else:
y = tf.zeros((1, 1), dtype=tf.int32)
x[self.label_field] = y
return x
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
if self.task_config.model.num_classes == 1:
metrics = [tf.keras.metrics.MeanSquaredError()]
elif self.task_config.model.num_classes == 2:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
tf.keras.metrics.AUC(name='auc', curve='PR'),
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
for metric in metrics:
if metric.name == 'auc':
        # Convert logits to probabilities and extract the probability of True.
metric.update_state(
labels[self.label_field],
tf.expand_dims(tf.nn.softmax(model_outputs)[:, 1], axis=1),
)
if metric.name == 'cls_accuracy':
metric.update_state(labels[self.label_field], model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
compiled_metrics.update_state(labels[self.label_field], model_outputs)
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses
)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
if self.metric_type == 'matthews_corrcoef':
logs.update({
'sentence_prediction': (
tf.expand_dims( # Ensure one prediction along batch dimension.
tf.math.argmax(outputs, axis=1), axis=1
)
),
'labels': labels[self.label_field],
})
else:
logs.update({
'sentence_prediction': outputs,
'labels': labels[self.label_field],
})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self.metric_type == 'accuracy':
return None
if state is None:
state = {'sentence_prediction': [], 'labels': []}
state['sentence_prediction'].append(
np.concatenate(
[v.numpy() for v in step_outputs['sentence_prediction']], axis=0
)
)
state['labels'].append(
np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0)
)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.metric_type == 'accuracy':
return None
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
if self.metric_type == 'f1':
preds = np.argmax(preds, axis=1)
return {self.metric_type: sklearn_metrics.f1_score(labels, preds)}
elif self.metric_type == 'matthews_corrcoef':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
return {
self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
}
elif self.metric_type == 'pearson_spearman_corr':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
corr_metric = (pearson_corr + spearman_corr) / 2
return {self.metric_type: corr_metric}
def initialize(self, model):
"""Load a pretrained checkpoint (if exists) and then train from iter 0."""
ckpt_dir_or_file = self.task_config.init_checkpoint
logging.info(
'Trying to load pretrained checkpoint from %s', ckpt_dir_or_file
)
if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
logging.info(
'No checkpoint file found from %s. Will not load.', ckpt_dir_or_file
)
return
pretrain2finetune_mapping = {
'encoder': model.checkpoint_items['encoder'],
}
if self.task_config.init_cls_pooler:
# This option is valid when use_encoder_pooler is false.
pretrain2finetune_mapping['next_sentence.pooler_dense'] = (
model.checkpoint_items['sentence_prediction.pooler_dense']
)
ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info(
'Finished loading pretrained checkpoint from %s', ckpt_dir_or_file
)
def predict(
task: DualEncoderTask,
params: cfg.DataConfig,
model: tf.keras.Model,
params_aug: Optional[cfg.DataConfig] = None,
test_time_aug_wgt: float = 0.3,
) -> List[Union[int, float]]:
"""Predicts on the input data.
Args:
task: A `DualEncoderTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
params_aug: A `cfg.DataConfig` object for augmented data.
    test_time_aug_wgt: Test time augmentation weight. The prediction score is
      (1. - test_time_aug_wgt) * original prediction + test_time_aug_wgt *
      augmented prediction.

  Returns:
A list of predictions with length of `num_examples`. For regression task,
each element in the list is the predicted score; for classification task,
each element is the predicted class id.
"""
def predict_step(inputs):
"""Replicated prediction calculation."""
x = inputs
example_id = x.pop('example_id')
outputs = task.inference_step(x, model)
return dict(example_id=example_id, predictions=outputs)
def aggregate_fn(state, outputs):
"""Concatenates model's outputs."""
if state is None:
state = []
for per_replica_example_id, per_replica_batch_predictions in zip(
outputs['example_id'], outputs['predictions']
):
state.extend(zip(per_replica_example_id, per_replica_batch_predictions))
return state
dataset = orbit.utils.make_distributed_dataset(
tf.distribute.get_strategy(), task.build_inputs, params
)
outputs = utils.predict(predict_step, aggregate_fn, dataset)
# When running on TPU POD, the order of output cannot be maintained,
# so we need to sort by example_id.
outputs = sorted(outputs, key=lambda x: x[0])
is_regression = task.task_config.model.num_classes == 1
if params_aug is not None:
dataset_aug = orbit.utils.make_distributed_dataset(
tf.distribute.get_strategy(), task.build_inputs, params_aug
)
outputs_aug = utils.predict(predict_step, aggregate_fn, dataset_aug)
outputs_aug = sorted(outputs_aug, key=lambda x: x[0])
if is_regression:
return [
(1.0 - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1]
for x, y in zip(outputs, outputs_aug)
]
else:
return [
tf.argmax(
(1.0 - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1],
axis=-1,
)
for x, y in zip(outputs, outputs_aug)
]
if is_regression:
return [x[1] for x in outputs]
else:
return [tf.argmax(x[1], axis=-1) for x in outputs]
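

# An illustrative sketch (not library code) of the `pearson_spearman_corr`
# reduction above: the reported value is the plain average of the Pearson and
# Spearman correlation coefficients. The numbers below are made up.
if __name__ == '__main__':
  example_preds = np.array([0.1, 0.4, 0.35, 0.8])
  example_labels = np.array([0.0, 0.5, 0.3, 1.0])
  pearson_corr = stats.pearsonr(example_preds, example_labels)[0]
  spearman_corr = stats.spearmanr(example_preds, example_labels)[0]
  print((pearson_corr + spearman_corr) / 2)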
# File: models-master/official/projects/lra/exponential_moving_average.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based MegaEncoder block layer."""
from typing import Optional
import tensorflow as tf
class MultiHeadEMA(tf.keras.layers.Layer):
"""Exponential Moving Average Layer.
See "https://arxiv.org/abs/2209.10655" for more details.
"""
def __init__(
self, embed_dim, ndim=2, bidirectional=False, truncation=None, **kwargs
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.ndim = ndim
self.bidirectional = bidirectional
self.truncation = truncation
self.scale = tf.math.sqrt(1.0 / self.ndim)
self.kernel_dim = 2 * embed_dim if self.bidirectional else embed_dim
self._kernel = None
self._coeffs = None
def build(self, input_shape):
self.damping_factor = self.add_weight(
shape=(self.kernel_dim, self.ndim, 1),
initializer="random_normal",
trainable=True,
name="damping_factor",
dtype=tf.float32,
)
self.decay_factor = self.add_weight(
shape=(self.kernel_dim, self.ndim, 1),
initializer="random_normal",
trainable=True,
name="decay_factor",
dtype=tf.float32,
)
self.ema_expansion_matrix = self.add_weight(
shape=(self.kernel_dim, self.ndim, 1),
initializer="random_normal",
trainable=True,
name="ema_expansion_matrix",
dtype=tf.float32,
)
self.kernel_projection_matrix = self.add_weight(
shape=(self.kernel_dim, self.ndim),
initializer="random_normal",
trainable=True,
name="kernel_projection_matrix",
dtype=tf.float32,
)
self.residual_weight = self.add_weight(
shape=(self.embed_dim,),
initializer="ones",
trainable=True,
name="residual_weight",
dtype=tf.float32,
)
super().build(input_shape)
def _calc_coeffs(self):
self._coeffs = None
# D x N x 1
damping_factor = tf.math.sigmoid(self.damping_factor)
decay_factor = tf.math.sigmoid(self.decay_factor)
previous_timestep_weight = 1.0 - damping_factor * decay_factor
return damping_factor, previous_timestep_weight
def _compute_kernel(self, length: int):
self._kernel = None
# D x N x 1
damping_factor, previous_timestep_weight = self._calc_coeffs()
# D x N x L
vander = tf.cast(
tf.reshape(tf.range(length), shape=(1, 1, length)),
dtype=damping_factor.dtype,
) * tf.math.log(previous_timestep_weight)
kernel = (damping_factor * self.ema_expansion_matrix) * tf.math.exp(vander)
# D x L
return tf.einsum(
"dnl,dn->dl", kernel, self.kernel_projection_matrix * self.scale
)
def coeffs(self):
if self.training:
return self._calc_coeffs()
else:
if self._coeffs is None:
self._coeffs = self._calc_coeffs()
return self._coeffs
def kernel(self, length: int):
    assert self.truncation is None, "Kernel truncation is not supported."
kernel_size = (
length if self.truncation is None else min(self.truncation, length)
)
return self._compute_kernel(kernel_size)
def call(self, x, padding_mask: Optional[tf.Tensor] = None) -> tf.Tensor:
"""Input shape: Time x Batch x Channel.
Args:
x: Tensor input.
padding_mask (ByteTensor, optional): mask to exclude keys that are pads,
of shape `(batch, src_len)`, where padding elements are indicated by
1s.
Returns:
transformed: transformed Tensor.
"""
seq_len, _, embed_dim = x.shape
assert embed_dim == self.embed_dim
if seq_len is None:
seq_len = 1
# L x B x D
residual = x * self.residual_weight
# L x B x D -> B x D x L
x = tf.transpose(x, perm=(1, 2, 0))
# Masking of the tensor
if padding_mask is not None:
x = x * tf.cast(tf.expand_dims(padding_mask, axis=1), x.dtype)
k = self.kernel(seq_len)
kernel_size = k.shape[1]
fft_len = seq_len
s = 0
if self.bidirectional:
k1, k2 = tf.split(k, [self.embed_dim, self.embed_dim], axis=0)
# D x 2*L-1
padding_l = tf.constant([[0, 0], [kernel_size - 1, 0]])
padding_r = tf.constant([[0, 0], [0, kernel_size - 1]])
padding_x = tf.constant([[0, 0], [0, 0], [kernel_size - 1, 0]])
k = tf.pad(k1, padding_l) + tf.pad(tf.reverse(k2, axis=[-1]), padding_r)
x = tf.pad(x, padding_x)
fft_len = fft_len + kernel_size - 1
s = 2 * kernel_size - 2
k_f = tf.signal.rfft(
k, fft_length=tf.constant([2 * fft_len], dtype=tf.int32)
)
x_f = tf.signal.rfft(
x, fft_length=tf.constant([2 * fft_len], dtype=tf.int32)
)
# B x D x L
out = tf.signal.irfft(
x_f * k_f, fft_length=tf.constant([2 * fft_len], dtype=tf.int32)
)[..., s : s + seq_len]
# B x D x L -> L x B x D
out = tf.nn.silu(tf.transpose(out, perm=(2, 0, 1)) + residual)
return out
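

# A minimal usage sketch (illustrative only). Note the (time, batch, channel)
# input layout documented in `call`; the dimensions below are arbitrary.
if __name__ == "__main__":
  ema = MultiHeadEMA(embed_dim=8, ndim=2, bidirectional=True)
  x = tf.random.normal((16, 2, 8))  # (seq_len, batch, embed_dim)
  print(ema(x).shape)  # -> (16, 2, 8)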
# File: models-master/official/projects/lra/linformer_encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linformer encoder. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
from official.projects.lra.linformer_encoder_block import LinformerEncoderBlock
layers = tfm.nlp.layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class LinformerEncoder(tf.keras.layers.Layer):
"""LinformerEncoder.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
low_rank_features: The number of dimensions for low rank projection.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
low_rank_features: int = 32,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: Callable[..., Any] = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02
),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
**kwargs
):
super().__init__(**kwargs)
# Linformer args
self._low_rank_features = low_rank_features
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings',
)
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding',
)
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings',
)
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32
)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout'
)
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection',
)
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask'
)
for i in range(num_layers):
layer = LinformerEncoderBlock(
num_attention_heads=num_attention_heads,
low_rank_features=low_rank_features,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
return_attention_scores=False,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % i,
)
self._transformer_layers.append(layer)
self._num_layers = num_layers
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform',
)
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'low_rank_features': low_rank_features,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
def call(self, inputs):
if isinstance(inputs, dict):
word_embeddings = inputs.get('input_word_embeddings', None)
type_ids = inputs.get('input_type_ids', None)
if 'input_word_ids' in inputs.keys():
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
elif 'left_word_ids' in inputs.keys():
word_ids = inputs.get('left_word_ids')
mask = inputs.get('left_mask')
elif 'right_word_ids' in inputs.keys():
word_ids = inputs.get('right_word_ids')
mask = inputs.get('right_mask')
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
elif isinstance(inputs, list):
## Dual Encoder Tasks
word_ids, mask = inputs
word_embeddings = None
type_ids = None
dense_inputs, dense_mask, dense_type_ids = None, None, None
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if type_ids is None:
type_ids = tf.zeros_like(mask)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
mask = tf.concat([mask, dense_mask], axis=1)
embeddings = self._get_embeddings(
word_ids, type_ids, word_embeddings, dense_inputs, dense_type_ids
)
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
for layer in self._transformer_layers:
x = layer([x, attention_mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs,
)
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.'
)
print('WARNING: ' + warn_string)
      logging.warning(warn_string)
return cls(**config)
def _get_embeddings(
self,
word_ids: tf.Tensor,
type_ids: tf.Tensor,
word_embeddings: Optional[tf.Tensor],
dense_inputs: Optional[tf.Tensor],
dense_type_ids: Optional[tf.Tensor],
) -> tf.Tensor:
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
type_embeddings = self._type_embedding_layer(type_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
return word_embeddings + position_embeddings + type_embeddings
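

# A minimal usage sketch (illustrative only), following the same pattern as
# this project's encoder tests. All dimension choices below are arbitrary.
if __name__ == '__main__':
  encoder = LinformerEncoder(
      vocab_size=100, hidden_size=32, num_layers=1, num_attention_heads=4,
      low_rank_features=8, max_sequence_length=64)
  ids = tf.ones((2, 64), dtype=tf.int32)
  outputs = encoder({
      'input_word_ids': ids,
      'input_mask': tf.ones_like(ids),
      'input_type_ids': tf.zeros_like(ids),
  })
  print(outputs['sequence_output'].shape)  # -> (2, 64, 32)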
# File: models-master/official/projects/lra/linformer_experiments.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linformer experiments."""
# pylint: disable=g-doc-return-or-yield,line-too-long
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import encoders
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import sentence_prediction
from official.projects.lra import lra_dual_encoder_dataloader
from official.projects.lra import lra_dual_encoder_task
from official.projects.lra.linformer import LinformerEncoderConfig
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
_TRAINER = cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.01,
'exclude_from_weight_decay': [
'LayerNorm',
'layer_norm',
'bias',
],
},
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
},
},
'warmup': {'type': 'polynomial'},
})
)
@exp_factory.register_config_factory('linformer/lra_listops')
def linformer_listops() -> cfg.ExperimentConfig:
"""Linformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=LinformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('linformer/lra_imdb')
def linformer_imdb() -> cfg.ExperimentConfig:
"""Linformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=LinformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('linformer/lra_cifar')
def linformer_cifar() -> cfg.ExperimentConfig:
"""Linformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=LinformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('linformer/lra_pathfinder')
def linformer_pathfinder() -> cfg.ExperimentConfig:
"""Linformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=LinformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('linformer/lra_aan')
def linformer_aan() -> cfg.ExperimentConfig:
"""Linformer LRA Task."""
config = cfg.ExperimentConfig(
task=lra_dual_encoder_task.DualEncoderConfig(
model=lra_dual_encoder_task.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=LinformerEncoderConfig()
)
),
train_data=lra_dual_encoder_dataloader.DualEncoderDataConfig(),
validation_data=lra_dual_encoder_dataloader.DualEncoderDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
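

# Illustrative only: once registered, the experiment configs above can be
# looked up by name through the experiment factory and overridden before
# training. The override below is just an example.
if __name__ == '__main__':
  config = exp_factory.get_exp_config('linformer/lra_imdb')
  config.task.train_data.global_batch_size = 32  # Example override.
  print(config.task.model.encoder.type)  # -> 'any'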
# File: models-master/official/projects/lra/linformer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linformer model configurations and instantiation methods."""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.lra.linformer_encoder import LinformerEncoder
@dataclasses.dataclass
class LinformerEncoderConfig(encoders.BertEncoderConfig):
"""Extra paramerters for Linformer configs.
Attributes:
pad_token_id: the token id for the pad token
low_rank_features: number of dimensions for low-rank projection
"""
pad_token_id: int = 0
low_rank_features: int = 256
@base_config.bind(LinformerEncoderConfig)
def get_encoder(encoder_cfg: LinformerEncoderConfig):
"""Gets a 'LinformerEncoder' object.
Args:
encoder_cfg: A 'LinformerEncoderConfig'.
Returns:
    An encoder object.
"""
encoder = LinformerEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
low_rank_features=encoder_cfg.low_rank_features,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first,
)
return encoder
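

# A minimal usage sketch (illustrative only): selecting this encoder through
# the generic `EncoderConfig(type='any', ...)` hook, exactly as the LRA
# experiment configs in this project do.
if __name__ == '__main__':
  config = encoders.EncoderConfig(
      type='any', any=LinformerEncoderConfig(low_rank_features=128))
  encoder = encoders.build_encoder(config)
  print(type(encoder).__name__)  # -> 'LinformerEncoder'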
# File: models-master/official/projects/lra/transformer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer model configurations and instantiation methods."""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.lra.transformer_encoder import TransformerEncoder
@dataclasses.dataclass
class TransformerEncoderConfig(encoders.BertEncoderConfig):
"""Extra paramerters for Transformer configs.
Attributes: For in-place usage only
"""
@base_config.bind(TransformerEncoderConfig)
def get_encoder(encoder_cfg: TransformerEncoderConfig):
"""Gets a 'TransformerEncoder' object.
Args:
encoder_cfg: A 'TransformerEncoderConfig'.
Returns:
    An encoder object.
"""
encoder = TransformerEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first,
)
return encoder
# File: models-master/official/projects/lra/mega_experiments.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mega experiments."""
# pylint: disable=g-doc-return-or-yield,line-too-long
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import encoders
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import sentence_prediction
from official.projects.lra import lra_dual_encoder_dataloader
from official.projects.lra import lra_dual_encoder_task
from official.projects.lra.mega import MegaEncoderConfig
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
_TRAINER = cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.01,
'exclude_from_weight_decay': [
'LayerNorm',
'layer_norm',
'bias',
],
},
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 1e-7,
'end_learning_rate': 0.0,
},
},
'warmup': {'type': 'polynomial'},
})
)
@exp_factory.register_config_factory('mega/lra_listops')
def mega_listops() -> cfg.ExperimentConfig:
"""Mega lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=MegaEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('mega/lra_imdb')
def mega_imdb() -> cfg.ExperimentConfig:
"""Mega lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=MegaEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('mega/lra_cifar')
def mega_cifar() -> cfg.ExperimentConfig:
"""Mega lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=MegaEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('mega/lra_pathfinder')
def mega_pathfinder() -> cfg.ExperimentConfig:
"""Mega lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=MegaEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('mega/lra_aan')
def mega_aan() -> cfg.ExperimentConfig:
"""Mega LRA task."""
config = cfg.ExperimentConfig(
task=lra_dual_encoder_task.DualEncoderConfig(
model=lra_dual_encoder_task.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=MegaEncoderConfig()
)
),
train_data=lra_dual_encoder_dataloader.DualEncoderDataConfig(),
validation_data=lra_dual_encoder_dataloader.DualEncoderDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
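# A minimal usage sketch, assuming the standard Model Garden flow (the input
# path below is hypothetical): once this module is imported, the factories
# above can be looked up by their registered names.
#
#   from official.core import exp_factory
#   params = exp_factory.get_exp_config('mega/lra_listops')
#   params.task.train_data.input_path = '/path/to/listops/*.tfrecord'
#
# Further overrides can then be applied via `--config_file` or
# `--params_override` when launching `train.py`.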
| 5,375 | 33.909091 | 86 | py |
models | models-master/official/projects/lra/mega_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.lra.mega_encoder."""
import numpy as np
import tensorflow as tf
from official.projects.lra import mega_encoder
class MegaEncoderTest(tf.test.TestCase):
def test_encoder(self):
sequence_length = 1024
batch_size = 2
vocab_size = 1024
network = mega_encoder.MegaEncoder(
num_layers=1,
vocab_size=1024,
max_sequence_length=4096,
)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length)
)
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
outputs = network({
"input_word_ids": word_id_data,
"input_mask": mask_data,
"input_type_ids": type_id_data,
})
self.assertEqual(
outputs["sequence_output"].shape,
(batch_size, sequence_length, 128),
)
if __name__ == "__main__":
tf.test.main()
| 1,574 | 29.288462 | 75 | py |
models | models-master/official/projects/lra/transformer_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer experiments."""
# pylint: disable=g-doc-return-or-yield,line-too-long
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import encoders
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import sentence_prediction
from official.projects.lra import lra_dual_encoder_dataloader
from official.projects.lra import lra_dual_encoder_task
from official.projects.lra.transformer import TransformerEncoderConfig
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
_TRAINER = cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.01,
'exclude_from_weight_decay': [
'LayerNorm',
'layer_norm',
'bias',
],
},
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
},
},
'warmup': {'type': 'polynomial'},
})
)
@exp_factory.register_config_factory('transformer/lra_listops')
def transformer_listops() -> cfg.ExperimentConfig:
"""Transformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=TransformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('transformer/lra_imdb')
def transformer_imdb() -> cfg.ExperimentConfig:
"""Transformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=TransformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('transformer/lra_cifar')
def transformer_cifar() -> cfg.ExperimentConfig:
"""Transformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=TransformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('transformer/lra_pathfinder')
def transformer_pathfinder() -> cfg.ExperimentConfig:
"""Transformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=TransformerEncoderConfig()
)
),
train_data=sentence_prediction_dataloader.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
@exp_factory.register_config_factory('transformer/lra_aan')
def transformer_aan() -> cfg.ExperimentConfig:
"""Transformer lra fine-tuning."""
config = cfg.ExperimentConfig(
task=lra_dual_encoder_task.DualEncoderConfig(
model=lra_dual_encoder_task.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=TransformerEncoderConfig()
)
),
train_data=lra_dual_encoder_dataloader.DualEncoderDataConfig(),
validation_data=lra_dual_encoder_dataloader.DualEncoderDataConfig(
is_training=False, drop_remainder=False
),
),
trainer=_TRAINER,
)
return config
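# These factories mirror the Mega variants above and register under
# 'transformer/lra_listops', 'transformer/lra_imdb', 'transformer/lra_cifar',
# 'transformer/lra_pathfinder' and 'transformer/lra_aan'; they can be fetched
# the same way, e.g. exp_factory.get_exp_config('transformer/lra_aan').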
| 5,544 | 34.774194 | 86 | py |
models | models-master/official/projects/lra/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A customized training library for the specific task."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.lra import linformer_experiments # pylint:disable=unused-import
from official.projects.lra import mega_experiments # pylint:disable=unused-import
from official.projects.lra import transformer_experiments # pylint:disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism()
)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir,
)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
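# A hedged example invocation (paths are illustrative; the flags are the
# standard ones defined by `tfm_flags.define_flags()`):
#
#   python3 -m official.projects.lra.train \
#     --experiment=mega/lra_listops \
#     --mode=train_and_eval \
#     --model_dir=/tmp/lra_listops \
#     --params_override='task.train_data.input_path=/path/to/data'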
| 2,742 | 35.573333 | 89 | py |
models | models-master/official/projects/lra/mega.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mega model configurations and instantiation methods."""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.lra.mega_encoder import MegaEncoder
@dataclasses.dataclass
class MegaEncoderConfig(encoders.BertEncoderConfig):
"""Extra paramerters for Mega configs.
Attributes:
pad_token_id: the token id for the pad token
low_rank_features: number of dimensions for low-rank projection
"""
zdim: int = 64
hdim: int = 256
ndim: int = 16
activation: str = 'silu'
bidirectional: bool = False
dropout: float = 0.0
hidden_dropout: float = 0.0
@base_config.bind(MegaEncoderConfig)
def get_encoder(encoder_cfg: MegaEncoderConfig):
"""Gets a 'MegaEncoder' object.
Args:
encoder_cfg: A 'MegaEncoderConfig'.
Returns:
    An encoder object.
"""
encoder = MegaEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
zdim=encoder_cfg.zdim,
hdim=encoder_cfg.hdim,
ndim=encoder_cfg.ndim,
activation=encoder_cfg.activation,
bidirectional=encoder_cfg.bidirectional,
dropout=encoder_cfg.dropout,
hidden_dropout=encoder_cfg.hidden_dropout,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range
),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first,
)
return encoder
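# A minimal sketch of building an encoder from this config (the sizes below
# are illustrative, not tuned):
#
#   config = MegaEncoderConfig(vocab_size=1024, num_layers=2, zdim=64)
#   encoder = get_encoder(config)
#
# The `base_config.bind` decorator is what lets a `MegaEncoderConfig` be used
# inside `encoders.EncoderConfig(type='any', any=MegaEncoderConfig())`, as
# done in mega_experiments.py.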
| 2,470 | 31.090909 | 78 | py |
models | models-master/official/projects/lra/mega_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mega encoder. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf
import tensorflow_models as tfm
from official.modeling import tf_utils
from official.projects.lra.moving_average_gated_attention import MovingAverageGatedAttention
layers = tfm.nlp.layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
@tf.keras.utils.register_keras_serializable(package='Text')
class MegaEncoder(tf.keras.layers.Layer):
"""MegaEncoder.
Args:
vocab_size: The size of the token vocabulary.
embedding_width: The number of embedding dimensions.
    intermediate_size: The number of dimensions for the MLP layers.
num_layers: The number of transformer layers.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
zdim: hidden dimension for gates used in MEGA Layer.
hdim: hidden dimension used in MEGA Layer.
    ndim: number of EMA dimensions used in the MEGA layer.
    activation: The activation used inside the MEGA attention layer
      (e.g. 'silu').
bidirectional: Whether to use bidirectional EMA.
dropout: Dropout probability for the post-attention and output dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
hidden_dropout: The dropout rate to use for hidden states in MEGA.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
embedding_width: int = 128,
intermediate_size: int = 256,
num_layers: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
zdim: int = 64,
hdim: int = 256,
ndim: int = 16,
activation='silu',
bidirectional=False,
dropout: float = 0.0,
attention_dropout: float = 0.0,
hidden_dropout: float = 0.0,
inner_activation: Callable[..., Any] = _approx_gelu,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02
),
output_range: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
hidden_size: Optional[int] = None,
**kwargs
):
super().__init__(**kwargs)
# Mega args
initializer = tf.keras.initializers.get(initializer)
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings',
)
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding',
)
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings',
)
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32
)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=dropout, name='embedding_dropout'
)
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask'
)
for _ in range(num_layers):
layer = MovingAverageGatedAttention(
embed_dim=embedding_width,
zdim=zdim,
hdim=hdim,
ndim=ndim,
intermediate_size=intermediate_size,
inner_activation=inner_activation,
dropout=dropout,
attention_dropout=attention_dropout,
hidden_dropout=hidden_dropout,
activation=activation,
bidirectional=bidirectional,
prenorm=norm_first,
max_positions=max_sequence_length,
use_bias=True,
return_attention_scores=False,
kernel_initializer=tf_utils.clone_initializer(initializer),
)
self._transformer_layers.append(layer)
self._num_layers = num_layers
self._pooler_layer = tf.keras.layers.Dense(
units=embedding_width,
activation='silu',
kernel_initializer=initializer,
name='pooler_transform',
)
self._config = {
'vocab_size': vocab_size,
'num_layers': num_layers,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'zdim': zdim,
'hdim': hdim,
'ndim': ndim,
'activation': activation,
'bidirectional': bidirectional,
'dropout': dropout,
'attention_dropout': attention_dropout,
'hidden_dropout': hidden_dropout,
'inner_activation': tf.keras.activations.serialize(inner_activation),
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
if 'input_word_ids' in inputs.keys():
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids', None)
word_embeddings = inputs.get('input_word_embeddings', None)
      elif 'left_word_ids' in inputs.keys():
        word_ids = inputs.get('left_word_ids')
        mask = inputs.get('left_mask')
        type_ids = None
      elif 'right_word_ids' in inputs.keys():
        word_ids = inputs.get('right_word_ids')
        mask = inputs.get('right_mask')
        type_ids = None
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
elif isinstance(inputs, list):
## Dual Encoder Tasks
word_ids, mask = inputs
type_ids = None
dense_inputs, dense_mask = None, None
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if type_ids is None:
type_ids = tf.zeros_like(mask)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
mask = tf.concat([mask, dense_mask], axis=1)
embeddings = self._embedding_norm_layer(word_embeddings)
embeddings = self._embedding_dropout(embeddings)
encoder_outputs = []
x = embeddings
for l in range(self._num_layers):
      # Skip the layer call while the batch dimension is still unknown (e.g.
      # during Keras functional tracing); otherwise run the MEGA block.
      if x.shape[0] is not None:
        x = self._transformer_layers[l]([x, mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
avg_token_tensor = tf.math.reduce_mean(last_encoder_output, axis=1)
pooled_output = self._pooler_layer(avg_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs,
)
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.'
)
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _get_embeddings(
self,
word_ids: tf.Tensor,
type_ids: tf.Tensor,
word_embeddings: Optional[tf.Tensor],
dense_inputs: Optional[tf.Tensor],
dense_type_ids: Optional[tf.Tensor],
) -> tf.Tensor:
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
type_embeddings = self._type_embedding_layer(type_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
return word_embeddings + position_embeddings + type_embeddings
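# A minimal usage sketch, mirroring mega_encoder_test.py (shapes and sizes are
# illustrative):
#
#   encoder = MegaEncoder(vocab_size=1024, num_layers=1,
#                         max_sequence_length=4096)
#   outputs = encoder({
#       'input_word_ids': word_ids,   # [batch, seq_len] int32
#       'input_mask': mask,           # [batch, seq_len] int32
#       'input_type_ids': type_ids,   # [batch, seq_len] int32
#   })
#   sequence = outputs['sequence_output']  # [batch, seq_len, embedding_width]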
| 10,751 | 34.368421 | 92 | py |
models | models-master/official/projects/lra/lra_dual_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for dual encoder style models."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
import tensorflow_models as tfm
@tf.keras.utils.register_keras_serializable(package='Text')
class LRADualEncoder(tf.keras.layers.Layer):
"""A dual encoder model based on a transformer-based encoder.
  This is an implementation of the dual encoder network structure based on the
  transformer stack, as described in ["Language-agnostic BERT Sentence
  Embedding"](https://arxiv.org/abs/2007.01852).
The DualEncoder allows a user to pass in a transformer stack, and build a dual
encoder model based on the transformer stack.
Args:
    network: A transformer network which outputs encodings for the inputs.
    num_classes: Number of output classes for the classification head.
    max_seq_length: The maximum allowed sequence length for the transformer.
    dropout_rate: The dropout probability used in the classification head.
    initializer: The initializer for the classification head.
    use_encoder_pooler: If True, the encoder's pooled output is fed to the
      classification head; otherwise the sequence output is used and projected
      through an inner dense layer of size `inner_dim`.
    inner_dim: The dimensionality of the inner projection in the
      classification head. Only used when `use_encoder_pooler` is False.
    head_name: Name of the classification head.
"""
def __init__(
self,
network,
num_classes,
max_seq_length,
dropout_rate=0.1,
initializer='glorot_uniform',
use_encoder_pooler=True,
inner_dim=None,
head_name='dual_encode',
**kwargs
):
super().__init__(**kwargs)
config_dict = {
'network': network,
'num_classes': num_classes,
'head_name': head_name,
'max_seq_length': max_seq_length,
'initializer': initializer,
'use_encoder_pooler': use_encoder_pooler,
'inner_dim': inner_dim,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self._use_encoder_pooler = use_encoder_pooler
self.network = network
self.classifier = tfm.nlp.layers.ClassificationHead(
inner_dim=0 if use_encoder_pooler else inner_dim,
num_classes=num_classes,
initializer=initializer,
dropout_rate=dropout_rate,
name=head_name,
)
def call(self, inputs):
if isinstance(inputs, dict):
left_word_ids = inputs.get('left_word_ids')
left_mask = inputs.get('left_mask')
right_word_ids = inputs.get('right_word_ids')
right_mask = inputs.get('right_mask')
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
inputs = [left_word_ids, left_mask, right_word_ids, right_mask]
left_inputs = [left_word_ids, left_mask]
left_outputs = self.network(left_inputs)
right_inputs = [right_word_ids, right_mask]
right_outputs = self.network(right_inputs)
if self._use_encoder_pooler:
      # With the encoder pooler, feed the pooled output of each encoder to the
      # classification head.
if isinstance(left_outputs, list):
left_cls_inputs = left_outputs[1]
right_cls_inputs = right_outputs[1]
else:
left_cls_inputs = left_outputs['pooled_output']
right_cls_inputs = right_outputs['pooled_output']
else:
if isinstance(left_outputs, list):
left_cls_inputs = left_outputs[0]
right_cls_inputs = right_outputs[0]
else:
left_cls_inputs = left_outputs['sequence_output']
right_cls_inputs = right_outputs['sequence_output']
cls_inputs = tf.concat([left_cls_inputs, right_cls_inputs], -1)
predictions = self.classifier(cls_inputs)
return predictions
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.network)
return items
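# A hedged construction sketch (the encoder choice and sizes are illustrative;
# any encoder exposing this call interface would do):
#
#   network = MegaEncoder(vocab_size=1024, num_layers=2)
#   dual_encoder = LRADualEncoder(
#       network=network, num_classes=2, max_seq_length=4000,
#       use_encoder_pooler=True)
#   logits = dual_encoder({
#       'left_word_ids': ..., 'left_mask': ...,
#       'right_word_ids': ..., 'right_mask': ...,
#   })
#
# Both sides share the same `network` weights; their pooled (or sequence)
# outputs are concatenated before the classification head.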
| 4,977 | 35.602941 | 80 | py |
models | models-master/official/projects/mobilebert/export_tfhub.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to export the MobileBERT encoder model as a TF-Hub SavedModel."""
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.projects.mobilebert import model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
"bert_config_file", None,
"Bert configuration file to define core mobilebert layers.")
flags.DEFINE_string("model_checkpoint_path", None,
"File path to TF model checkpoint.")
flags.DEFINE_string("export_path", None, "TF-Hub SavedModel destination path.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool("do_lower_case", True, "Whether to lowercase.")
def create_mobilebert_model(bert_config):
"""Creates a model for exporting to tfhub."""
pretrainer = model_utils.create_mobilebert_pretrainer(bert_config)
encoder = pretrainer.encoder_network
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
encoder_output_dict = encoder(encoder_inputs_dict)
# For interchangeability with other text representations,
# add "default" as an alias for MobileBERT's whole-input reptesentations.
encoder_output_dict["default"] = encoder_output_dict["pooled_output"]
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
pretrainer_inputs_dict = {x.name: x for x in pretrainer.inputs}
pretrainer_output_dict = pretrainer(pretrainer_inputs_dict)
mlm_model = tf.keras.Model(
inputs=pretrainer_inputs_dict, outputs=pretrainer_output_dict)
# Set `_auto_track_sub_layers` to False, so that the additional weights
# from `mlm` sub-object will not be included in the core model.
# TODO(b/169210253): Use public API after the bug is resolved.
core_model._auto_track_sub_layers = False # pylint: disable=protected-access
core_model.mlm = mlm_model
return core_model, pretrainer
def export_bert_tfhub(bert_config, model_checkpoint_path, hub_destination,
vocab_file, do_lower_case):
"""Restores a tf.keras.Model and saves for TF-Hub."""
core_model, pretrainer = create_mobilebert_model(bert_config)
checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)
logging.info("Begin to load model")
checkpoint.restore(model_checkpoint_path).assert_existing_objects_matched()
logging.info("Loading model finished")
core_model.vocab_file = tf.saved_model.Asset(vocab_file)
core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False)
logging.info("Begin to save files for tfhub at %s", hub_destination)
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
logging.info("tfhub files exported!")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
bert_config = model_utils.BertConfig.from_json_file(FLAGS.bert_config_file)
export_bert_tfhub(bert_config, FLAGS.model_checkpoint_path, FLAGS.export_path,
FLAGS.vocab_file, FLAGS.do_lower_case)
if __name__ == "__main__":
app.run(main)
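# A hedged example invocation (all paths are illustrative):
#
#   python3 -m official.projects.mobilebert.export_tfhub \
#     --bert_config_file=/path/to/bert_config.json \
#     --model_checkpoint_path=/path/to/model.ckpt \
#     --vocab_file=/path/to/vocab.txt \
#     --export_path=/tmp/mobilebert_hub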
| 3,689 | 41.413793 | 80 | py |
models | models-master/official/projects/mobilebert/model_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpoint converter for Mobilebert."""
import copy
import json
import tensorflow.compat.v1 as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import models
from official.nlp.modeling import networks
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
embedding_size=None,
trigram_input=False,
use_bottleneck=False,
intra_bottleneck_size=None,
use_bottleneck_attention=False,
key_query_shared_bottleneck=False,
num_feedforward_networks=1,
normalization_type="layer_norm",
classifier_activation=True):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
embedding_size: The size of the token embedding.
trigram_input: Use a convolution of trigram as input.
use_bottleneck: Use the bottleneck/inverted-bottleneck structure in BERT.
intra_bottleneck_size: The hidden size in the bottleneck.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation.
key_query_shared_bottleneck: Use the same linear transformation for
query&key in the bottleneck.
num_feedforward_networks: Number of FFNs in a block.
normalization_type: The normalization type in BERT.
classifier_activation: Using the tanh activation for the final
representation of the [CLS] token in fine-tuning.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.embedding_size = embedding_size
self.trigram_input = trigram_input
self.use_bottleneck = use_bottleneck
self.intra_bottleneck_size = intra_bottleneck_size
self.use_bottleneck_attention = use_bottleneck_attention
self.key_query_shared_bottleneck = key_query_shared_bottleneck
self.num_feedforward_networks = num_feedforward_networks
self.normalization_type = normalization_type
self.classifier_activation = classifier_activation
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in json_object.items():
config.__dict__[key] = value
if config.embedding_size is None:
config.embedding_size = config.hidden_size
if config.intra_bottleneck_size is None:
config.intra_bottleneck_size = config.hidden_size
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def create_mobilebert_pretrainer(bert_config):
"""Creates a BertPretrainerV2 that wraps MobileBERTEncoder model."""
mobilebert_encoder = networks.MobileBERTEncoder(
word_vocab_size=bert_config.vocab_size,
word_embed_size=bert_config.embedding_size,
type_vocab_size=bert_config.type_vocab_size,
max_sequence_length=bert_config.max_position_embeddings,
num_blocks=bert_config.num_hidden_layers,
hidden_size=bert_config.hidden_size,
num_attention_heads=bert_config.num_attention_heads,
intermediate_size=bert_config.intermediate_size,
intermediate_act_fn=tf_utils.get_activation(bert_config.hidden_act),
hidden_dropout_prob=bert_config.hidden_dropout_prob,
attention_probs_dropout_prob=bert_config.attention_probs_dropout_prob,
intra_bottleneck_size=bert_config.intra_bottleneck_size,
initializer_range=bert_config.initializer_range,
use_bottleneck_attention=bert_config.use_bottleneck_attention,
key_query_shared_bottleneck=bert_config.key_query_shared_bottleneck,
num_feedforward_networks=bert_config.num_feedforward_networks,
normalization_type=bert_config.normalization_type,
classifier_activation=bert_config.classifier_activation)
masked_lm = layers.MobileBertMaskedLM(
embedding_table=mobilebert_encoder.get_embedding_table(),
activation=tf_utils.get_activation(bert_config.hidden_act),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
name="cls/predictions")
pretrainer = models.BertPretrainerV2(
encoder_network=mobilebert_encoder, customized_masked_lm=masked_lm)
# Makes sure the pretrainer variables are created.
_ = pretrainer(pretrainer.inputs)
return pretrainer
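# A minimal sketch tying the two pieces together (the JSON path is
# hypothetical):
#
#   bert_config = BertConfig.from_json_file('/path/to/bert_config.json')
#   pretrainer = create_mobilebert_pretrainer(bert_config)
#   encoder = pretrainer.encoder_network  # the underlying MobileBERTEncoder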
| 7,470 | 42.690058 | 80 | py |
models | models-master/official/projects/mobilebert/tf2_model_checkpoint_converter.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpoint converter for Mobilebert."""
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from official.projects.mobilebert import model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
"bert_config_file", None,
"Bert configuration file to define core mobilebert layers.")
flags.DEFINE_string("tf1_checkpoint_path", None,
"Path to load tf1 checkpoint.")
flags.DEFINE_string("tf2_checkpoint_path", None,
"Path to save tf2 checkpoint.")
flags.DEFINE_boolean("use_model_prefix", False,
("If use model name as prefix for variables. Turn this"
"flag on when the converted checkpoint is used for model"
"in subclass implementation, which uses the model name as"
"prefix for all variable names."))
def _bert_name_replacement(var_name, name_replacements):
"""Gets the variable name replacement."""
for src_pattern, tgt_pattern in name_replacements:
if src_pattern in var_name:
old_var_name = var_name
var_name = var_name.replace(src_pattern, tgt_pattern)
logging.info("Converted: %s --> %s", old_var_name, var_name)
return var_name
def _has_exclude_patterns(name, exclude_patterns):
"""Checks if a string contains substrings that match patterns to exclude."""
for p in exclude_patterns:
if p in name:
return True
return False
def _get_permutation(name, permutations):
"""Checks whether a variable requires transposition by pattern matching."""
for src_pattern, permutation in permutations:
if src_pattern in name:
logging.info("Permuted: %s --> %s", name, permutation)
return permutation
return None
def _get_new_shape(name, shape, num_heads):
"""Checks whether a variable requires reshape by pattern matching."""
if "attention/attention_output/kernel" in name:
return tuple([num_heads, shape[0] // num_heads, shape[1]])
if "attention/attention_output/bias" in name:
return shape
patterns = [
"attention/query", "attention/value", "attention/key"
]
for pattern in patterns:
if pattern in name:
if "kernel" in name:
return tuple([shape[0], num_heads, shape[1] // num_heads])
if "bias" in name:
return tuple([num_heads, shape[0] // num_heads])
return None
def convert(checkpoint_from_path,
checkpoint_to_path,
name_replacements,
permutations,
bert_config,
exclude_patterns=None):
"""Migrates the names of variables within a checkpoint.
Args:
checkpoint_from_path: Path to source checkpoint to be read in.
checkpoint_to_path: Path to checkpoint to be written out.
name_replacements: A list of tuples of the form (match_str, replace_str)
describing variable names to adjust.
permutations: A list of tuples of the form (match_str, permutation)
describing permutations to apply to given variables. Note that match_str
should match the original variable name, not the replaced one.
bert_config: A `BertConfig` to create the core model.
exclude_patterns: A list of string patterns to exclude variables from
checkpoint conversion.
"""
last_ffn_layer_id = str(bert_config.num_feedforward_networks - 1)
name_replacements = [
(x[0], x[1].replace("LAST_FFN_LAYER_ID", last_ffn_layer_id))
for x in name_replacements
]
output_dir, _ = os.path.split(checkpoint_to_path)
tf.io.gfile.makedirs(output_dir)
# Create a temporary V1 name-converted checkpoint in the output directory.
temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1")
temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt")
with tf.Graph().as_default():
logging.info("Reading checkpoint_from_path %s", checkpoint_from_path)
reader = tf.train.NewCheckpointReader(checkpoint_from_path)
name_shape_map = reader.get_variable_to_shape_map()
new_variable_map = {}
conversion_map = {}
for var_name in name_shape_map:
if exclude_patterns and _has_exclude_patterns(var_name, exclude_patterns):
continue
# Get the original tensor data.
tensor = reader.get_tensor(var_name)
# Look up the new variable name, if any.
new_var_name = _bert_name_replacement(var_name, name_replacements)
# See if we need to reshape the underlying tensor.
new_shape = None
if bert_config.num_attention_heads > 0:
new_shape = _get_new_shape(new_var_name, tensor.shape,
bert_config.num_attention_heads)
if new_shape:
logging.info("Veriable %s has a shape change from %s to %s",
var_name, tensor.shape, new_shape)
tensor = np.reshape(tensor, new_shape)
# See if we need to permute the underlying tensor.
permutation = _get_permutation(var_name, permutations)
if permutation:
tensor = np.transpose(tensor, permutation)
# Create a new variable with the possibly-reshaped or transposed tensor.
var = tf.Variable(tensor, name=var_name)
# Save the variable into the new variable map.
new_variable_map[new_var_name] = var
# Keep a list of converter variables for sanity checking.
if new_var_name != var_name:
conversion_map[var_name] = new_var_name
saver = tf.train.Saver(new_variable_map)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
logging.info("Writing checkpoint_to_path %s", temporary_checkpoint)
saver.save(sess, temporary_checkpoint, write_meta_graph=False)
logging.info("Summary:")
logging.info("Converted %d variable name(s).", len(new_variable_map))
logging.info("Converted: %s", str(conversion_map))
mobilebert_model = model_utils.create_mobilebert_pretrainer(bert_config)
create_v2_checkpoint(
mobilebert_model, temporary_checkpoint, checkpoint_to_path)
# Clean up the temporary checkpoint, if it exists.
try:
tf.io.gfile.rmtree(temporary_checkpoint_dir)
except tf.errors.OpError:
# If it doesn't exist, we don't need to clean it up; continue.
pass
def create_v2_checkpoint(model, src_checkpoint, output_path):
"""Converts a name-based matched TF V1 checkpoint to TF V2 checkpoint."""
# Uses streaming-restore in eager model to read V1 name-based checkpoints.
model.load_weights(src_checkpoint).assert_existing_objects_matched()
checkpoint = tf.train.Checkpoint(**model.checkpoint_items)
checkpoint.save(output_path)
_NAME_REPLACEMENT = [
# prefix path replacement
("bert/", "mobile_bert_encoder/"),
("encoder/layer_", "transformer_layer_"),
# embedding layer
("embeddings/embedding_transformation",
"mobile_bert_embedding/embedding_projection"),
("embeddings/position_embeddings",
"mobile_bert_embedding/position_embedding/embeddings"),
("embeddings/token_type_embeddings",
"mobile_bert_embedding/type_embedding/embeddings"),
("embeddings/word_embeddings",
"mobile_bert_embedding/word_embedding/embeddings"),
("embeddings/FakeLayerNorm", "mobile_bert_embedding/embedding_norm"),
("embeddings/LayerNorm", "mobile_bert_embedding/embedding_norm"),
# attention layer
("attention/output/dense", "attention/attention_output"),
("attention/output/FakeLayerNorm", "attention/norm"),
("attention/output/LayerNorm", "attention/norm"),
("attention/self", "attention"),
# input bottleneck
("bottleneck/input/dense", "bottleneck_input/dense"),
("bottleneck/input/FakeLayerNorm", "bottleneck_input/norm"),
("bottleneck/input/LayerNorm", "bottleneck_input/norm"),
("bottleneck/attention/dense", "kq_shared_bottleneck/dense"),
("bottleneck/attention/FakeLayerNorm", "kq_shared_bottleneck/norm"),
("bottleneck/attention/LayerNorm", "kq_shared_bottleneck/norm"),
# ffn layer
("ffn_layer_0/output/dense", "ffn_layer_0/output_dense"),
("ffn_layer_1/output/dense", "ffn_layer_1/output_dense"),
("ffn_layer_2/output/dense", "ffn_layer_2/output_dense"),
("output/dense", "ffn_layer_LAST_FFN_LAYER_ID/output_dense"),
("ffn_layer_0/output/FakeLayerNorm", "ffn_layer_0/norm"),
("ffn_layer_0/output/LayerNorm", "ffn_layer_0/norm"),
("ffn_layer_1/output/FakeLayerNorm", "ffn_layer_1/norm"),
("ffn_layer_1/output/LayerNorm", "ffn_layer_1/norm"),
("ffn_layer_2/output/FakeLayerNorm", "ffn_layer_2/norm"),
("ffn_layer_2/output/LayerNorm", "ffn_layer_2/norm"),
("output/FakeLayerNorm", "ffn_layer_LAST_FFN_LAYER_ID/norm"),
("output/LayerNorm", "ffn_layer_LAST_FFN_LAYER_ID/norm"),
("ffn_layer_0/intermediate/dense", "ffn_layer_0/intermediate_dense"),
("ffn_layer_1/intermediate/dense", "ffn_layer_1/intermediate_dense"),
("ffn_layer_2/intermediate/dense", "ffn_layer_2/intermediate_dense"),
("intermediate/dense", "ffn_layer_LAST_FFN_LAYER_ID/intermediate_dense"),
# output bottleneck
("output/bottleneck/FakeLayerNorm", "bottleneck_output/norm"),
("output/bottleneck/LayerNorm", "bottleneck_output/norm"),
("output/bottleneck/dense", "bottleneck_output/dense"),
# pooler layer
("pooler/dense", "pooler"),
# MLM layer
("cls/predictions", "bert/cls/predictions"),
("cls/predictions/output_bias", "cls/predictions/output_bias/bias")
]
_EXCLUDE_PATTERNS = ["cls/seq_relationship", "global_step"]
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
if not FLAGS.use_model_prefix:
_NAME_REPLACEMENT[0] = ("bert/", "")
bert_config = model_utils.BertConfig.from_json_file(FLAGS.bert_config_file)
convert(FLAGS.tf1_checkpoint_path,
FLAGS.tf2_checkpoint_path,
_NAME_REPLACEMENT,
[],
bert_config,
_EXCLUDE_PATTERNS)
if __name__ == "__main__":
app.run(main)
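# A hedged example invocation (paths are illustrative):
#
#   python3 -m official.projects.mobilebert.tf2_model_checkpoint_converter \
#     --bert_config_file=/path/to/bert_config.json \
#     --tf1_checkpoint_path=/path/to/tf1/model.ckpt \
#     --tf2_checkpoint_path=/path/to/tf2/ckpt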
| 10,709 | 37.387097 | 80 | py |
models | models-master/official/projects/mobilebert/run_distillation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
"""Creating the task and start trainer."""
import pprint
from absl import app
from absl import flags
from absl import logging
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import config_definitions as cfg
from official.core import train_utils
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling import performance
from official.modeling.fast_training.progressive import train_lib
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib
from official.nlp.data import pretrain_dataloader
from official.projects.mobilebert import distillation
FLAGS = flags.FLAGS
optimization_config = optimization.OptimizationConfig(
optimizer=optimization.OptimizerConfig(
type='lamb',
lamb=optimization.LAMBConfig(
weight_decay_rate=0.01,
exclude_from_weight_decay=['LayerNorm', 'bias', 'norm'],
clipnorm=1.0)),
learning_rate=optimization.LrConfig(
type='polynomial',
polynomial=optimization.PolynomialLrConfig(
initial_learning_rate=1.5e-3,
decay_steps=10000,
end_learning_rate=1.5e-3)),
warmup=optimization.WarmupConfig(
type='linear',
linear=optimization.LinearWarmupConfig(warmup_learning_rate=0)))
# Copied from progressive/utils.py due to its private visibility.
def config_override(params, flags_obj):
"""Override ExperimentConfig according to flags."""
# Change runtime.tpu to the real tpu.
params.override({
'runtime': {
'tpu': flags_obj.tpu,
}
})
# Get the first level of override from `--config_file`.
# `--config_file` is typically used as a template that specifies the common
# override for a particular experiment.
for config_file in flags_obj.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
# Get the second level of override from `--params_override`.
# `--params_override` is typically used as a further override over the
# template. For example, one may define a particular template for training
# ResNet50 on ImageNet in a config file and pass it via `--config_file`,
# then define different learning rates and pass it via `--params_override`.
if flags_obj.params_override:
params = hyperparams.override_params_dict(
params, flags_obj.params_override, is_strict=True)
params.validate()
params.lock()
pp = pprint.PrettyPrinter()
logging.info('Final experiment parameters: %s', pp.pformat(params.as_dict()))
model_dir = flags_obj.model_dir
if 'train' in flags_obj.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
return params
def get_exp_config():
"""Get ExperimentConfig."""
params = cfg.ExperimentConfig(
task=distillation.BertDistillationTaskConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=prog_trainer_lib.ProgressiveTrainerConfig(
progressive=distillation.BertDistillationProgressiveConfig(),
optimizer_config=optimization_config,
train_steps=740000,
checkpoint_interval=20000))
return config_override(params, FLAGS)
def main(_):
logging.info('Parsing config files...')
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = get_exp_config()
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = distillation.BertDistillationTask(
strategy=distribution_strategy,
progressive=params.trainer.progressive,
optimizer_config=params.trainer.optimizer_config,
task_config=params.task)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=FLAGS.model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
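# A hedged example invocation (the override file is hypothetical; flags come
# from `tfm_flags.define_flags()`, and the experiment config is built in
# `get_exp_config` rather than looked up by name):
#
#   python3 -m official.projects.mobilebert.run_distillation \
#     --mode=train \
#     --model_dir=/tmp/mobilebert_distill \
#     --config_file=/path/to/distill_override.yaml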
| 5,486 | 36.074324 | 83 | py |
models | models-master/official/projects/mobilebert/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import numpy as np
def generate_fake_input(batch_size=1, seq_len=5, vocab_size=10000, seed=0):
"""Generate consistent fake integer input sequences."""
np.random.seed(seed)
fake_input = []
for _ in range(batch_size):
fake_input.append([])
for _ in range(seq_len):
fake_input[-1].append(np.random.randint(0, vocab_size))
fake_input = np.asarray(fake_input)
return fake_input
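# For example (deterministic because of the fixed seed):
#
#   generate_fake_input(batch_size=2, seq_len=3, vocab_size=10)
#   # -> an int array of shape (2, 3) with values in [0, 10)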
| 1,036 | 33.566667 | 75 | py |
models | models-master/official/projects/mobilebert/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/mobilebert/distillation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Progressive distillation for MobileBERT student model."""
import dataclasses
from typing import List, Optional
from absl import logging
import orbit
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.fast_training.progressive import policies
from official.modeling.hyperparams import base_config
from official.nlp import modeling
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class LayerWiseDistillConfig(base_config.Config):
"""Defines the behavior of layerwise distillation."""
num_steps: int = 10000
warmup_steps: int = 0
initial_learning_rate: float = 1.5e-3
end_learning_rate: float = 1.5e-3
decay_steps: int = 10000
hidden_distill_factor: float = 100.0
beta_distill_factor: float = 5000.0
gamma_distill_factor: float = 5.0
if_transfer_attention: bool = True
attention_distill_factor: float = 1.0
if_freeze_previous_layers: bool = False
# The ids of teacher layers that will be mapped to the student model.
# For example, if you want to compress a 24 layer teacher to a 6 layer
# student, you can set it to [3, 7, 11, 15, 19, 23] (the index starts from 0).
# If `None`, we assume teacher and student have the same number of layers,
# and each layer of teacher model will be mapped to student's corresponding
# layer.
transfer_teacher_layers: Optional[List[int]] = None
@dataclasses.dataclass
class PretrainDistillConfig(base_config.Config):
"""Defines the behavior of pretrain distillation."""
num_steps: int = 500000
warmup_steps: int = 10000
initial_learning_rate: float = 1.5e-3
end_learning_rate: float = 1.5e-7
decay_steps: int = 500000
if_use_nsp_loss: bool = True
distill_ground_truth_ratio: float = 0.5
@dataclasses.dataclass
class BertDistillationProgressiveConfig(policies.ProgressiveConfig):
"""Defines the specific distillation behavior."""
if_copy_embeddings: bool = True
layer_wise_distill_config: LayerWiseDistillConfig = dataclasses.field(
default_factory=LayerWiseDistillConfig
)
pretrain_distill_config: PretrainDistillConfig = dataclasses.field(
default_factory=PretrainDistillConfig
)
@dataclasses.dataclass
class BertDistillationTaskConfig(cfg.TaskConfig):
"""Defines the teacher/student model architecture and training data."""
teacher_model: bert.PretrainerConfig = dataclasses.field(
default_factory=lambda: bert.PretrainerConfig( # pylint: disable=g-long-lambda
encoder=encoders.EncoderConfig(type='mobilebert')
)
)
student_model: bert.PretrainerConfig = dataclasses.field(
default_factory=lambda: bert.PretrainerConfig( # pylint: disable=g-long-lambda
encoder=encoders.EncoderConfig(type='mobilebert')
)
)
# The path to the teacher model checkpoint or its directory.
teacher_model_init_checkpoint: str = ''
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
def build_sub_encoder(encoder, target_layer_id):
"""Builds an encoder that only computes first few transformer layers."""
input_ids = encoder.inputs[0]
input_mask = encoder.inputs[1]
type_ids = encoder.inputs[2]
attention_mask = modeling.layers.SelfAttentionMask()(
inputs=input_ids, to_mask=input_mask)
embedding_output = encoder.embedding_layer(input_ids, type_ids)
layer_output = embedding_output
attention_score = None
for layer_idx in range(target_layer_id + 1):
layer_output, attention_score = encoder.transformer_layers[layer_idx](
layer_output, attention_mask, return_attention_scores=True)
return tf.keras.Model(
inputs=[input_ids, input_mask, type_ids],
outputs=[layer_output, attention_score])
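# Usage sketch (illustrative; the encoder and input tensors are assumed): the
# returned model runs the embedding layer plus transformer layers
# 0..target_layer_id and outputs that layer's hidden states and attention
# scores, which the distillation losses below consume.
#
#   sub_encoder = build_sub_encoder(encoder=student_encoder, target_layer_id=2)
#   feature, attention = sub_encoder([input_ids, input_mask, type_ids])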
class BertDistillationTask(policies.ProgressivePolicy, base_task.Task):
"""Distillation language modeling task progressively."""
def __init__(self,
strategy,
progressive: BertDistillationProgressiveConfig,
optimizer_config: optimization.OptimizationConfig,
task_config: BertDistillationTaskConfig,
logging_dir=None):
self._strategy = strategy
self._task_config = task_config
self._progressive_config = progressive
self._optimizer_config = optimizer_config
self._train_data_config = task_config.train_data
self._eval_data_config = task_config.validation_data
self._the_only_train_dataset = None
self._the_only_eval_dataset = None
layer_wise_config = self._progressive_config.layer_wise_distill_config
transfer_teacher_layers = layer_wise_config.transfer_teacher_layers
num_teacher_layers = (
self._task_config.teacher_model.encoder.mobilebert.num_blocks)
num_student_layers = (
self._task_config.student_model.encoder.mobilebert.num_blocks)
if transfer_teacher_layers and len(
transfer_teacher_layers) != num_student_layers:
      raise ValueError('The number of `transfer_teacher_layers` %s does not '
                       'match the number of student layers (%d).' %
                       (transfer_teacher_layers, num_student_layers))
if not transfer_teacher_layers and (num_teacher_layers !=
num_student_layers):
raise ValueError('`transfer_teacher_layers` is not specified, and the '
'number of teacher layers does not match '
'the number of student layers.')
ratio = progressive.pretrain_distill_config.distill_ground_truth_ratio
if ratio < 0 or ratio > 1:
raise ValueError('distill_ground_truth_ratio has to be within [0, 1].')
# A non-trainable layer for feature normalization for transfer loss
self._layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
beta_initializer='zeros',
gamma_initializer='ones',
trainable=False)
# Build the teacher and student pretrainer model.
self._teacher_pretrainer = self._build_pretrainer(
self._task_config.teacher_model, name='teacher')
self._student_pretrainer = self._build_pretrainer(
self._task_config.student_model, name='student')
base_task.Task.__init__(
self, params=task_config, logging_dir=logging_dir)
policies.ProgressivePolicy.__init__(self)
def _build_pretrainer(self, pretrainer_cfg: bert.PretrainerConfig, name: str):
"""Builds pretrainer from config and encoder."""
encoder = encoders.build_encoder(pretrainer_cfg.encoder)
if pretrainer_cfg.cls_heads:
cls_heads = [
layers.ClassificationHead(**cfg.as_dict())
for cfg in pretrainer_cfg.cls_heads
]
else:
cls_heads = []
masked_lm = layers.MobileBertMaskedLM(
embedding_table=encoder.get_embedding_table(),
activation=tf_utils.get_activation(pretrainer_cfg.mlm_activation),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=pretrainer_cfg.mlm_initializer_range),
name='cls/predictions')
pretrainer = models.BertPretrainerV2(
encoder_network=encoder,
classification_heads=cls_heads,
customized_masked_lm=masked_lm,
name=name)
return pretrainer
  # overrides policies.ProgressivePolicy
  def num_stages(self):
    # One stage for each layer, plus an additional stage for pre-training.
return self._task_config.student_model.encoder.mobilebert.num_blocks + 1
  # overrides policies.ProgressivePolicy
def num_steps(self, stage_id) -> int:
"""Return the total number of steps in this stage."""
if stage_id + 1 < self.num_stages():
return self._progressive_config.layer_wise_distill_config.num_steps
else:
return self._progressive_config.pretrain_distill_config.num_steps
  # overrides policies.ProgressivePolicy
def get_model(self, stage_id, old_model=None) -> tf.keras.Model:
del old_model
return self.build_model(stage_id)
  # overrides policies.ProgressivePolicy
def get_optimizer(self, stage_id):
"""Build optimizer for each stage."""
if stage_id + 1 < self.num_stages():
distill_config = self._progressive_config.layer_wise_distill_config
else:
distill_config = self._progressive_config.pretrain_distill_config
params = self._optimizer_config.replace(
learning_rate={
'polynomial': {
'decay_steps':
distill_config.decay_steps,
'initial_learning_rate':
distill_config.initial_learning_rate,
'end_learning_rate':
distill_config.end_learning_rate,
}
},
warmup={
'linear':
{'warmup_steps':
distill_config.warmup_steps,
}
})
opt_factory = optimization.OptimizerFactory(params)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if isinstance(optimizer, tf.keras.optimizers.experimental.Optimizer):
optimizer = tf.keras.__internal__.optimizers.convert_to_legacy_optimizer(
optimizer)
return optimizer
  # overrides policies.ProgressivePolicy
def get_train_dataset(self, stage_id: int) -> tf.data.Dataset:
"""Return Dataset for this stage."""
del stage_id
if self._the_only_train_dataset is None:
self._the_only_train_dataset = orbit.utils.make_distributed_dataset(
self._strategy, self.build_inputs, self._train_data_config)
return self._the_only_train_dataset
# overrides policies.ProgressivePolicy
def get_eval_dataset(self, stage_id):
del stage_id
if self._the_only_eval_dataset is None:
self._the_only_eval_dataset = orbit.utils.make_distributed_dataset(
self._strategy, self.build_inputs, self._eval_data_config)
return self._the_only_eval_dataset
  # overrides base_task.Task
def build_model(self, stage_id) -> tf.keras.Model:
"""Build teacher/student keras models with outputs for current stage."""
# Freeze the teacher model.
self._teacher_pretrainer.trainable = False
layer_wise_config = self._progressive_config.layer_wise_distill_config
freeze_previous_layers = layer_wise_config.if_freeze_previous_layers
student_encoder = self._student_pretrainer.encoder_network
if stage_id != self.num_stages() - 1:
# Build a model that outputs teacher's and student's transformer outputs.
inputs = student_encoder.inputs
student_sub_encoder = build_sub_encoder(
encoder=student_encoder, target_layer_id=stage_id)
student_output_feature, student_attention_score = student_sub_encoder(
inputs)
if layer_wise_config.transfer_teacher_layers:
teacher_layer_id = layer_wise_config.transfer_teacher_layers[stage_id]
else:
teacher_layer_id = stage_id
teacher_sub_encoder = build_sub_encoder(
encoder=self._teacher_pretrainer.encoder_network,
target_layer_id=teacher_layer_id)
teacher_output_feature, teacher_attention_score = teacher_sub_encoder(
inputs)
if freeze_previous_layers:
student_encoder.embedding_layer.trainable = False
for i in range(stage_id):
student_encoder.transformer_layers[i].trainable = False
return tf.keras.Model(
inputs=inputs,
outputs=dict(
student_output_feature=student_output_feature,
student_attention_score=student_attention_score,
teacher_output_feature=teacher_output_feature,
teacher_attention_score=teacher_attention_score))
else:
# Build a model that outputs teacher's and student's MLM/NSP outputs.
inputs = self._student_pretrainer.inputs
student_pretrainer_output = self._student_pretrainer(inputs)
teacher_pretrainer_output = self._teacher_pretrainer(inputs)
# Set all student's transformer blocks to trainable.
if freeze_previous_layers:
student_encoder.embedding_layer.trainable = True
for layer in student_encoder.transformer_layers:
layer.trainable = True
model = tf.keras.Model(
inputs=inputs,
outputs=dict(
student_pretrainer_output=student_pretrainer_output,
teacher_pretrainer_output=teacher_pretrainer_output,
))
# Checkpoint the student encoder which is the goal of distillation.
model.checkpoint_items = self._student_pretrainer.checkpoint_items
return model
# overrides base_task.Task
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
# copy from masked_lm.py for testing
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def _get_distribution_losses(self, teacher, student):
"""Return the beta and gamma distall losses for feature distribution."""
teacher_mean = tf.math.reduce_mean(teacher, axis=-1, keepdims=True)
student_mean = tf.math.reduce_mean(student, axis=-1, keepdims=True)
teacher_var = tf.math.reduce_variance(teacher, axis=-1, keepdims=True)
student_var = tf.math.reduce_variance(student, axis=-1, keepdims=True)
beta_loss = tf.math.squared_difference(student_mean, teacher_mean)
beta_loss = tf.math.reduce_mean(beta_loss, axis=None, keepdims=False)
gamma_loss = tf.math.abs(student_var - teacher_var)
gamma_loss = tf.math.reduce_mean(gamma_loss, axis=None, keepdims=False)
return beta_loss, gamma_loss
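  # Worked example (illustrative; the tensors are assumptions): with teacher
  # activations [[1., 3.]] and student activations [[0., 2.]], the means are
  # 2.0 and 1.0 and both variances are 1.0, so beta_loss == 1.0 and
  # gamma_loss == 0.0:
  #
  #   beta, gamma = self._get_distribution_losses(
  #       teacher=tf.constant([[1., 3.]]), student=tf.constant([[0., 2.]]))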
def _get_attention_loss(self, teacher_score, student_score):
# Note that the definition of KLDivergence here is a little different from
# the original one (tf.keras.losses.KLDivergence). We adopt this approach
# to stay consistent with the TF1 implementation.
teacher_weight = tf.keras.activations.softmax(teacher_score, axis=-1)
student_log_weight = tf.nn.log_softmax(student_score, axis=-1)
kl_divergence = -(teacher_weight * student_log_weight)
kl_divergence = tf.math.reduce_sum(kl_divergence, axis=-1, keepdims=True)
kl_divergence = tf.math.reduce_mean(kl_divergence, axis=None,
keepdims=False)
return kl_divergence
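  # Equivalence sketch (illustrative; assumes `teacher_score` and
  # `student_score` are attention logits): the quantity above is the
  # cross-entropy H(teacher, student) = KL(teacher || student) + H(teacher)
  # averaged over all positions. Since H(teacher) does not depend on the
  # student, its gradients match those of a true KL divergence:
  #
  #   p = tf.keras.activations.softmax(teacher_score, axis=-1)
  #   ce = -tf.reduce_sum(p * tf.nn.log_softmax(student_score, axis=-1), -1)
  #   loss = tf.reduce_mean(ce)  # same value as _get_attention_loss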
def build_losses(self, labels, outputs, metrics) -> tf.Tensor:
"""Builds losses and update loss-related metrics for the current stage."""
last_stage = 'student_pretrainer_output' in outputs
# Layer-wise warmup stage
if not last_stage:
distill_config = self._progressive_config.layer_wise_distill_config
teacher_feature = outputs['teacher_output_feature']
student_feature = outputs['student_output_feature']
feature_transfer_loss = tf.keras.losses.mean_squared_error(
self._layer_norm(teacher_feature), self._layer_norm(student_feature))
feature_transfer_loss *= distill_config.hidden_distill_factor
beta_loss, gamma_loss = self._get_distribution_losses(teacher_feature,
student_feature)
beta_loss *= distill_config.beta_distill_factor
gamma_loss *= distill_config.gamma_distill_factor
total_loss = feature_transfer_loss + beta_loss + gamma_loss
if distill_config.if_transfer_attention:
teacher_attention = outputs['teacher_attention_score']
student_attention = outputs['student_attention_score']
attention_loss = self._get_attention_loss(teacher_attention,
student_attention)
attention_loss *= distill_config.attention_distill_factor
total_loss += attention_loss
total_loss /= tf.cast((self._stage_id + 1), tf.float32)
# Last stage to distill pretraining layer.
else:
distill_config = self._progressive_config.pretrain_distill_config
lm_label = labels['masked_lm_ids']
vocab_size = (
self._task_config.student_model.encoder.mobilebert.word_vocab_size)
# Shape: [batch, max_predictions_per_seq, vocab_size]
lm_label = tf.one_hot(indices=lm_label, depth=vocab_size, on_value=1.0,
off_value=0.0, axis=-1, dtype=tf.float32)
gt_ratio = distill_config.distill_ground_truth_ratio
if gt_ratio != 1.0:
teacher_mlm_logits = outputs['teacher_pretrainer_output']['mlm_logits']
teacher_labels = tf.nn.softmax(teacher_mlm_logits, axis=-1)
lm_label = gt_ratio * lm_label + (1-gt_ratio) * teacher_labels
student_pretrainer_output = outputs['student_pretrainer_output']
# Shape: [batch, max_predictions_per_seq, vocab_size]
student_lm_log_probs = tf.nn.log_softmax(
student_pretrainer_output['mlm_logits'], axis=-1)
# Shape: [batch * max_predictions_per_seq]
per_example_loss = tf.reshape(
-tf.reduce_sum(student_lm_log_probs * lm_label, axis=[-1]), [-1])
lm_label_weights = tf.reshape(labels['masked_lm_weights'], [-1])
lm_numerator_loss = tf.reduce_sum(per_example_loss * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
total_loss = mlm_loss
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
student_pretrainer_output['next_sentence'], dtype=tf.float32)
sentence_loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
total_loss += sentence_loss
# Also update loss-related metrics here, instead of in `process_metrics`.
metrics = dict([(metric.name, metric) for metric in metrics])
if not last_stage:
metrics['feature_transfer_mse'].update_state(feature_transfer_loss)
metrics['beta_transfer_loss'].update_state(beta_loss)
metrics['gamma_transfer_loss'].update_state(gamma_loss)
layer_wise_config = self._progressive_config.layer_wise_distill_config
if layer_wise_config.if_transfer_attention:
metrics['attention_transfer_loss'].update_state(attention_loss)
else:
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
metrics['next_sentence_loss'].update_state(sentence_loss)
metrics['total_loss'].update_state(total_loss)
return total_loss
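  # Sketch of the last-stage soft labels (illustrative; `ids`, `vocab_size`
  # and `r` stand in for the values above): with distill_ground_truth_ratio r,
  # the MLM target mixes one-hot ground truth with the teacher's distribution,
  #
  #   lm_label = (r * tf.one_hot(ids, vocab_size)
  #               + (1. - r) * tf.nn.softmax(teacher_mlm_logits, axis=-1))
  #
  # so r == 1.0 recovers plain MLM training and r == 0.0 trains purely on the
  # teacher's predictions.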
# overrides base_task.Task
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.Mean(name='feature_transfer_mse'),
tf.keras.metrics.Mean(name='beta_transfer_loss'),
tf.keras.metrics.Mean(name='gamma_transfer_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='lm_example_loss'),
tf.keras.metrics.Mean(name='total_loss')]
if self._progressive_config.layer_wise_distill_config.if_transfer_attention:
metrics.append(tf.keras.metrics.Mean(name='attention_transfer_loss'))
if self._task_config.train_data.use_next_sentence_label:
metrics.append(tf.keras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))
return metrics
# overrides base_task.Task
# process non-loss metrics
def process_metrics(self, metrics, labels, student_pretrainer_output):
metrics = dict([(metric.name, metric) for metric in metrics])
# Final pretrainer layer distillation stage.
if student_pretrainer_output is not None:
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(
labels['masked_lm_ids'], student_pretrainer_output['mlm_logits'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'],
student_pretrainer_output['next_sentence'])
# overrides base_task.Task
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
outputs=outputs,
metrics=metrics)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
# TODO(b/154564893): enable loss scaling.
# scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
# get trainable variables for current stage
tvars = model.trainable_variables
last_stage = 'student_pretrainer_output' in outputs
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(
metrics, inputs,
outputs['student_pretrainer_output'] if last_stage else None)
return {self.loss: loss}
# overrides base_task.Task
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = model(inputs, training=False)
# Computes per-replica loss.
loss = self.build_losses(labels=inputs, outputs=outputs, metrics=metrics)
last_stage = 'student_pretrainer_output' in outputs
self.process_metrics(
metrics, inputs,
outputs['student_pretrainer_output'] if last_stage else None)
return {self.loss: loss}
@property
def cur_checkpoint_items(self):
"""Checkpoints for model, stage_id, optimizer for preemption handling."""
return dict(
stage_id=self._stage_id,
volatiles=self._volatiles,
student_pretrainer=self._student_pretrainer,
teacher_pretrainer=self._teacher_pretrainer,
encoder=self._student_pretrainer.encoder_network)
def initialize(self, model):
"""Loads teacher's pretrained checkpoint and copy student's embedding."""
    # This function will be called when no checkpoint is found for the model,
    # i.e., when training starts (not the preemption case).
    # The weights of the teacher and student pretrainers will be initialized,
    # rather than those of the passed-in `model`.
del model
logging.info('Begin to load checkpoint for teacher pretrainer model.')
ckpt_dir_or_file = self._task_config.teacher_model_init_checkpoint
if not ckpt_dir_or_file:
raise ValueError('`teacher_model_init_checkpoint` is not specified.')
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Makes sure the teacher pretrainer variables are created.
_ = self._teacher_pretrainer(self._teacher_pretrainer.inputs)
teacher_checkpoint = tf.train.Checkpoint(
**self._teacher_pretrainer.checkpoint_items)
teacher_checkpoint.read(ckpt_dir_or_file).assert_existing_objects_matched()
logging.info('Begin to copy word embedding from teacher model to student.')
teacher_encoder = self._teacher_pretrainer.encoder_network
student_encoder = self._student_pretrainer.encoder_network
embedding_weights = teacher_encoder.embedding_layer.get_weights()
student_encoder.embedding_layer.set_weights(embedding_weights)
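# Stage schedule sketch (illustrative summary of the task above): for a
# student with N transformer blocks,
#
#   stages 0 .. N-1 : layer-wise distillation of block `stage_id` (features,
#                     feature distributions, optionally attention maps)
#   stage N         : pretraining distillation of the MLM (and NSP) heads
#
# `initialize()` loads the teacher checkpoint and copies its word embeddings
# into the student before stage 0 starts.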
| 25420 | 40.948845 | 85 | py |
models | models-master/official/projects/mobilebert/distillation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.mobilebert.distillation."""
import os
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.nlp.modeling import models
from official.projects.mobilebert import distillation
class DistillationTest(tf.test.TestCase, parameterized.TestCase):
def prepare_config(self, teacher_block_num, student_block_num,
transfer_teacher_layers):
# using small model for testing
task_config = distillation.BertDistillationTaskConfig(
teacher_model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
type='mobilebert',
mobilebert=encoders.MobileBertEncoderConfig(
num_blocks=teacher_block_num)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=256,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
],
mlm_activation='gelu'),
student_model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
type='mobilebert',
mobilebert=encoders.MobileBertEncoderConfig(
num_blocks=student_block_num)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=256,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
],
mlm_activation='relu'),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
max_predictions_per_seq=76,
seq_length=512,
global_batch_size=10),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
max_predictions_per_seq=76,
seq_length=512,
global_batch_size=10))
# set only 1 step for each stage
progressive_config = distillation.BertDistillationProgressiveConfig()
progressive_config.layer_wise_distill_config.transfer_teacher_layers = (
transfer_teacher_layers)
progressive_config.layer_wise_distill_config.num_steps = 1
progressive_config.pretrain_distill_config.num_steps = 1
optimization_config = optimization.OptimizationConfig(
optimizer=optimization.OptimizerConfig(
type='lamb',
lamb=optimization.LAMBConfig(
weight_decay_rate=0.0001,
exclude_from_weight_decay=[
'LayerNorm', 'layer_norm', 'bias', 'no_norm'
])),
learning_rate=optimization.LrConfig(
type='polynomial',
polynomial=optimization.PolynomialLrConfig(
initial_learning_rate=1.5e-3,
decay_steps=10000,
end_learning_rate=1.5e-3)),
warmup=optimization.WarmupConfig(
type='linear',
linear=optimization.LinearWarmupConfig(warmup_learning_rate=0)))
exp_config = cfg.ExperimentConfig(
task=task_config,
trainer=prog_trainer_lib.ProgressiveTrainerConfig(
progressive=progressive_config,
optimizer_config=optimization_config))
# Create a teacher model checkpoint.
teacher_encoder = encoders.build_encoder(task_config.teacher_model.encoder)
pretrainer_config = task_config.teacher_model
if pretrainer_config.cls_heads:
teacher_cls_heads = [
layers.ClassificationHead(**cfg.as_dict())
for cfg in pretrainer_config.cls_heads
]
else:
teacher_cls_heads = []
masked_lm = layers.MobileBertMaskedLM(
embedding_table=teacher_encoder.get_embedding_table(),
activation=tf_utils.get_activation(pretrainer_config.mlm_activation),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=pretrainer_config.mlm_initializer_range),
name='cls/predictions')
teacher_pretrainer = models.BertPretrainerV2(
encoder_network=teacher_encoder,
classification_heads=teacher_cls_heads,
customized_masked_lm=masked_lm)
# The model variables will be created after the forward call.
_ = teacher_pretrainer(teacher_pretrainer.inputs)
teacher_pretrainer_ckpt = tf.train.Checkpoint(
**teacher_pretrainer.checkpoint_items)
teacher_ckpt_path = os.path.join(self.get_temp_dir(), 'teacher_model.ckpt')
teacher_pretrainer_ckpt.save(teacher_ckpt_path)
exp_config.task.teacher_model_init_checkpoint = self.get_temp_dir()
return exp_config
@parameterized.parameters((2, 2, None), (4, 2, [1, 3]))
def test_task(self, teacher_block_num, student_block_num,
transfer_teacher_layers):
exp_config = self.prepare_config(teacher_block_num, student_block_num,
transfer_teacher_layers)
bert_distillation_task = distillation.BertDistillationTask(
strategy=tf.distribute.get_strategy(),
progressive=exp_config.trainer.progressive,
optimizer_config=exp_config.trainer.optimizer_config,
task_config=exp_config.task)
metrics = bert_distillation_task.build_metrics()
train_dataset = bert_distillation_task.get_train_dataset(stage_id=0)
train_iterator = iter(train_dataset)
eval_dataset = bert_distillation_task.get_eval_dataset(stage_id=0)
eval_iterator = iter(eval_dataset)
optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=0.1)
# test train/val step for all stages, including the last pretraining stage
for stage in range(student_block_num + 1):
step = stage
bert_distillation_task.update_pt_stage(step)
model = bert_distillation_task.get_model(stage, None)
bert_distillation_task.initialize(model)
bert_distillation_task.train_step(next(train_iterator), model, optimizer,
metrics=metrics)
bert_distillation_task.validation_step(next(eval_iterator), model,
metrics=metrics)
logging.info('begin to save and load model checkpoint')
ckpt = tf.train.Checkpoint(model=model)
ckpt.save(self.get_temp_dir())
if __name__ == '__main__':
tf.test.main()
| 7257 | 40.474286 | 83 | py |
models | models-master/official/projects/deepmac_maskrcnn/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/deepmac_maskrcnn/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
from absl import flags
from absl import logging
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# pylint: disable=unused-import
from official.projects.deepmac_maskrcnn.common import registry_imports
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
  # Sets the mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can have a significant impact on model speed by utilizing float16 on GPUs
  # and bfloat16 on TPUs. loss_scale takes effect only when dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
logging.info('Training with task %s', task)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 2630 | 35.541667 | 80 | py |
models | models-master/official/projects/deepmac_maskrcnn/common/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports to configure Mask R-CNN with deep mask heads."""
# pylint: disable=unused-import
from official.projects.deepmac_maskrcnn.tasks import deep_mask_head_rcnn
| 775 | 39.842105 | 74 | py |
models | models-master/official/projects/deepmac_maskrcnn/common/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/deepmac_maskrcnn/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Deepmac model export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
CONFIG_FILE_PATH = XX
export_saved_model --export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--config_file=${CONFIG_FILE_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.deepmac_maskrcnn.serving import detection
from official.projects.deepmac_maskrcnn.tasks import deep_mask_head_rcnn # pylint: disable=unused-import
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string('experiment', 'deep_mask_head_rcnn_resnetfpn_coco',
'experiment type, e.g. retinanet_resnetfpn_coco')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
flags.DEFINE_integer('batch_size', None, 'The batch size.')
flags.DEFINE_string('input_type', 'image_tensor',
('One of `image_tensor`, `image_bytes`, `tf_example` '
'or `image_and_boxes_tensor`.'))
flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
def main(_):
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
export_module = detection.DetectionModule(
params=params,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
num_channels=3)
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
export_module=export_module,
export_checkpoint_subdir='checkpoint',
export_saved_model_subdir='saved_model')
if __name__ == '__main__':
app.run(main)
| 3991 | 36.308411 | 105 | py |
models | models-master/official/projects/deepmac_maskrcnn/serving/detection_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for image detection export lib."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.projects.deepmac_maskrcnn.serving import detection
class DetectionExportTest(tf.test.TestCase, parameterized.TestCase):
def _get_detection_module(self, experiment_name, image_size=(640, 640)):
params = exp_factory.get_exp_config(experiment_name)
params.task.model.backbone.resnet.model_id = 18
params.task.model.detection_generator.use_batched_nms = True
detection_module = detection.DetectionModule(
params, batch_size=1, input_image_size=list(image_size))
return detection_module
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module, save_directory, signatures=signatures)
def _get_dummy_input(self, input_type, batch_size, image_size):
"""Get dummy input for the given input type."""
h, w = image_size
if input_type == 'image_tensor':
return tf.zeros((batch_size, h, w, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
      return [byte_io.getvalue() for _ in range(batch_size)]
elif input_type == 'tf_example':
image_tensor = tf.zeros((h, w, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
      return [example for _ in range(batch_size)]
@parameterized.parameters(
('image_tensor', 'deep_mask_head_rcnn_resnetfpn_coco', [640, 640]),
('image_bytes', 'deep_mask_head_rcnn_resnetfpn_coco', [640, 384]),
('tf_example', 'deep_mask_head_rcnn_resnetfpn_coco', [640, 640]),
)
def test_export(self, input_type, experiment_name, image_size):
self.skipTest('a')
tmp_dir = self.get_temp_dir()
module = self._get_detection_module(experiment_name, image_size)
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
detection_fn = imported.signatures['serving_default']
images = self._get_dummy_input(
input_type, batch_size=1, image_size=image_size)
processed_images, anchor_boxes, image_info = module._build_inputs(
tf.zeros((224, 224, 3), dtype=tf.uint8))
image_shape = image_info[1, :]
image_shape = tf.expand_dims(image_shape, 0)
processed_images = tf.expand_dims(processed_images, 0)
for l, l_boxes in anchor_boxes.items():
anchor_boxes[l] = tf.expand_dims(l_boxes, 0)
expected_outputs = module.model(
images=processed_images,
image_shape=image_shape,
anchor_boxes=anchor_boxes,
training=False)
outputs = detection_fn(tf.constant(images))
self.assertAllClose(outputs['num_detections'].numpy(),
expected_outputs['num_detections'].numpy())
@parameterized.parameters(
('deep_mask_head_rcnn_resnetfpn_coco', [640, 640], 1),
('deep_mask_head_rcnn_resnetfpn_coco', [640, 640], 5),
('deep_mask_head_rcnn_spinenet_coco', [640, 384], 3),
('deep_mask_head_rcnn_spinenet_coco', [640, 384], 9),
)
def test_export_image_and_boxes(self, experiment_name, image_size, num_boxes):
tmp_dir = self.get_temp_dir()
module = self._get_detection_module(experiment_name)
self._export_from_module(module, 'image_and_boxes_tensor', tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
detection_fn = imported.signatures['serving_default']
images = self._get_dummy_input(
'image_tensor', batch_size=1, image_size=image_size)
processed_images, anchor_boxes, image_info = module._build_inputs(
tf.zeros(image_size + [3], dtype=tf.uint8))
image_shape = image_info[1, :]
image_shape = image_shape[tf.newaxis]
processed_images = processed_images[tf.newaxis]
image_info = image_info[tf.newaxis]
for l, l_boxes in anchor_boxes.items():
anchor_boxes[l] = tf.expand_dims(l_boxes, 0)
boxes = np.zeros((1, num_boxes, 4), dtype=np.float32)
boxes[:, :, [2, 3]] = 1.0
boxes = tf.constant(boxes)
denormalized_boxes = detection.reverse_input_box_transformation(
boxes, image_info)
expected_outputs = module.model.call_images_and_boxes(
images=processed_images, boxes=denormalized_boxes)
outputs = detection_fn(images=tf.constant(images), boxes=boxes)
self.assertAllClose(outputs['detection_masks'].numpy(),
expected_outputs['detection_masks'].numpy(),
rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
tf.test.main()
| 6352 | 37.50303 | 80 | py |
models | models-master/official/projects/deepmac_maskrcnn/serving/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/deepmac_maskrcnn/serving/detection.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detection input and model functions for serving/inference."""
from typing import Dict, Mapping, Text
import tensorflow as tf
from official.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn as cfg
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model
from official.projects.deepmac_maskrcnn.tasks import deep_mask_head_rcnn
from official.vision.ops import box_ops
from official.vision.serving import detection
def reverse_input_box_transformation(boxes, image_info):
"""Reverse the Mask R-CNN model's input boxes tranformation.
Args:
boxes: A [batch_size, num_boxes, 4] float tensor of boxes in normalized
coordinates.
image_info: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [desired_height, desired_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale] is
the scaling factor, which is the ratio of
scaled dimension / original dimension.
Returns:
boxes: Same shape as input `boxes` but in the absolute coordinate space of
the preprocessed image.
"""
  # Reverses the sequence from DetectionModule.serve when
  # output_normalized_coordinates=True.
scale = image_info[:, 2:3, :]
scale = tf.tile(scale, [1, 1, 2])
boxes = boxes * scale
height_width = image_info[:, 0:1, :]
return box_ops.denormalize_boxes(boxes, height_width)
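# Worked example (illustrative; the shapes and values are assumptions): for a
# 100x200 raw image resized to 640x640, image_info is
# [[100, 200], [640, 640], [6.4, 3.2], [0, 0]], so the normalized box
# [0, 0, 1, 1] is first scaled to [0, 0, 6.4, 3.2] and then denormalized
# against the original height/width, giving [0, 0, 640, 640], i.e. absolute
# coordinates in the preprocessed image:
#
#   boxes = tf.constant([[[0., 0., 1., 1.]]])
#   image_info = tf.constant([[[100., 200.], [640., 640.],
#                              [6.4, 3.2], [0., 0.]]])
#   reverse_input_box_transformation(boxes, image_info)
#   # -> [[[0., 0., 640., 640.]]]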
class DetectionModule(detection.DetectionModule):
"""Detection Module."""
def _build_model(self):
if self._batch_size is None:
ValueError("batch_size can't be None for detection models")
if self.params.task.model.detection_generator.nms_version != 'batched':
      raise ValueError('Only batched_nms is supported.')
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
if isinstance(self.params.task.model, cfg.DeepMaskHeadRCNN):
model = deep_mask_head_rcnn.build_maskrcnn(
input_specs=input_specs, model_config=self.params.task.model)
else:
raise ValueError('Detection module not implemented for {} model.'.format(
type(self.params.task.model)))
return model
@tf.function
def inference_for_tflite_image_and_boxes(
self, images: tf.Tensor, boxes: tf.Tensor) -> Mapping[str, tf.Tensor]:
"""A tf-function for serve_image_and_boxes.
Args:
images: A [batch_size, height, width, channels] float tensor.
boxes: A [batch_size, num_boxes, 4] float tensor containing boxes
normalized to the input image.
Returns:
result: A dict containing:
'detection_masks': A [batch_size, num_boxes, mask_height, mask_width]
float tensor containing per-pixel mask probabilities.
"""
if not isinstance(self.model, maskrcnn_model.DeepMaskRCNNModel):
raise ValueError(
('Can only use image and boxes input for DeepMaskRCNNModel, '
'Found {}'.format(type(self.model))))
return self.serve_image_and_boxes(images, boxes)
def serve_image_and_boxes(self, images: tf.Tensor, boxes: tf.Tensor):
"""Function used to export a model that consumes and image and boxes.
The model predicts the class-agnostic masks at the given box locations.
Args:
images: A [batch_size, height, width, channels] float tensor.
boxes: A [batch_size, num_boxes, 4] float tensor containing boxes
normalized to the input image.
Returns:
result: A dict containing:
'detection_masks': A [batch_size, num_boxes, mask_height, mask_width]
float tensor containing per-pixel mask probabilities.
"""
images, _, image_info = self.preprocess(images)
boxes = reverse_input_box_transformation(boxes, image_info)
result = self.model.call_images_and_boxes(images, boxes)
return result
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
signatures = {}
if 'image_and_boxes_tensor' in function_keys:
def_name = function_keys['image_and_boxes_tensor']
image_signature = tf.TensorSpec(
shape=[self._batch_size] + [None] * len(self._input_image_size) +
[self._num_channels],
dtype=tf.uint8)
boxes_signature = tf.TensorSpec(shape=[self._batch_size, None, 4],
dtype=tf.float32)
tf_function = self.inference_for_tflite_image_and_boxes
signatures[def_name] = tf_function.get_concrete_function(
image_signature, boxes_signature)
function_keys.pop('image_and_boxes_tensor', None)
parent_signatures = super(DetectionModule, self).get_inference_signatures(
function_keys)
signatures.update(parent_signatures)
return signatures
| 5494 | 38.25 | 81 | py |
models | models-master/official/projects/deepmac_maskrcnn/configs/deep_mask_head_rcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Mask R-CNN with deep mask heads."""
import dataclasses
import os
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.configs import backbones
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import maskrcnn as maskrcnn_config
from official.vision.configs import retinanet as retinanet_config
@dataclasses.dataclass
class DeepMaskHead(maskrcnn_config.MaskHead):
convnet_variant: str = 'default'
@dataclasses.dataclass
class DeepMaskHeadRCNN(maskrcnn_config.MaskRCNN):
mask_head: Optional[DeepMaskHead] = dataclasses.field(
default_factory=DeepMaskHead
)
use_gt_boxes_for_masks: bool = False
@dataclasses.dataclass
class DeepMaskHeadRCNNTask(maskrcnn_config.MaskRCNNTask):
"""Configuration for the deep mask head R-CNN task."""
model: DeepMaskHeadRCNN = dataclasses.field(default_factory=DeepMaskHeadRCNN)
@exp_factory.register_config_factory('deep_mask_head_rcnn_resnetfpn_coco')
def deep_mask_head_rcnn_resnetfpn_coco() -> cfg.ExperimentConfig:
"""COCO object detection with Mask R-CNN with deep mask heads."""
global_batch_size = 64
steps_per_epoch = int(retinanet_config.COCO_TRAIN_EXAMPLES /
global_batch_size)
coco_val_samples = 5000
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=DeepMaskHeadRCNNTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(maskrcnn_config.COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=DeepMaskHeadRCNN(
num_classes=91, input_size=[1024, 1024, 3], include_mask=True), # pytype: disable=wrong-keyword-args
losses=maskrcnn_config.Losses(l2_weight_decay=0.00004),
train_data=maskrcnn_config.DataConfig(
input_path=os.path.join(maskrcnn_config.COCO_INPUT_PATH_BASE,
'train*'),
is_training=True,
global_batch_size=global_batch_size,
parser=maskrcnn_config.Parser(
aug_rand_hflip=True, aug_scale_min=0.8, aug_scale_max=1.25)),
validation_data=maskrcnn_config.DataConfig(
input_path=os.path.join(maskrcnn_config.COCO_INPUT_PATH_BASE,
'val*'),
is_training=False,
global_batch_size=8)), # pytype: disable=wrong-keyword-args
trainer=cfg.TrainerConfig(
train_steps=22500,
validation_steps=coco_val_samples // 8,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [15000, 20000],
'values': [0.12, 0.012, 0.0012],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('deep_mask_head_rcnn_spinenet_coco')
def deep_mask_head_rcnn_spinenet_coco() -> cfg.ExperimentConfig:
"""COCO object detection with Mask R-CNN with SpineNet backbone."""
steps_per_epoch = 463
coco_val_samples = 5000
train_batch_size = 256
eval_batch_size = 8
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=DeepMaskHeadRCNNTask(
annotation_file=os.path.join(maskrcnn_config.COCO_INPUT_PATH_BASE,
'instances_val2017.json'), # pytype: disable=wrong-keyword-args
model=DeepMaskHeadRCNN(
backbone=backbones.Backbone(
type='spinenet',
spinenet=backbones.SpineNet(
model_id='49',
min_level=3,
max_level=7,
)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
anchor=maskrcnn_config.Anchor(anchor_size=3),
norm_activation=common.NormActivation(use_sync_bn=True),
num_classes=91,
input_size=[640, 640, 3],
min_level=3,
max_level=7,
include_mask=True), # pytype: disable=wrong-keyword-args
losses=maskrcnn_config.Losses(l2_weight_decay=0.00004),
train_data=maskrcnn_config.DataConfig(
input_path=os.path.join(maskrcnn_config.COCO_INPUT_PATH_BASE,
'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=maskrcnn_config.Parser(
aug_rand_hflip=True, aug_scale_min=0.5, aug_scale_max=2.0)),
validation_data=maskrcnn_config.DataConfig(
input_path=os.path.join(maskrcnn_config.COCO_INPUT_PATH_BASE,
'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)), # pytype: disable=wrong-keyword-args
trainer=cfg.TrainerConfig(
train_steps=steps_per_epoch * 350,
validation_steps=coco_val_samples // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
steps_per_epoch * 320, steps_per_epoch * 340
],
'values': [0.32, 0.032, 0.0032],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.model.min_level == task.model.backbone.spinenet.min_level',
'task.model.max_level == task.model.backbone.spinenet.max_level',
])
return config
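# Usage sketch (illustrative; the override value is an assumption): once
# registered, either experiment can be retrieved and customized through the
# factory,
#
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config('deep_mask_head_rcnn_spinenet_coco')
#   config.task.model.mask_head.convnet_variant = 'hourglass20'  # assumed value
#   config.validate()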
| 8024 | 39.326633 | 115 | py |
models | models-master/official/projects/deepmac_maskrcnn/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/deepmac_maskrcnn/configs/deep_mask_head_rcnn_config_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check that the config is set correctly."""
import tensorflow as tf
from official.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn
class DeepMaskHeadRcnnConfigTest(tf.test.TestCase):
def test_config(self):
config = deep_mask_head_rcnn.deep_mask_head_rcnn_resnetfpn_coco()
self.assertIsInstance(config.task, deep_mask_head_rcnn.DeepMaskHeadRCNNTask)
def test_config_spinenet(self):
config = deep_mask_head_rcnn.deep_mask_head_rcnn_spinenet_coco()
self.assertIsInstance(config.task, deep_mask_head_rcnn.DeepMaskHeadRCNNTask)
if __name__ == '__main__':
tf.test.main()
| 1218 | 33.828571 | 80 | py |