repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: py) |
---|---|---|---|---|---|---|
models | models-master/official/nlp/data/tagging_data_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.tagging_data_lib."""
import os
import random
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.data import tagging_data_lib
from official.nlp.tools import tokenization
def _create_fake_file(filename, labels, is_test):
def write_one_sentence(writer, length):
for _ in range(length):
line = "hiworld"
if not is_test:
line += "\t%s" % (labels[random.randint(0, len(labels) - 1)])
writer.write(line + "\n")
# Writes two sentences with lengths of 3 and 12, respectively.
with tf.io.gfile.GFile(filename, "w") as writer:
write_one_sentence(writer, 3)
writer.write("\n")
write_one_sentence(writer, 12)
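# Illustrative sketch (not part of the original test) of what a generated
# non-test file looks like when `labels` come from PanxProcessor; the concrete
# labels are random and the blank line separates the two sentences:
#
#   hiworld<TAB>B-PER
#   hiworld<TAB>O
#   hiworld<TAB>I-LOC
#
#   hiworld<TAB>O
#   ... (12 lines for the second sentence)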
class TaggingDataLibTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TaggingDataLibTest, self).setUp()
self.processors = {
"panx": tagging_data_lib.PanxProcessor,
"udpos": tagging_data_lib.UdposProcessor,
}
self.vocab_file = os.path.join(self.get_temp_dir(), "vocab.txt")
with tf.io.gfile.GFile(self.vocab_file, "w") as writer:
writer.write("\n".join(["[CLS]", "[SEP]", "hi", "##world", "[UNK]"]))
@parameterized.parameters(
{"task_type": "panx"},
{"task_type": "udpos"},
)
def test_generate_tf_record(self, task_type):
processor = self.processors[task_type]()
input_data_dir = os.path.join(self.get_temp_dir(), task_type)
tf.io.gfile.mkdir(input_data_dir)
# Write fake train file.
_create_fake_file(
os.path.join(input_data_dir, "train-en.tsv"),
processor.get_labels(),
is_test=False)
# Write fake dev file.
_create_fake_file(
os.path.join(input_data_dir, "dev-en.tsv"),
processor.get_labels(),
is_test=False)
# Write fake test files.
for lang in processor.supported_languages:
_create_fake_file(
os.path.join(input_data_dir, "test-%s.tsv" % lang),
processor.get_labels(),
is_test=True)
output_path = os.path.join(self.get_temp_dir(), task_type, "output")
tokenizer = tokenization.FullTokenizer(
vocab_file=self.vocab_file, do_lower_case=True)
metadata = tagging_data_lib.generate_tf_record_from_data_file(
processor,
input_data_dir,
tokenizer,
max_seq_length=8,
train_data_output_path=os.path.join(output_path, "train.tfrecord"),
eval_data_output_path=os.path.join(output_path, "eval.tfrecord"),
test_data_output_path=os.path.join(output_path, "test_{}.tfrecord"),
text_preprocessing=tokenization.convert_to_unicode)
self.assertEqual(metadata["train_data_size"], 5)
files = tf.io.gfile.glob(output_path + "/*")
expected_files = []
expected_files.append(os.path.join(output_path, "train.tfrecord"))
expected_files.append(os.path.join(output_path, "eval.tfrecord"))
for lang in processor.supported_languages:
expected_files.append(
os.path.join(output_path, "test_%s.tfrecord" % lang))
self.assertCountEqual(files, expected_files)
if __name__ == "__main__":
tf.test.main()
| 3,737 | 33.293578 | 76 | py |
models | models-master/official/nlp/data/question_answering_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for the question answering (e.g, SQuAD) task."""
import dataclasses
from typing import Mapping, Optional
import tensorflow as tf
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
@dataclasses.dataclass
class QADataConfig(cfg.DataConfig):
"""Data config for question answering task (tasks/question_answering)."""
# For training, `input_path` is expected to be a pre-processed TFRecord file,
# while for evaluation, it is expected to be a raw JSON file (b/173814590).
input_path: str = ''
global_batch_size: int = 48
is_training: bool = True
seq_length: int = 384
# Settings below are question answering specific.
version_2_with_negative: bool = False
# Settings below are only used for eval mode.
input_preprocessed_data_path: str = ''
doc_stride: int = 128
query_length: int = 64
# The path to the vocab file of word piece tokenizer or the
# model of the sentence piece tokenizer.
vocab_file: str = ''
tokenization: str = 'WordPiece' # WordPiece or SentencePiece
do_lower_case: bool = True
xlnet_format: bool = False
file_type: str = 'tfrecord'
@data_loader_factory.register_data_loader_cls(QADataConfig)
class QuestionAnsweringDataLoader(data_loader.DataLoader):
"""A class to load dataset for sentence prediction (classification) task."""
def __init__(self, params):
self._params = params
self._seq_length = params.seq_length
self._is_training = params.is_training
self._xlnet_format = params.xlnet_format
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
name_to_features = {
'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
}
if self._xlnet_format:
name_to_features['class_index'] = tf.io.FixedLenFeature([], tf.int64)
name_to_features['paragraph_mask'] = tf.io.FixedLenFeature(
[self._seq_length], tf.int64)
if self._is_training:
name_to_features['is_impossible'] = tf.io.FixedLenFeature([], tf.int64)
if self._is_training:
name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64)
name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64)
else:
name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64)
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
x, y = {}, {}
for name, tensor in record.items():
if name in ('start_positions', 'end_positions', 'is_impossible'):
y[name] = tensor
elif name == 'input_ids':
x['input_word_ids'] = tensor
elif name == 'segment_ids':
x['input_type_ids'] = tensor
else:
x[name] = tensor
if name == 'start_positions' and self._xlnet_format:
x[name] = tensor
return (x, y)
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
params=self._params,
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
decoder_fn=self._decode,
parser_fn=self._parse)
return reader.read(input_context)
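# Illustrative usage sketch (not part of the original module); the tfrecord
# path below is a hypothetical placeholder and the field values mirror the
# defaults declared in QADataConfig above.
def _example_load_squad_training_data():
  config = QADataConfig(
      input_path='/tmp/squad_train.tfrecord',  # hypothetical path
      global_batch_size=48,
      is_training=True,
      seq_length=384)
  loader = QuestionAnsweringDataLoader(config)
  # load() returns a tf.data.Dataset of (features, labels) dicts; features
  # include 'input_word_ids', 'input_mask' and 'input_type_ids'.
  return loader.load()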
| 4,464 | 37.491379 | 80 | py |
models | models-master/official/nlp/data/tagging_data_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to process data for tagging task such as NER/POS."""
import collections
import os
from absl import logging
import tensorflow as tf
from official.nlp.data import classifier_data_lib
from official.nlp.tools import tokenization
# A negative label id for the padding label, which will not contribute
# to loss/metrics in training.
_PADDING_LABEL_ID = -1
# The special unknown token, used to substitute a word which has too many
# subwords after tokenization.
_UNK_TOKEN = "[UNK]"
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self,
sentence_id,
sub_sentence_id=0,
words=None,
label_ids=None):
"""Constructs an InputExample."""
self.sentence_id = sentence_id
self.sub_sentence_id = sub_sentence_id
self.words = words if words else []
self.label_ids = label_ids if label_ids else []
def add_word_and_label_id(self, word, label_id):
"""Adds word and label_id pair in the example."""
self.words.append(word)
self.label_ids.append(label_id)
def _read_one_file(file_name, label_list):
"""Reads one file and returns a list of `InputExample` instances."""
lines = tf.io.gfile.GFile(file_name, "r").readlines()
examples = []
label_id_map = {label: i for i, label in enumerate(label_list)}
sentence_id = 0
example = InputExample(sentence_id=0)
for line in lines:
line = line.strip("\n")
if line:
# The format is: <token>\t<label> for train/dev set and <token> for test.
items = line.split("\t")
assert len(items) == 2 or len(items) == 1
token = items[0].strip()
# Assign a dummy label_id for test set
label_id = label_id_map[items[1].strip()] if len(items) == 2 else 0
example.add_word_and_label_id(token, label_id)
else:
# Empty line indicates a new sentence.
if example.words:
examples.append(example)
sentence_id += 1
example = InputExample(sentence_id=sentence_id)
if example.words:
examples.append(example)
return examples
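# Illustrative sketch (not from the original library) of the TSV format
# consumed above: one "<token>\t<label>" pair per line for train/dev files,
# tokens only for test files, and a blank line between sentences, e.g.
#
#   John<TAB>B-PER
#   lives<TAB>O
#   in<TAB>O
#   Paris<TAB>B-LOC
#
#   She<TAB>O
#   ...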
class PanxProcessor(classifier_data_lib.DataProcessor):
"""Processor for the Panx data set."""
supported_languages = [
"ar", "he", "vi", "id", "jv", "ms", "tl", "eu", "ml", "ta", "te", "af",
"nl", "en", "de", "el", "bn", "hi", "mr", "ur", "fa", "fr", "it", "pt",
"es", "bg", "ru", "ja", "ka", "ko", "th", "sw", "yo", "my", "zh", "kk",
"tr", "et", "fi", "hu"
]
def __init__(self,
process_text_fn=tokenization.convert_to_unicode,
only_use_en_train=True,
only_use_en_dev=True):
"""See base class.
Args:
process_text_fn: See base class.
only_use_en_train: If True, only use English training data. Otherwise, use
training data from all languages.
only_use_en_dev: If True, only use English dev data. Otherwise, use dev
data from all languages.
"""
super(PanxProcessor, self).__init__(process_text_fn)
self.only_use_en_train = only_use_en_train
self.only_use_en_dev = only_use_en_dev
def get_train_examples(self, data_dir):
examples = _read_one_file(
os.path.join(data_dir, "train-en.tsv"), self.get_labels())
if not self.only_use_en_train:
for language in self.supported_languages:
if language == "en":
continue
examples.extend(
_read_one_file(
os.path.join(data_dir, f"train-{language}.tsv"),
self.get_labels()))
return examples
def get_dev_examples(self, data_dir):
examples = _read_one_file(
os.path.join(data_dir, "dev-en.tsv"), self.get_labels())
if not self.only_use_en_dev:
for language in self.supported_languages:
if language == "en":
continue
examples.extend(
_read_one_file(
os.path.join(data_dir, f"dev-{language}.tsv"),
self.get_labels()))
return examples
def get_test_examples(self, data_dir):
examples_dict = {}
for language in self.supported_languages:
examples_dict[language] = _read_one_file(
os.path.join(data_dir, "test-%s.tsv" % language), self.get_labels())
return examples_dict
def get_labels(self):
return ["O", "B-PER", "I-PER", "B-LOC", "I-LOC", "B-ORG", "I-ORG"]
@staticmethod
def get_processor_name():
return "panx"
class UdposProcessor(classifier_data_lib.DataProcessor):
"""Processor for the Udpos data set."""
supported_languages = [
"af", "ar", "bg", "de", "el", "en", "es", "et", "eu", "fa", "fi", "fr",
"he", "hi", "hu", "id", "it", "ja", "kk", "ko", "mr", "nl", "pt", "ru",
"ta", "te", "th", "tl", "tr", "ur", "vi", "yo", "zh"
]
def __init__(self,
process_text_fn=tokenization.convert_to_unicode,
only_use_en_train=True,
only_use_en_dev=True):
"""See base class.
Args:
process_text_fn: See base class.
only_use_en_train: If True, only use English training data. Otherwise, use
training data from all languages.
only_use_en_dev: If True, only use English dev data. Otherwise, use dev
data from all languages.
"""
super(UdposProcessor, self).__init__(process_text_fn)
self.only_use_en_train = only_use_en_train
self.only_use_en_dev = only_use_en_dev
def get_train_examples(self, data_dir):
if self.only_use_en_train:
examples = _read_one_file(
os.path.join(data_dir, "train-en.tsv"), self.get_labels())
else:
examples = []
# Uses glob because some languages are missing in train.
for filepath in tf.io.gfile.glob(os.path.join(data_dir, "train-*.tsv")):
examples.extend(
_read_one_file(
filepath,
self.get_labels()))
return examples
def get_dev_examples(self, data_dir):
if self.only_use_en_dev:
examples = _read_one_file(
os.path.join(data_dir, "dev-en.tsv"), self.get_labels())
else:
examples = []
for filepath in tf.io.gfile.glob(os.path.join(data_dir, "dev-*.tsv")):
examples.extend(
_read_one_file(
filepath,
self.get_labels()))
return examples
def get_test_examples(self, data_dir):
examples_dict = {}
for language in self.supported_languages:
examples_dict[language] = _read_one_file(
os.path.join(data_dir, "test-%s.tsv" % language), self.get_labels())
return examples_dict
def get_labels(self):
return [
"ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM",
"PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"
]
@staticmethod
def get_processor_name():
return "udpos"
def _tokenize_example(example, max_length, tokenizer, text_preprocessing=None):
"""Tokenizes words and breaks long example into short ones."""
# Needs additional [CLS] and [SEP] tokens.
max_length = max_length - 2
new_examples = []
new_example = InputExample(sentence_id=example.sentence_id, sub_sentence_id=0)
if any([x < 0 for x in example.label_ids]):
raise ValueError("Unexpected negative label_id: %s" % example.label_ids)
for i, word in enumerate(example.words):
if text_preprocessing:
word = text_preprocessing(word)
subwords = tokenizer.tokenize(word)
if (not subwords or len(subwords) > max_length) and word:
subwords = [_UNK_TOKEN]
if len(subwords) + len(new_example.words) > max_length:
# Start a new example.
new_examples.append(new_example)
last_sub_sentence_id = new_example.sub_sentence_id
new_example = InputExample(
sentence_id=example.sentence_id,
sub_sentence_id=last_sub_sentence_id + 1)
for j, subword in enumerate(subwords):
# Use the real label for the first subword, and pad label for
# the remaining subwords.
subword_label = example.label_ids[i] if j == 0 else _PADDING_LABEL_ID
new_example.add_word_and_label_id(subword, subword_label)
if new_example.words:
new_examples.append(new_example)
return new_examples
def _convert_single_example(example, max_seq_length, tokenizer):
"""Converts an `InputExample` instance to a `tf.train.Example` instance."""
tokens = ["[CLS]"]
tokens.extend(example.words)
tokens.append("[SEP]")
input_ids = tokenizer.convert_tokens_to_ids(tokens)
label_ids = [_PADDING_LABEL_ID]
label_ids.extend(example.label_ids)
label_ids.append(_PADDING_LABEL_ID)
segment_ids = [0] * len(input_ids)
input_mask = [1] * len(input_ids)
# Pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(_PADDING_LABEL_ID)
def create_int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["label_ids"] = create_int_feature(label_ids)
features["sentence_id"] = create_int_feature([example.sentence_id])
features["sub_sentence_id"] = create_int_feature([example.sub_sentence_id])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example
def write_example_to_file(examples,
tokenizer,
max_seq_length,
output_file,
text_preprocessing=None):
"""Writes `InputExample`s into a tfrecord file with `tf.train.Example` protos.
Note that the words inside each example will be tokenized and be applied by
`text_preprocessing` if available. Also, if the length of sentence (plus
special [CLS] and [SEP] tokens) exceeds `max_seq_length`, the long sentence
will be broken into multiple short examples. For example:
Example (text_preprocessing=lowercase, max_seq_length=5)
words: ["What", "a", "great", "weekend"]
labels: [ 7, 5, 9, 10]
sentence_id: 0
preprocessed: ["what", "a", "great", "weekend"]
tokenized: ["what", "a", "great", "week", "##end"]
will result in two tf.example protos:
tokens: ["[CLS]", "what", "a", "great", "[SEP]"]
label_ids: [-1, 7, 5, 9, -1]
input_mask: [ 1, 1, 1, 1, 1]
segment_ids: [ 0, 0, 0, 0, 0]
input_ids: [ tokenizer.convert_tokens_to_ids(tokens) ]
sentence_id: 0
tokens: ["[CLS]", "week", "##end", "[SEP]", "[PAD]"]
label_ids: [-1, 10, -1, -1, -1]
input_mask: [ 1, 1, 1, 0, 0]
segment_ids: [ 0, 0, 0, 0, 0]
input_ids: [ tokenizer.convert_tokens_to_ids(tokens) ]
sentence_id: 0
Note the use of -1 in `label_ids` to indicate that a token should not be
considered for classification (e.g., trailing ## wordpieces or special
token). Token classification models should accordingly ignore these when
calculating loss, metrics, etc...
Args:
examples: A list of `InputExample` instances.
tokenizer: The tokenizer to be applied on the data.
max_seq_length: Maximum length of generated sequences.
output_file: The name of the output tfrecord file.
text_preprocessing: optional preprocessing run on each word prior to
tokenization.
Returns:
The total number of tf.train.Example proto written to file.
"""
tf.io.gfile.makedirs(os.path.dirname(output_file))
writer = tf.io.TFRecordWriter(output_file)
num_tokenized_examples = 0
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logging.info("Writing example %d of %d to %s", ex_index, len(examples),
output_file)
tokenized_examples = _tokenize_example(example, max_seq_length, tokenizer,
text_preprocessing)
num_tokenized_examples += len(tokenized_examples)
for per_tokenized_example in tokenized_examples:
tf_example = _convert_single_example(per_tokenized_example,
max_seq_length, tokenizer)
writer.write(tf_example.SerializeToString())
writer.close()
return num_tokenized_examples
def token_classification_meta_data(train_data_size,
max_seq_length,
num_labels,
eval_data_size=None,
test_data_size=None,
label_list=None,
processor_type=None):
"""Creates metadata for tagging (token classification) datasets."""
meta_data = {
"train_data_size": train_data_size,
"max_seq_length": max_seq_length,
"num_labels": num_labels,
"task_type": "tagging",
"label_type": "int",
"label_shape": [max_seq_length],
}
if eval_data_size:
meta_data["eval_data_size"] = eval_data_size
if test_data_size:
meta_data["test_data_size"] = test_data_size
if label_list:
meta_data["label_list"] = label_list
if processor_type:
meta_data["processor_type"] = processor_type
return meta_data
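# Illustrative example (values are hypothetical) of the metadata returned
# above for a small Panx run with max_seq_length=8:
#
#   {
#       "train_data_size": 5,
#       "max_seq_length": 8,
#       "num_labels": 7,
#       "task_type": "tagging",
#       "label_type": "int",
#       "label_shape": [8],
#       "eval_data_size": 5,
#       "test_data_size": {"en": 5, ...},
#       "label_list": ["O", "B-PER", ...],
#       "processor_type": "panx",
#   }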
def generate_tf_record_from_data_file(processor, data_dir, tokenizer,
max_seq_length, train_data_output_path,
eval_data_output_path,
test_data_output_path,
text_preprocessing):
"""Generates tfrecord files from the raw data."""
common_kwargs = dict(
tokenizer=tokenizer,
max_seq_length=max_seq_length,
text_preprocessing=text_preprocessing)
train_examples = processor.get_train_examples(data_dir)
train_data_size = write_example_to_file(
train_examples, output_file=train_data_output_path, **common_kwargs)
eval_examples = processor.get_dev_examples(data_dir)
eval_data_size = write_example_to_file(
eval_examples, output_file=eval_data_output_path, **common_kwargs)
test_input_data_examples = processor.get_test_examples(data_dir)
test_data_size = {}
for language, examples in test_input_data_examples.items():
test_data_size[language] = write_example_to_file(
examples,
output_file=test_data_output_path.format(language),
**common_kwargs)
labels = processor.get_labels()
meta_data = token_classification_meta_data(
train_data_size,
max_seq_length,
len(labels),
eval_data_size,
test_data_size,
label_list=labels,
processor_type=processor.get_processor_name())
return meta_data
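# Illustrative end-to-end usage sketch (not part of the original module),
# mirroring tagging_data_lib_test.py; all file paths here are hypothetical
# placeholders.
def _example_generate_panx_tfrecords():
  processor = PanxProcessor()
  tokenizer = tokenization.FullTokenizer(
      vocab_file='/tmp/vocab.txt', do_lower_case=True)  # hypothetical vocab
  return generate_tf_record_from_data_file(
      processor,
      data_dir='/tmp/panx',  # hypothetical dir with train/dev/test TSV files
      tokenizer=tokenizer,
      max_seq_length=128,
      train_data_output_path='/tmp/panx/train.tfrecord',
      eval_data_output_path='/tmp/panx/eval.tfrecord',
      test_data_output_path='/tmp/panx/test_{}.tfrecord',
      text_preprocessing=tokenization.convert_to_unicode)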
| 15,489 | 35.276347 | 80 | py |
models | models-master/official/nlp/data/sentence_prediction_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for the sentence prediction (classification) task."""
import dataclasses
import functools
from typing import List, Mapping, Optional, Tuple
import tensorflow as tf
import tensorflow_hub as hub
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp import modeling
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
LABEL_TYPES_MAP = {'int': tf.int64, 'float': tf.float32}
@dataclasses.dataclass
class SentencePredictionDataConfig(cfg.DataConfig):
"""Data config for sentence prediction task (tasks/sentence_prediction)."""
input_path: str = ''
global_batch_size: int = 32
is_training: bool = True
seq_length: int = 128
label_type: str = 'int'
# Whether to include the example id number.
include_example_id: bool = False
label_field: str = 'label_ids'
# Maps the key in TfExample to feature name.
# E.g 'label_ids' to 'next_sentence_labels'
label_name: Optional[Tuple[str, str]] = None
# Either tfrecord, sstable, or recordio.
file_type: str = 'tfrecord'
@data_loader_factory.register_data_loader_cls(SentencePredictionDataConfig)
class SentencePredictionDataLoader(data_loader.DataLoader):
"""A class to load dataset for sentence prediction (classification) task."""
def __init__(self, params):
self._params = params
self._seq_length = params.seq_length
self._include_example_id = params.include_example_id
self._label_field = params.label_field
if params.label_name:
self._label_name_mapping = dict([params.label_name])
else:
self._label_name_mapping = dict()
def name_to_features_spec(self):
"""Defines features to decode. Subclass may override to append features."""
label_type = LABEL_TYPES_MAP[self._params.label_type]
name_to_features = {
'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
self._label_field: tf.io.FixedLenFeature([], label_type),
}
if self._include_example_id:
name_to_features['example_id'] = tf.io.FixedLenFeature([], tf.int64)
return name_to_features
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
example = tf.io.parse_single_example(record, self.name_to_features_spec())
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
key_mapping = {
'input_ids': 'input_word_ids',
'input_mask': 'input_mask',
'segment_ids': 'input_type_ids'
}
ret = {}
for record_key in record:
if record_key in key_mapping:
ret[key_mapping[record_key]] = record[record_key]
else:
ret[record_key] = record[record_key]
if self._label_field in self._label_name_mapping:
ret[self._label_name_mapping[self._label_field]] = record[
self._label_field]
return ret
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
params=self._params,
decoder_fn=self._decode,
parser_fn=self._parse)
return reader.read(input_context)
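# Illustrative usage sketch (not part of the original module); the tfrecord
# path is a hypothetical placeholder.
def _example_load_sentence_prediction_data():
  config = SentencePredictionDataConfig(
      input_path='/tmp/classification_train.tfrecord',  # hypothetical path
      global_batch_size=32,
      seq_length=128,
      label_type='int',
      is_training=True)
  loader = SentencePredictionDataLoader(config)
  # Yields dicts containing 'input_word_ids', 'input_mask', 'input_type_ids'
  # and the label field ('label_ids' by default).
  return loader.load()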
@dataclasses.dataclass
class SentencePredictionTextDataConfig(cfg.DataConfig):
"""Data config for sentence prediction task with raw text."""
# Either set `input_path`...
input_path: str = ''
# Either `int` or `float`.
label_type: str = 'int'
# ...or `tfds_name` and `tfds_split` to specify input.
tfds_name: str = ''
tfds_split: str = ''
# The name of the text feature fields. The text features will be
# concatenated in order.
text_fields: Optional[List[str]] = None
label_field: str = 'label'
global_batch_size: int = 32
seq_length: int = 128
is_training: bool = True
# Either build preprocessing with Python code by specifying these values
# for modeling.layers.BertTokenizer()/SentencepieceTokenizer()....
tokenization: str = 'WordPiece' # WordPiece or SentencePiece
# Text vocab file if tokenization is WordPiece, or sentencepiece.ModelProto
# file if tokenization is SentencePiece.
vocab_file: str = ''
lower_case: bool = True
# ...or load preprocessing from a SavedModel at this location.
preprocessing_hub_module_url: str = ''
# Either tfrecord, sstable, or recordio.
file_type: str = 'tfrecord'
include_example_id: bool = False
class TextProcessor(tf.Module):
"""Text features processing for sentence prediction task."""
def __init__(self,
seq_length: int,
vocab_file: Optional[str] = None,
tokenization: Optional[str] = None,
lower_case: Optional[bool] = True,
preprocessing_hub_module_url: Optional[str] = None):
if preprocessing_hub_module_url:
self._preprocessing_hub_module = hub.load(preprocessing_hub_module_url)
self._tokenizer = self._preprocessing_hub_module.tokenize
self._pack_inputs = functools.partial(
self._preprocessing_hub_module.bert_pack_inputs,
seq_length=seq_length)
return
if tokenization == 'WordPiece':
self._tokenizer = modeling.layers.BertTokenizer(
vocab_file=vocab_file, lower_case=lower_case)
elif tokenization == 'SentencePiece':
self._tokenizer = modeling.layers.SentencepieceTokenizer(
model_file_path=vocab_file,
lower_case=lower_case,
strip_diacritics=True) # Strip diacritics to follow ALBERT model
else:
raise ValueError('Unsupported tokenization: %s' % tokenization)
self._pack_inputs = modeling.layers.BertPackInputs(
seq_length=seq_length,
special_tokens_dict=self._tokenizer.get_special_tokens_dict())
def __call__(self, segments):
segments = [self._tokenizer(s) for s in segments]
# BertTokenizer returns a RaggedTensor with shape [batch, word, subword],
# and SentencepieceTokenizer returns a RaggedTensor with shape
# [batch, sentencepiece].
segments = [
tf.cast(x.merge_dims(1, -1) if x.shape.rank > 2 else x, tf.int32)
for x in segments
]
return self._pack_inputs(segments)
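# Illustrative sketch (not part of the original module) of how TextProcessor
# could be applied to a batch of sentence pairs; the vocab path is a
# hypothetical placeholder and the packed output keys are produced by
# modeling.layers.BertPackInputs.
def _example_text_processing():
  processor = TextProcessor(
      seq_length=128,
      vocab_file='/tmp/vocab.txt',  # hypothetical WordPiece vocab
      tokenization='WordPiece',
      lower_case=True)
  segments = [
      tf.constant(['The movie was great.', 'It was terrible.']),
      tf.constant(['I loved it.', 'I did not.']),
  ]
  # Tokenizes each text feature and packs them into fixed-length model inputs.
  return processor(segments)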
@data_loader_factory.register_data_loader_cls(SentencePredictionTextDataConfig)
class SentencePredictionTextDataLoader(data_loader.DataLoader):
"""Loads dataset with raw text for sentence prediction task."""
def __init__(self, params):
if bool(params.tfds_name) != bool(params.tfds_split):
raise ValueError('`tfds_name` and `tfds_split` should be specified or '
'unspecified at the same time.')
if bool(params.tfds_name) == bool(params.input_path):
raise ValueError('Must specify either `tfds_name` and `tfds_split` '
'or `input_path`.')
if not params.text_fields:
raise ValueError('Unexpected empty text fields.')
if bool(params.vocab_file) == bool(params.preprocessing_hub_module_url):
raise ValueError('Must specify exactly one of vocab_file (with matching '
'lower_case flag) or preprocessing_hub_module_url.')
self._params = params
self._text_fields = params.text_fields
self._label_field = params.label_field
self._label_type = params.label_type
self._include_example_id = params.include_example_id
self._text_processor = TextProcessor(
seq_length=params.seq_length,
vocab_file=params.vocab_file,
tokenization=params.tokenization,
lower_case=params.lower_case,
preprocessing_hub_module_url=params.preprocessing_hub_module_url)
def _bert_preprocess(self, record: Mapping[str, tf.Tensor]):
"""Berts preprocess."""
segments = [record[x] for x in self._text_fields]
model_inputs = self._text_processor(segments)
for key in record:
if key not in self._text_fields:
model_inputs[key] = record[key]
return model_inputs
def name_to_features_spec(self):
name_to_features = {}
for text_field in self._text_fields:
name_to_features[text_field] = tf.io.FixedLenFeature([], tf.string)
label_type = LABEL_TYPES_MAP[self._label_type]
name_to_features[self._label_field] = tf.io.FixedLenFeature([], label_type)
if self._include_example_id:
name_to_features['example_id'] = tf.io.FixedLenFeature([], tf.int64)
return name_to_features
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
example = tf.io.parse_single_example(record, self.name_to_features_spec())
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
decoder_fn=self._decode if self._params.input_path else None,
params=self._params,
postprocess_fn=self._bert_preprocess)
return reader.read(input_context)
| 10,337 | 37.574627 | 80 | py |
models | models-master/official/nlp/data/squad_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to process data for SQuAD 1.1 and SQuAD 2.0."""
# pylint: disable=g-bad-import-order
import collections
import copy
import json
import math
import os
import six
from absl import logging
import tensorflow as tf
from official.nlp.tools import tokenization
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
Attributes:
qas_id: ID of the question-answer pair.
question_text: Original text for the question.
doc_tokens: The list of tokens in the context obtained by splitting on
whitespace only.
orig_answer_text: Original text for the answer.
start_position: Starting index of the answer in `doc_tokens`.
end_position: Ending index of the answer in `doc_tokens`.
is_impossible: Whether the question is impossible to answer given the
context. Only used in SQuAD 2.0.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
paragraph_mask=None,
class_index=None,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.paragraph_mask = paragraph_mask
self.class_index = class_index
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
tf.io.gfile.makedirs(os.path.dirname(filename))
self._writer = tf.io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if feature.paragraph_mask is not None:
features["paragraph_mask"] = create_int_feature(feature.paragraph_mask)
if feature.class_index is not None:
features["class_index"] = create_int_feature([feature.class_index])
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def read_squad_examples(input_file, is_training,
version_2_with_negative,
translated_input_folder=None):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.io.gfile.GFile(input_file, "r") as reader:
input_data = json.load(reader)["data"]
if translated_input_folder is not None:
translated_files = tf.io.gfile.glob(
os.path.join(translated_input_folder, "*.json"))
for file in translated_files:
with tf.io.gfile.GFile(file, "r") as reader:
input_data.extend(json.load(reader)["data"])
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position +
1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
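# Illustrative sketch (not from the original code) of the minimal SQuAD json
# structure expected by the reader above:
#
#   {"data": [{"paragraphs": [{
#       "context": "John lives in Paris.",
#       "qas": [{"id": "q1",
#                "question": "Where does John live?",
#                "is_impossible": false,   # SQuAD 2.0 only
#                "answers": [{"text": "Paris", "answer_start": 14}]}]}]}]}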
def convert_examples_to_features(examples,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
output_fn,
xlnet_format=False,
batch_size=None):
"""Loads a data file into a list of `InputBatch`s."""
base_id = 1000000000
unique_id = base_id
feature = None
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
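# Worked example (illustrative, not in the original code): with
# len(all_doc_tokens) == 10, max_tokens_for_doc == 6 and doc_stride == 4,
# the loop above yields DocSpan(start=0, length=6) and
# DocSpan(start=4, length=6); tokens 4 and 5 fall in both spans and are
# later disambiguated by _check_is_max_context.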
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
# Paragraph mask used in XLNet.
# 1 represents paragraph and class tokens.
# 0 represents query and other special tokens.
paragraph_mask = []
# pylint: disable=cell-var-from-loop
def process_query(seg_q):
for token in query_tokens:
tokens.append(token)
segment_ids.append(seg_q)
paragraph_mask.append(0)
tokens.append("[SEP]")
segment_ids.append(seg_q)
paragraph_mask.append(0)
def process_paragraph(seg_p):
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(seg_p)
paragraph_mask.append(1)
tokens.append("[SEP]")
segment_ids.append(seg_p)
paragraph_mask.append(0)
def process_class(seg_class):
class_index = len(segment_ids)
tokens.append("[CLS]")
segment_ids.append(seg_class)
paragraph_mask.append(1)
return class_index
if xlnet_format:
seg_p, seg_q, seg_class, seg_pad = 0, 1, 2, 3
process_paragraph(seg_p)
process_query(seg_q)
class_index = process_class(seg_class)
else:
seg_p, seg_q, seg_class, seg_pad = 1, 0, 0, 0
class_index = process_class(seg_class)
process_query(seg_q)
process_paragraph(seg_p)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(seg_pad)
paragraph_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(paragraph_mask) == max_seq_length
start_position = 0
end_position = 0
span_contains_answer = False
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
span_contains_answer = (tok_start_position >= doc_start and
tok_end_position <= doc_end)
if span_contains_answer:
doc_offset = 0 if xlnet_format else len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if example_index < 20:
logging.info("*** Example ***")
logging.info("unique_id: %s", (unique_id))
logging.info("example_index: %s", (example_index))
logging.info("doc_span_index: %s", (doc_span_index))
logging.info("tokens: %s",
" ".join([tokenization.printable_text(x) for x in tokens]))
logging.info(
"token_to_orig_map: %s", " ".join([
"%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)
]))
logging.info(
"token_is_max_context: %s", " ".join([
"%d:%s" % (x, y)
for (x, y) in six.iteritems(token_is_max_context)
]))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logging.info("paragraph_mask: %s", " ".join(
[str(x) for x in paragraph_mask]))
logging.info("class_index: %d", class_index)
if is_training:
if span_contains_answer:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logging.info("start_position: %d", (start_position))
logging.info("end_position: %d", (end_position))
logging.info("answer: %s", tokenization.printable_text(answer_text))
else:
logging.info("document span doesn't contain answer")
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
paragraph_mask=paragraph_mask,
class_index=class_index,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=not span_contains_answer)
# Run callback
if is_training:
output_fn(feature)
else:
output_fn(feature, is_padding=False)
unique_id += 1
if not is_training and feature:
assert batch_size
num_padding = 0
num_examples = unique_id - base_id
if unique_id % batch_size != 0:
num_padding = batch_size - (num_examples % batch_size)
logging.info("Adding padding examples to make sure no partial batch.")
logging.info("Adds %d padding examples for inference.", num_padding)
dummy_feature = copy.deepcopy(feature)
for _ in range(num_padding):
dummy_feature.unique_id = unique_id
# Run callback
output_fn(dummy_feature, is_padding=True)
unique_id += 1
return unique_id - base_id
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def write_predictions(all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
version_2_with_negative=False,
null_score_diff_threshold=0.0,
verbose=False):
"""Write final predictions to the json file and log-odds of null if needed."""
logging.info("Writing predictions to: %s", (output_prediction_file))
logging.info("Writing nbest to: %s", (output_nbest_file))
all_predictions, all_nbest_json, scores_diff_json = (
postprocess_output(
all_examples=all_examples,
all_features=all_features,
all_results=all_results,
n_best_size=n_best_size,
max_answer_length=max_answer_length,
do_lower_case=do_lower_case,
version_2_with_negative=version_2_with_negative,
null_score_diff_threshold=null_score_diff_threshold,
verbose=verbose))
write_to_json_files(all_predictions, output_prediction_file)
write_to_json_files(all_nbest_json, output_nbest_file)
if version_2_with_negative:
write_to_json_files(scores_diff_json, output_null_log_odds_file)
def postprocess_output(all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
version_2_with_negative=False,
null_score_diff_threshold=0.0,
xlnet_format=False,
verbose=False):
"""Postprocess model output, to form predicton results."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0  # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
if feature.unique_id not in unique_id_to_result:
logging.info("Skip eval example %s, not in pred.", feature.unique_id)
continue
result = unique_id_to_result[feature.unique_id]
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
if xlnet_format:
feature_null_score = result.class_logits
else:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for (start_index, start_logit,
end_index, end_logit) in _get_best_indexes_and_logits(
result=result,
n_best_size=n_best_size,
xlnet_format=xlnet_format):
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=start_logit,
end_logit=end_logit))
if version_2_with_negative and not xlnet_format:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0 or xlnet_format: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(
tok_text, orig_text, do_lower_case, verbose=verbose)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# If we didn't include the empty option in the n-best, include it.
if version_2_with_negative and not xlnet_format:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# pytype: disable=attribute-error
# predict "" iff the null score - the score of best non-null > threshold
if best_non_null_entry is not None:
if xlnet_format:
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
all_predictions[example.qas_id] = best_non_null_entry.text
else:
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
else:
logging.warning("best_non_null_entry is None")
scores_diff_json[example.qas_id] = score_null
all_predictions[example.qas_id] = ""
# pytype: enable=attribute-error
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json
def write_to_json_files(json_records, json_file):
with tf.io.gfile.GFile(json_file, "w") as writer:
writer.write(json.dumps(json_records, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case, verbose=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose:
logging.info("Unable to find text: '%s' in '%s'", pred_text, orig_text)
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose:
logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose:
logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose:
logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
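# Illustrative sketch (not part of the original module; the inputs are the
# made-up strings from the docstring above):
#
#   get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
#   # -> "Steve Smith". After stripping spaces, the tokenized and original
#   # strings have equal length, so the character alignment succeeds and the
#   # predicted span is projected back without the trailing "'s".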
def _get_best_indexes_and_logits(result,
n_best_size,
xlnet_format=False):
"""Generates the n-best indexes and logits from a list."""
if xlnet_format:
for i in range(n_best_size):
for j in range(n_best_size):
j_index = i * n_best_size + j
yield (result.start_indexes[i], result.start_logits[i],
result.end_indexes[j_index], result.end_logits[j_index])
else:
start_index_and_score = sorted(enumerate(result.start_logits),
key=lambda x: x[1], reverse=True)
end_index_and_score = sorted(enumerate(result.end_logits),
key=lambda x: x[1], reverse=True)
for i in range(len(start_index_and_score)):
if i >= n_best_size:
break
for j in range(len(end_index_and_score)):
if j >= n_best_size:
break
yield (start_index_and_score[i][0], start_index_and_score[i][1],
end_index_and_score[j][0], end_index_and_score[j][1])
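# Note on the XLNet branch of _get_best_indexes_and_logits above: the result is
# assumed to carry, for each of the n_best_size start indexes, n_best_size
# candidate end indexes, so the end candidates for start i are read from the
# flattened positions i * n_best_size + j with j in [0, n_best_size).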
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
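# Illustrative sketch (not part of the original module): a worked example of
# the max-shifted softmax above, using made-up logits.
#
#   _compute_softmax([1.0, 2.0, 3.0])
#   # exp([1 - 3, 2 - 3, 3 - 3]) = [0.135, 0.368, 1.000], sum = 1.503
#   # -> probabilities of approximately [0.090, 0.245, 0.665]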
def generate_tf_record_from_json_file(input_file_path,
vocab_file_path,
output_path,
translated_input_folder=None,
max_seq_length=384,
do_lower_case=True,
max_query_length=64,
doc_stride=128,
version_2_with_negative=False,
xlnet_format=False):
"""Generates and saves training data into a tf record file."""
train_examples = read_squad_examples(
input_file=input_file_path,
is_training=True,
version_2_with_negative=version_2_with_negative,
translated_input_folder=translated_input_folder)
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file_path, do_lower_case=do_lower_case)
train_writer = FeatureWriter(filename=output_path, is_training=True)
number_of_examples = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
xlnet_format=xlnet_format)
train_writer.close()
meta_data = {
"task_type": "bert_squad",
"train_data_size": number_of_examples,
"max_seq_length": max_seq_length,
"max_query_length": max_query_length,
"doc_stride": doc_stride,
"version_2_with_negative": version_2_with_negative,
}
return meta_data
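# Illustrative usage sketch (paths are hypothetical, not part of the module):
#
#   meta_data = generate_tf_record_from_json_file(
#       input_file_path="/tmp/train-v1.1.json",
#       vocab_file_path="/tmp/vocab.txt",
#       output_path="/tmp/squad_train.tf_record")
#   # meta_data["train_data_size"] is the number of features written.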
| 36,512 | 36.410861 | 80 | py |
models | models-master/official/nlp/data/tagging_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for the tagging (e.g., NER/POS) task."""
import dataclasses
from typing import Mapping, Optional
import tensorflow as tf
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
@dataclasses.dataclass
class TaggingDataConfig(cfg.DataConfig):
"""Data config for tagging (tasks/tagging)."""
is_training: bool = True
seq_length: int = 128
include_sentence_id: bool = False
file_type: str = 'tfrecord'
@data_loader_factory.register_data_loader_cls(TaggingDataConfig)
class TaggingDataLoader(data_loader.DataLoader):
"""A class to load dataset for tagging (e.g., NER and POS) task."""
def __init__(self, params: TaggingDataConfig):
self._params = params
self._seq_length = params.seq_length
self._include_sentence_id = params.include_sentence_id
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
name_to_features = {
'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'label_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
}
if self._include_sentence_id:
name_to_features['sentence_id'] = tf.io.FixedLenFeature([], tf.int64)
name_to_features['sub_sentence_id'] = tf.io.FixedLenFeature([], tf.int64)
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
x = {
'input_word_ids': record['input_ids'],
'input_mask': record['input_mask'],
'input_type_ids': record['segment_ids']
}
if self._include_sentence_id:
x['sentence_id'] = record['sentence_id']
x['sub_sentence_id'] = record['sub_sentence_id']
y = record['label_ids']
return (x, y)
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
params=self._params,
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
decoder_fn=self._decode,
parser_fn=self._parse)
return reader.read(input_context)
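# Illustrative usage sketch (the tfrecord path is hypothetical; input_path and
# global_batch_size are assumed to come from the base cfg.DataConfig):
#
#   data_config = TaggingDataConfig(
#       input_path="/tmp/tagging_train.tfrecord",
#       seq_length=128,
#       global_batch_size=32)
#   dataset = TaggingDataLoader(data_config).load()
#   # Each element is an (x, y) pair where x holds 'input_word_ids',
#   # 'input_mask' and 'input_type_ids', and y holds the label ids.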
| 3,337 | 35.681319 | 80 | py |
models | models-master/official/nlp/data/create_finetuning_data.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning task dataset generator."""
import functools
import json
import os
# Import libraries
from absl import app
from absl import flags
import tensorflow as tf
from official.nlp.data import classifier_data_lib
from official.nlp.data import sentence_retrieval_lib
# word-piece tokenizer based squad_lib
from official.nlp.data import squad_lib as squad_lib_wp
# sentence-piece tokenizer based squad_lib
from official.nlp.data import squad_lib_sp
from official.nlp.data import tagging_data_lib
from official.nlp.tools import tokenization
FLAGS = flags.FLAGS
flags.DEFINE_enum(
"fine_tuning_task_type", "classification",
["classification", "regression", "squad", "retrieval", "tagging"],
"The name of the BERT fine tuning task for which data "
"will be generated.")
# BERT classification specific flags.
flags.DEFINE_string(
"input_data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_enum(
"classification_task_name", "MNLI", [
"AX", "COLA", "IMDB", "MNLI", "MRPC", "PAWS-X", "QNLI", "QQP", "RTE",
"SST-2", "STS-B", "WNLI", "XNLI", "XTREME-XNLI", "XTREME-PAWS-X",
"AX-g", "SUPERGLUE-RTE", "CB", "BoolQ", "WIC"
], "The name of the task to train BERT classifier. The "
"difference between XTREME-XNLI and XNLI is: 1. the format "
"of input tsv files; 2. the dev set for XTREME is english "
"only and for XNLI is all languages combined. Same for "
"PAWS-X.")
# MNLI task-specific flag.
flags.DEFINE_enum("mnli_type", "matched", ["matched", "mismatched"],
"The type of MNLI dataset.")
# XNLI task-specific flag.
flags.DEFINE_string(
"xnli_language", "en",
"Language of training data for XNLI task. If the value is 'all', the data "
"of all languages will be used for training.")
# PAWS-X task-specific flag.
flags.DEFINE_string(
"pawsx_language", "en",
"Language of training data for PAWS-X task. If the value is 'all', the data "
"of all languages will be used for training.")
# XTREME classification specific flags. Only used in XtremePawsx and XtremeXnli.
flags.DEFINE_string(
"translated_input_data_dir", None,
"The translated input data dir. Should contain the .tsv files (or other "
"data files) for the task.")
# Retrieval task-specific flags.
flags.DEFINE_enum("retrieval_task_name", "bucc", ["bucc", "tatoeba"],
"The name of sentence retrieval task for scoring")
# Tagging task-specific flags.
flags.DEFINE_enum("tagging_task_name", "panx", ["panx", "udpos"],
"The name of BERT tagging (token classification) task.")
flags.DEFINE_bool("tagging_only_use_en_train", True,
"Whether only use english training data in tagging.")
# BERT Squad task-specific flags.
flags.DEFINE_string(
"squad_data_file", None,
"The input data file in for generating training data for BERT squad task.")
flags.DEFINE_string(
"translated_squad_data_folder", None,
"The translated data folder for generating training data for BERT squad "
"task.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_bool(
"xlnet_format", False,
"If true, then data will be preprocessed in a paragraph, query, class order"
" instead of the BERT-style class, paragraph, query order.")
# XTREME specific flags.
flags.DEFINE_bool("only_use_en_dev", True, "Whether only use english dev data.")
# Shared flags across BERT fine-tuning tasks.
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"train_data_output_path", None,
"The path in which generated training input data will be written as tf"
" records.")
flags.DEFINE_string(
"eval_data_output_path", None,
"The path in which generated evaluation input data will be written as tf"
" records.")
flags.DEFINE_string(
"test_data_output_path", None,
"The path in which generated test input data will be written as tf"
" records. If None, do not generate test data. Must be a pattern template"
" as test_{}.tfrecords if processor has language specific test data.")
flags.DEFINE_string("meta_data_file_path", None,
"The path in which input meta data will be written.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_string("sp_model_file", "",
"The path to the model used by sentence piece tokenizer.")
flags.DEFINE_enum(
"tokenization", "WordPiece", ["WordPiece", "SentencePiece"],
"Specifies the tokenizer implementation, i.e., whether to use WordPiece "
"or SentencePiece tokenizer. Canonical BERT uses WordPiece tokenizer, "
"while ALBERT uses SentencePiece tokenizer.")
flags.DEFINE_string(
"tfds_params", "", "Comma-separated list of TFDS parameter assigments for "
"generic classfication data import (for more details "
"see the TfdsProcessor class documentation).")
def generate_classifier_dataset():
"""Generates classifier dataset and returns input meta data."""
if FLAGS.classification_task_name in [
"COLA",
"WNLI",
"SST-2",
"MRPC",
"QQP",
"STS-B",
"MNLI",
"QNLI",
"RTE",
"AX",
"SUPERGLUE-RTE",
"CB",
"BoolQ",
"WIC",
]:
assert not FLAGS.input_data_dir or FLAGS.tfds_params
else:
assert (FLAGS.input_data_dir and FLAGS.classification_task_name or
FLAGS.tfds_params)
if FLAGS.tokenization == "WordPiece":
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
processor_text_fn = tokenization.convert_to_unicode
else:
assert FLAGS.tokenization == "SentencePiece"
tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file)
processor_text_fn = functools.partial(
tokenization.preprocess_text, lower=FLAGS.do_lower_case)
if FLAGS.tfds_params:
processor = classifier_data_lib.TfdsProcessor(
tfds_params=FLAGS.tfds_params, process_text_fn=processor_text_fn)
return classifier_data_lib.generate_tf_record_from_data_file(
processor,
None,
tokenizer,
train_data_output_path=FLAGS.train_data_output_path,
eval_data_output_path=FLAGS.eval_data_output_path,
test_data_output_path=FLAGS.test_data_output_path,
max_seq_length=FLAGS.max_seq_length)
else:
processors = {
"ax":
classifier_data_lib.AxProcessor,
"cola":
classifier_data_lib.ColaProcessor,
"imdb":
classifier_data_lib.ImdbProcessor,
"mnli":
functools.partial(
classifier_data_lib.MnliProcessor, mnli_type=FLAGS.mnli_type),
"mrpc":
classifier_data_lib.MrpcProcessor,
"qnli":
classifier_data_lib.QnliProcessor,
"qqp":
classifier_data_lib.QqpProcessor,
"rte":
classifier_data_lib.RteProcessor,
"sst-2":
classifier_data_lib.SstProcessor,
"sts-b":
classifier_data_lib.StsBProcessor,
"xnli":
functools.partial(
classifier_data_lib.XnliProcessor,
language=FLAGS.xnli_language),
"paws-x":
functools.partial(
classifier_data_lib.PawsxProcessor,
language=FLAGS.pawsx_language),
"wnli":
classifier_data_lib.WnliProcessor,
"xtreme-xnli":
functools.partial(
classifier_data_lib.XtremeXnliProcessor,
translated_data_dir=FLAGS.translated_input_data_dir,
only_use_en_dev=FLAGS.only_use_en_dev),
"xtreme-paws-x":
functools.partial(
classifier_data_lib.XtremePawsxProcessor,
translated_data_dir=FLAGS.translated_input_data_dir,
only_use_en_dev=FLAGS.only_use_en_dev),
"ax-g":
classifier_data_lib.AXgProcessor,
"superglue-rte":
classifier_data_lib.SuperGLUERTEProcessor,
"cb":
classifier_data_lib.CBProcessor,
"boolq":
classifier_data_lib.BoolQProcessor,
"wic":
classifier_data_lib.WnliProcessor,
}
task_name = FLAGS.classification_task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name](process_text_fn=processor_text_fn)
return classifier_data_lib.generate_tf_record_from_data_file(
processor,
FLAGS.input_data_dir,
tokenizer,
train_data_output_path=FLAGS.train_data_output_path,
eval_data_output_path=FLAGS.eval_data_output_path,
test_data_output_path=FLAGS.test_data_output_path,
max_seq_length=FLAGS.max_seq_length)
def generate_regression_dataset():
"""Generates regression dataset and returns input meta data."""
if FLAGS.tokenization == "WordPiece":
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
processor_text_fn = tokenization.convert_to_unicode
else:
assert FLAGS.tokenization == "SentencePiece"
tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file)
processor_text_fn = functools.partial(
tokenization.preprocess_text, lower=FLAGS.do_lower_case)
if FLAGS.tfds_params:
processor = classifier_data_lib.TfdsProcessor(
tfds_params=FLAGS.tfds_params, process_text_fn=processor_text_fn)
return classifier_data_lib.generate_tf_record_from_data_file(
processor,
None,
tokenizer,
train_data_output_path=FLAGS.train_data_output_path,
eval_data_output_path=FLAGS.eval_data_output_path,
test_data_output_path=FLAGS.test_data_output_path,
max_seq_length=FLAGS.max_seq_length)
else:
raise ValueError("No data processor found for the given regression task.")
def generate_squad_dataset():
"""Generates squad training dataset and returns input meta data."""
assert FLAGS.squad_data_file
if FLAGS.tokenization == "WordPiece":
return squad_lib_wp.generate_tf_record_from_json_file(
input_file_path=FLAGS.squad_data_file,
vocab_file_path=FLAGS.vocab_file,
output_path=FLAGS.train_data_output_path,
translated_input_folder=FLAGS.translated_squad_data_folder,
max_seq_length=FLAGS.max_seq_length,
do_lower_case=FLAGS.do_lower_case,
max_query_length=FLAGS.max_query_length,
doc_stride=FLAGS.doc_stride,
version_2_with_negative=FLAGS.version_2_with_negative,
xlnet_format=FLAGS.xlnet_format)
else:
assert FLAGS.tokenization == "SentencePiece"
return squad_lib_sp.generate_tf_record_from_json_file(
input_file_path=FLAGS.squad_data_file,
sp_model_file=FLAGS.sp_model_file,
output_path=FLAGS.train_data_output_path,
translated_input_folder=FLAGS.translated_squad_data_folder,
max_seq_length=FLAGS.max_seq_length,
do_lower_case=FLAGS.do_lower_case,
max_query_length=FLAGS.max_query_length,
doc_stride=FLAGS.doc_stride,
xlnet_format=FLAGS.xlnet_format,
version_2_with_negative=FLAGS.version_2_with_negative)
def generate_retrieval_dataset():
"""Generate retrieval test and dev dataset and returns input meta data."""
assert (FLAGS.input_data_dir and FLAGS.retrieval_task_name)
if FLAGS.tokenization == "WordPiece":
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
processor_text_fn = tokenization.convert_to_unicode
else:
assert FLAGS.tokenization == "SentencePiece"
tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file)
processor_text_fn = functools.partial(
tokenization.preprocess_text, lower=FLAGS.do_lower_case)
processors = {
"bucc": sentence_retrieval_lib.BuccProcessor,
"tatoeba": sentence_retrieval_lib.TatoebaProcessor,
}
task_name = FLAGS.retrieval_task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % task_name)
processor = processors[task_name](process_text_fn=processor_text_fn)
return sentence_retrieval_lib.generate_sentence_retrevial_tf_record(
processor, FLAGS.input_data_dir, tokenizer, FLAGS.eval_data_output_path,
FLAGS.test_data_output_path, FLAGS.max_seq_length)
def generate_tagging_dataset():
"""Generates tagging dataset."""
processors = {
"panx":
functools.partial(
tagging_data_lib.PanxProcessor,
only_use_en_train=FLAGS.tagging_only_use_en_train,
only_use_en_dev=FLAGS.only_use_en_dev),
"udpos":
functools.partial(
tagging_data_lib.UdposProcessor,
only_use_en_train=FLAGS.tagging_only_use_en_train,
only_use_en_dev=FLAGS.only_use_en_dev),
}
task_name = FLAGS.tagging_task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % task_name)
if FLAGS.tokenization == "WordPiece":
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
processor_text_fn = tokenization.convert_to_unicode
elif FLAGS.tokenization == "SentencePiece":
tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file)
processor_text_fn = functools.partial(
tokenization.preprocess_text, lower=FLAGS.do_lower_case)
else:
raise ValueError("Unsupported tokenization: %s" % FLAGS.tokenization)
processor = processors[task_name]()
return tagging_data_lib.generate_tf_record_from_data_file(
processor, FLAGS.input_data_dir, tokenizer, FLAGS.max_seq_length,
FLAGS.train_data_output_path, FLAGS.eval_data_output_path,
FLAGS.test_data_output_path, processor_text_fn)
def main(_):
if FLAGS.tokenization == "WordPiece":
if not FLAGS.vocab_file:
raise ValueError(
"FLAG vocab_file for word-piece tokenizer is not specified.")
else:
assert FLAGS.tokenization == "SentencePiece"
if not FLAGS.sp_model_file:
raise ValueError(
"FLAG sp_model_file for sentence-piece tokenizer is not specified.")
if FLAGS.fine_tuning_task_type != "retrieval":
flags.mark_flag_as_required("train_data_output_path")
if FLAGS.fine_tuning_task_type == "classification":
input_meta_data = generate_classifier_dataset()
elif FLAGS.fine_tuning_task_type == "regression":
input_meta_data = generate_regression_dataset()
elif FLAGS.fine_tuning_task_type == "retrieval":
input_meta_data = generate_retrieval_dataset()
elif FLAGS.fine_tuning_task_type == "squad":
input_meta_data = generate_squad_dataset()
else:
assert FLAGS.fine_tuning_task_type == "tagging"
input_meta_data = generate_tagging_dataset()
tf.io.gfile.makedirs(os.path.dirname(FLAGS.meta_data_file_path))
with tf.io.gfile.GFile(FLAGS.meta_data_file_path, "w") as writer:
writer.write(json.dumps(input_meta_data, indent=4) + "\n")
if __name__ == "__main__":
flags.mark_flag_as_required("meta_data_file_path")
app.run(main)
| 16,693 | 36.769231 | 81 | py |
models | models-master/official/nlp/data/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/nlp/data/squad_lib_sp.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run ALBERT on SQuAD 1.1 and SQuAD 2.0 using sentence piece tokenization.
The file is forked from:
https://github.com/google-research/ALBERT/blob/master/run_squad_sp.py
"""
import collections
import copy
import json
import math
import os
from absl import logging
import numpy as np
import tensorflow as tf
from official.nlp.tools import tokenization
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
paragraph_text,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.paragraph_text = paragraph_text
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tok_start_to_orig_index,
tok_end_to_orig_index,
token_is_max_context,
tokens,
input_ids,
input_mask,
segment_ids,
paragraph_len,
class_index=None,
paragraph_mask=None,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tok_start_to_orig_index = tok_start_to_orig_index
self.tok_end_to_orig_index = tok_end_to_orig_index
self.token_is_max_context = token_is_max_context
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.paragraph_mask = paragraph_mask
self.segment_ids = segment_ids
self.paragraph_len = paragraph_len
self.class_index = class_index
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file,
is_training,
version_2_with_negative,
translated_input_folder=None):
"""Read a SQuAD json file into a list of SquadExample."""
del version_2_with_negative
with tf.io.gfile.GFile(input_file, "r") as reader:
input_data = json.load(reader)["data"]
if translated_input_folder is not None:
translated_files = tf.io.gfile.glob(
os.path.join(translated_input_folder, "*.json"))
for file in translated_files:
with tf.io.gfile.GFile(file, "r") as reader:
input_data.extend(json.load(reader)["data"])
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
orig_answer_text = None
is_impossible = False
if is_training:
is_impossible = qa.get("is_impossible", False)
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
start_position = answer["answer_start"]
else:
start_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
paragraph_text=paragraph_text,
orig_answer_text=orig_answer_text,
start_position=start_position,
is_impossible=is_impossible)
examples.append(example)
return examples
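# For reference, a minimal sketch of the SQuAD-style JSON consumed by
# read_squad_examples above (the concrete values are made up):
#
#   {"data": [{"paragraphs": [{
#       "context": "Some paragraph text.",
#       "qas": [{"id": "q1", "question": "Which text?", "is_impossible": false,
#                "answers": [{"text": "paragraph text", "answer_start": 5}]}]
#   }]}]}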
def _convert_index(index, pos, m=None, is_start=True):
"""Converts index."""
if index[pos] is not None:
return index[pos]
n = len(index)
rear = pos
while rear < n - 1 and index[rear] is None:
rear += 1
front = pos
while front > 0 and index[front] is None:
front -= 1
assert index[front] is not None or index[rear] is not None
if index[front] is None:
if index[rear] >= 1: # pytype: disable=unsupported-operands
if is_start:
return 0
else:
return index[rear] - 1
return index[rear]
if index[rear] is None:
if m is not None and index[front] < m - 1:
if is_start:
return index[front] + 1
else:
return m - 1
return index[front]
if is_start:
if index[rear] > index[front] + 1:
return index[front] + 1
else:
return index[rear]
else:
if index[rear] > index[front] + 1:
return index[rear] - 1
else:
return index[front]
def convert_examples_to_features(examples,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
output_fn,
do_lower_case,
xlnet_format=False,
batch_size=None):
"""Loads a data file into a list of `InputBatch`s."""
cnt_pos, cnt_neg = 0, 0
base_id = 1000000000
unique_id = base_id
max_n, max_m = 1024, 1024
f = np.zeros((max_n, max_m), dtype=np.float32)
for (example_index, example) in enumerate(examples):
if example_index % 100 == 0:
logging.info("Converting %d/%d pos %d neg %d", example_index,
len(examples), cnt_pos, cnt_neg)
query_tokens = tokenization.encode_ids(
tokenizer.sp_model,
tokenization.preprocess_text(
example.question_text, lower=do_lower_case))
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
paragraph_text = example.paragraph_text
para_tokens = tokenization.encode_pieces(
tokenizer.sp_model,
tokenization.preprocess_text(
example.paragraph_text, lower=do_lower_case))
chartok_to_tok_index = []
tok_start_to_chartok_index = []
tok_end_to_chartok_index = []
char_cnt = 0
for i, token in enumerate(para_tokens):
new_token = token.replace(tokenization.SPIECE_UNDERLINE, " ")
chartok_to_tok_index.extend([i] * len(new_token))
tok_start_to_chartok_index.append(char_cnt)
char_cnt += len(new_token)
tok_end_to_chartok_index.append(char_cnt - 1)
tok_cat_text = "".join(para_tokens).replace(tokenization.SPIECE_UNDERLINE,
" ")
n, m = len(paragraph_text), len(tok_cat_text)
if n > max_n or m > max_m:
max_n = max(n, max_n)
max_m = max(m, max_m)
f = np.zeros((max_n, max_m), dtype=np.float32)
g = {}
# pylint: disable=cell-var-from-loop
def _lcs_match(max_dist, n=n, m=m):
"""Longest-common-substring algorithm."""
f.fill(0)
g.clear()
      ### longest common subsequence
# f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))
for i in range(n):
# unlike standard LCS, this is specifically optimized for the setting
# because the mismatch between sentence pieces and original text will
# be small
for j in range(i - max_dist, i + max_dist):
if j >= m or j < 0:
continue
if i > 0:
g[(i, j)] = 0
f[i, j] = f[i - 1, j]
if j > 0 and f[i, j - 1] > f[i, j]:
g[(i, j)] = 1
f[i, j] = f[i, j - 1]
f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0
if (tokenization.preprocess_text(
paragraph_text[i], lower=do_lower_case,
remove_space=False) == tok_cat_text[j] and f_prev + 1 > f[i, j]):
g[(i, j)] = 2
f[i, j] = f_prev + 1
# pylint: enable=cell-var-from-loop
max_dist = abs(n - m) + 5
for _ in range(2):
_lcs_match(max_dist)
if f[n - 1, m - 1] > 0.8 * n:
break
max_dist *= 2
orig_to_chartok_index = [None] * n
chartok_to_orig_index = [None] * m
i, j = n - 1, m - 1
while i >= 0 and j >= 0:
if (i, j) not in g:
break
if g[(i, j)] == 2:
orig_to_chartok_index[i] = j
chartok_to_orig_index[j] = i
i, j = i - 1, j - 1
elif g[(i, j)] == 1:
j = j - 1
else:
i = i - 1
if (all(v is None for v in orig_to_chartok_index) or
f[n - 1, m - 1] < 0.8 * n):
logging.info("MISMATCH DETECTED!")
continue
tok_start_to_orig_index = []
tok_end_to_orig_index = []
for i in range(len(para_tokens)):
start_chartok_pos = tok_start_to_chartok_index[i]
end_chartok_pos = tok_end_to_chartok_index[i]
start_orig_pos = _convert_index(
chartok_to_orig_index, start_chartok_pos, n, is_start=True)
end_orig_pos = _convert_index(
chartok_to_orig_index, end_chartok_pos, n, is_start=False)
tok_start_to_orig_index.append(start_orig_pos)
tok_end_to_orig_index.append(end_orig_pos)
if not is_training:
tok_start_position = tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = 0
tok_end_position = 0
if is_training and not example.is_impossible:
start_position = example.start_position
end_position = start_position + len(example.orig_answer_text) - 1
start_chartok_pos = _convert_index(
orig_to_chartok_index, start_position, is_start=True)
tok_start_position = chartok_to_tok_index[start_chartok_pos]
end_chartok_pos = _convert_index(
orig_to_chartok_index, end_position, is_start=False)
tok_end_position = chartok_to_tok_index[end_chartok_pos]
assert tok_start_position <= tok_end_position
def _piece_to_id(x):
return tokenizer.sp_model.PieceToId(x)
all_doc_tokens = list(map(_piece_to_id, para_tokens))
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
    # of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_is_max_context = {}
segment_ids = []
# Paragraph mask used in XLNet.
# 1 represents paragraph and class tokens.
# 0 represents query and other special tokens.
paragraph_mask = []
cur_tok_start_to_orig_index = []
cur_tok_end_to_orig_index = []
# pylint: disable=cell-var-from-loop
def process_query(seg_q):
for token in query_tokens:
tokens.append(token)
segment_ids.append(seg_q)
paragraph_mask.append(0)
tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
segment_ids.append(seg_q)
paragraph_mask.append(0)
def process_paragraph(seg_p):
for i in range(doc_span.length):
split_token_index = doc_span.start + i
cur_tok_start_to_orig_index.append(
tok_start_to_orig_index[split_token_index])
cur_tok_end_to_orig_index.append(
tok_end_to_orig_index[split_token_index])
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(seg_p)
paragraph_mask.append(1)
tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
segment_ids.append(seg_p)
paragraph_mask.append(0)
return len(tokens)
def process_class(seg_class):
class_index = len(segment_ids)
tokens.append(tokenizer.sp_model.PieceToId("[CLS]"))
segment_ids.append(seg_class)
paragraph_mask.append(1)
return class_index
if xlnet_format:
seg_p, seg_q, seg_class, seg_pad = 0, 1, 2, 3
paragraph_len = process_paragraph(seg_p)
process_query(seg_q)
class_index = process_class(seg_class)
else:
seg_p, seg_q, seg_class, seg_pad = 1, 0, 0, 0
class_index = process_class(seg_class)
process_query(seg_q)
paragraph_len = process_paragraph(seg_p)
input_ids = tokens
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(seg_pad)
paragraph_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(paragraph_mask) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
# continue
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = 0 if xlnet_format else len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = class_index
end_position = class_index
if example_index < 20:
logging.info("*** Example ***")
logging.info("unique_id: %s", (unique_id))
logging.info("example_index: %s", (example_index))
logging.info("doc_span_index: %s", (doc_span_index))
logging.info("tok_start_to_orig_index: %s",
" ".join([str(x) for x in cur_tok_start_to_orig_index]))
logging.info("tok_end_to_orig_index: %s",
" ".join([str(x) for x in cur_tok_end_to_orig_index]))
logging.info(
"token_is_max_context: %s", " ".join(
["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()]))
logging.info(
"input_pieces: %s",
" ".join([tokenizer.sp_model.IdToPiece(x) for x in tokens]))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logging.info("paragraph_mask: %s", " ".join(
[str(x) for x in paragraph_mask]))
logging.info("class_index: %d", class_index)
if is_training and span_is_impossible:
logging.info("impossible example span")
if is_training and not span_is_impossible:
pieces = [
tokenizer.sp_model.IdToPiece(token)
for token in tokens[start_position:(end_position + 1)]
]
answer_text = tokenizer.sp_model.DecodePieces(pieces)
logging.info("start_position: %d", (start_position))
logging.info("end_position: %d", (end_position))
logging.info("answer: %s", (tokenization.printable_text(answer_text)))
# With multi processing, the example_index is actually the index
# within the current process therefore we use example_index=None
# to avoid being used in the future.
# The current code does not use example_index of training data.
if is_training:
feat_example_index = None
else:
feat_example_index = example_index
feature = InputFeatures(
unique_id=unique_id,
example_index=feat_example_index,
doc_span_index=doc_span_index,
tok_start_to_orig_index=cur_tok_start_to_orig_index,
tok_end_to_orig_index=cur_tok_end_to_orig_index,
token_is_max_context=token_is_max_context,
tokens=[tokenizer.sp_model.IdToPiece(x) for x in tokens],
input_ids=input_ids,
input_mask=input_mask,
paragraph_mask=paragraph_mask,
segment_ids=segment_ids,
paragraph_len=paragraph_len,
class_index=class_index,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible)
# Run callback
if is_training:
output_fn(feature)
else:
output_fn(feature, is_padding=False)
unique_id += 1
if span_is_impossible:
cnt_neg += 1
else:
cnt_pos += 1
if not is_training and feature:
assert batch_size
num_padding = 0
num_examples = unique_id - base_id
    if num_examples % batch_size != 0:
      num_padding = batch_size - (num_examples % batch_size)
    dummy_feature = copy.deepcopy(feature)
    for _ in range(num_padding):
      dummy_feature.unique_id = unique_id
      # Run callback on the dummy feature so the total number of features
      # written is a multiple of the batch size.
      output_fn(dummy_feature, is_padding=True)
unique_id += 1
logging.info("Total number of instances: %d = pos %d neg %d",
cnt_pos + cnt_neg, cnt_pos, cnt_neg)
return unique_id - base_id
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
  # Because of the sliding window approach taken to scoring documents, a single
  # token can appear in multiple document spans. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
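# Illustrative sketch (spans below are made up): with
#   span A = _DocSpan(start=0, length=5)   # tokens 0..4
#   span B = _DocSpan(start=3, length=5)   # tokens 3..7
# token 4 has (left, right) context (4, 0) in A but (1, 3) in B, so B scores
# min(1, 3) + 0.01 * 5 = 1.05 versus A's 0.05, and
# _check_is_max_context([A, B], cur_span_index=1, position=4) returns True.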
def write_predictions(all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
version_2_with_negative=False,
null_score_diff_threshold=0.0,
verbose=False):
"""Write final predictions to the json file and log-odds of null if needed."""
logging.info("Writing predictions to: %s", (output_prediction_file))
logging.info("Writing nbest to: %s", (output_nbest_file))
all_predictions, all_nbest_json, scores_diff_json = (
postprocess_output(
all_examples=all_examples,
all_features=all_features,
all_results=all_results,
n_best_size=n_best_size,
max_answer_length=max_answer_length,
do_lower_case=do_lower_case,
version_2_with_negative=version_2_with_negative,
null_score_diff_threshold=null_score_diff_threshold,
verbose=verbose))
write_to_json_files(all_predictions, output_prediction_file)
write_to_json_files(all_nbest_json, output_nbest_file)
if version_2_with_negative:
write_to_json_files(scores_diff_json, output_null_log_odds_file)
def postprocess_output(all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
version_2_with_negative=False,
null_score_diff_threshold=0.0,
xlnet_format=False,
verbose=False):
"""Postprocess model output, to form predicton results."""
del do_lower_case, verbose
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
    min_null_feature_index = 0  # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
if feature.unique_id not in unique_id_to_result:
logging.info("Skip eval example %s, not in pred.", feature.unique_id)
continue
result = unique_id_to_result[feature.unique_id]
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
if xlnet_format:
feature_null_score = result.class_logits
else:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
doc_offset = 0 if xlnet_format else feature.tokens.index("[SEP]") + 1
for (start_index, start_logit,
end_index, end_logit) in _get_best_indexes_and_logits(
result=result,
n_best_size=n_best_size,
xlnet_format=xlnet_format):
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index - doc_offset >= len(feature.tok_start_to_orig_index):
continue
if end_index - doc_offset >= len(feature.tok_end_to_orig_index):
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index - doc_offset,
end_index=end_index - doc_offset,
start_logit=start_logit,
end_logit=end_logit))
if version_2_with_negative and not xlnet_format:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=-1,
end_index=-1,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index >= 0 or xlnet_format: # this is a non-null prediction
tok_start_to_orig_index = feature.tok_start_to_orig_index
tok_end_to_orig_index = feature.tok_end_to_orig_index
start_orig_pos = tok_start_to_orig_index[pred.start_index]
end_orig_pos = tok_end_to_orig_index[pred.end_index]
paragraph_text = example.paragraph_text
final_text = paragraph_text[start_orig_pos:end_orig_pos + 1].strip()
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
    # if we didn't include the empty option in the n-best, include it
if version_2_with_negative and not xlnet_format:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
assert best_non_null_entry is not None
if xlnet_format:
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
all_predictions[example.qas_id] = best_non_null_entry.text
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json
def write_to_json_files(json_records, json_file):
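  """Writes json records (e.g., a predictions dict) to the given json file."""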
with tf.io.gfile.GFile(json_file, "w") as writer:
writer.write(json.dumps(json_records, indent=4) + "\n")
def _get_best_indexes_and_logits(result,
n_best_size,
xlnet_format=False):
"""Generates the n-best indexes and logits from a list."""
if xlnet_format:
for i in range(n_best_size):
for j in range(n_best_size):
j_index = i * n_best_size + j
yield (result.start_indexes[i], result.start_logits[i],
result.end_indexes[j_index], result.end_logits[j_index])
else:
start_index_and_score = sorted(enumerate(result.start_logits),
key=lambda x: x[1], reverse=True)
end_index_and_score = sorted(enumerate(result.end_logits),
key=lambda x: x[1], reverse=True)
for i in range(len(start_index_and_score)):
if i >= n_best_size:
break
for j in range(len(end_index_and_score)):
if j >= n_best_size:
break
yield (start_index_and_score[i][0], start_index_and_score[i][1],
end_index_and_score[j][0], end_index_and_score[j][1])
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
tf.io.gfile.makedirs(os.path.dirname(filename))
self._writer = tf.io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if feature.paragraph_mask is not None:
features["paragraph_mask"] = create_int_feature(feature.paragraph_mask)
if feature.class_index is not None:
features["class_index"] = create_int_feature([feature.class_index])
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def generate_tf_record_from_json_file(input_file_path,
sp_model_file,
output_path,
translated_input_folder=None,
max_seq_length=384,
do_lower_case=True,
max_query_length=64,
doc_stride=128,
xlnet_format=False,
version_2_with_negative=False):
"""Generates and saves training data into a tf record file."""
train_examples = read_squad_examples(
input_file=input_file_path,
is_training=True,
version_2_with_negative=version_2_with_negative,
translated_input_folder=translated_input_folder)
tokenizer = tokenization.FullSentencePieceTokenizer(
sp_model_file=sp_model_file)
train_writer = FeatureWriter(
filename=output_path, is_training=True)
number_of_examples = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
xlnet_format=xlnet_format,
do_lower_case=do_lower_case)
train_writer.close()
meta_data = {
"task_type": "bert_squad",
"train_data_size": number_of_examples,
"max_seq_length": max_seq_length,
"max_query_length": max_query_length,
"doc_stride": doc_stride,
"version_2_with_negative": version_2_with_negative,
}
return meta_data
| 34,939 | 34.762538 | 80 | py |
models | models-master/official/nlp/data/wmt_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the transformer model to read, filter, and batch examples.
Batching scheme
Prior to batching, elements in the dataset are grouped by length (max between
'inputs' and 'targets' length). Each group is then batched such that:
group_batch_size * length <= batch_size.
Another way to view batch_size is the maximum number of tokens in each batch.
Once batched, each element in the dataset will have the shape:
{'inputs': [group_batch_size, padded_input_length],
'targets': [group_batch_size, padded_target_length]}
Lengths are padded to the longest 'inputs' or 'targets' sequence in the batch
(padded_input_length and padded_target_length can be different).
This batching scheme decreases the fraction of padding tokens per training
batch, thus improving the training speed significantly.
"""
from typing import Dict, Optional
import dataclasses
import tensorflow as tf
import tensorflow_text as tftxt
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
# Example grouping constants. Defines length boundaries for each group.
# These values are the defaults used in Tensor2Tensor.
_MIN_BOUNDARY = 8
_BOUNDARY_SCALE = 1.1
def _get_example_length(example):
"""Returns the maximum length between the example inputs and targets."""
length = tf.maximum(tf.shape(example[0])[0], tf.shape(example[1])[0])
return length
def _create_min_max_boundaries(max_length,
min_boundary=_MIN_BOUNDARY,
boundary_scale=_BOUNDARY_SCALE):
"""Create min and max boundary lists up to max_length.
For example, when max_length=24, min_boundary=4 and boundary_scale=2, the
returned values will be:
buckets_min = [0, 4, 8, 16]
buckets_max = [4, 8, 16, 25]
Args:
max_length: The maximum length of example in dataset.
min_boundary: Minimum length in boundary.
boundary_scale: Amount to scale consecutive boundaries in the list.
Returns:
min and max boundary lists
"""
# Create bucket boundaries list by scaling the previous boundary or adding 1
# (to ensure increasing boundary sizes).
bucket_boundaries = []
x = min_boundary
while x < max_length:
bucket_boundaries.append(x)
x = max(x + 1, int(x * boundary_scale))
# Create min and max boundary lists from the initial list.
buckets_min = [0] + bucket_boundaries
buckets_max = bucket_boundaries + [max_length + 1]
return buckets_min, buckets_max
def _batch_examples(dataset, batch_size, max_length):
"""Group examples by similar lengths, and return batched dataset.
Each batch of similar-length examples are padded to the same length, and may
have different number of elements in each batch, such that:
group_batch_size * padded_length <= batch_size.
This decreases the number of padding tokens per batch, which improves the
training speed.
Args:
dataset: Dataset of unbatched examples.
batch_size: Max number of tokens per batch of examples.
max_length: Max number of tokens in an example input or target sequence.
Returns:
Dataset of batched examples with similar lengths.
"""
# Get min and max boundary lists for each example. These are used to calculate
# the `bucket_id`, which is the index at which:
# buckets_min[bucket_id] <= len(example) < buckets_max[bucket_id]
# Note that using both min and max lists improves the performance.
buckets_min, buckets_max = _create_min_max_boundaries(max_length)
# Create list of batch sizes for each bucket_id, so that
# bucket_batch_size[bucket_id] * buckets_max[bucket_id] <= batch_size
bucket_batch_sizes = [int(batch_size) // x for x in buckets_max]
# Validates bucket batch sizes.
  if any(bucket_batch_size <= 0 for bucket_batch_size in bucket_batch_sizes):
    raise ValueError(
        'The token budget (global batch size) is too small: it yields a zero '
        'batch size for at least one length bucket: %s' %
        str(bucket_batch_sizes))
# bucket_id will be a tensor, so convert this list to a tensor as well.
bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
def example_to_bucket_id(example):
"""Return int64 bucket id for this example, calculated based on length."""
example_input = example['inputs']
example_target = example['targets']
seq_length = _get_example_length((example_input, example_target))
conditions_c = tf.logical_and(
tf.less_equal(buckets_min, seq_length), tf.less(seq_length,
buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))
return bucket_id
def window_size_fn(bucket_id):
"""Return number of examples to be grouped when given a bucket id."""
return bucket_batch_sizes[bucket_id]
def batching_fn(bucket_id, grouped_dataset):
"""Batch and add padding to a dataset of elements with similar lengths."""
bucket_batch_size = window_size_fn(bucket_id)
# Batch the dataset and add padding so that all input sequences in the
# examples have the same length, and all target sequences have the same
# lengths as well. Resulting lengths of inputs and targets can differ.
padded_shapes = dict([
(name, [None] * len(spec.shape))
for name, spec in grouped_dataset.element_spec.items()
])
return grouped_dataset.padded_batch(bucket_batch_size, padded_shapes)
return dataset.apply(
tf.data.experimental.group_by_window(
key_func=example_to_bucket_id,
reduce_func=batching_fn,
window_size=None,
window_size_func=window_size_fn))
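# Illustrative sketch of the bucketing arithmetic above (numbers are made up):
# with a token budget of batch_size=4096 and a bucket whose maximum padded
# length is 64, that bucket batches 4096 // 64 = 64 examples per batch, while
# a bucket capped at 128 tokens batches 4096 // 128 = 32 examples, so every
# padded batch stays within the same token budget.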
@dataclasses.dataclass
class WMTDataConfig(cfg.DataConfig):
"""Data config for WMT translation."""
max_seq_length: int = 64
static_batch: bool = False
sentencepiece_model_path: str = ''
src_lang: str = ''
tgt_lang: str = ''
transform_and_batch: bool = True
has_unique_id: bool = False
@data_loader_factory.register_data_loader_cls(WMTDataConfig)
class WMTDataLoader(data_loader.DataLoader):
"""A class to load dataset for WMT translation task."""
def __init__(self, params: WMTDataConfig):
self._params = params
self._max_seq_length = params.max_seq_length
self._static_batch = params.static_batch
self._global_batch_size = params.global_batch_size
if self._params.transform_and_batch:
self._tokenizer = tftxt.SentencepieceTokenizer(
model=tf.io.gfile.GFile(params.sentencepiece_model_path, 'rb').read(),
add_eos=True)
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
name_to_features = {
self._params.src_lang: tf.io.FixedLenFeature([], tf.string),
self._params.tgt_lang: tf.io.FixedLenFeature([], tf.string),
}
if self._params.has_unique_id:
name_to_features['unique_id'] = tf.io.FixedLenFeature([], tf.int64)
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _tokenize(self, inputs) -> Dict[str, tf.Tensor]:
tokenized_inputs = {}
for k, v in inputs.items():
if k == self._params.src_lang:
tokenized_inputs['inputs'] = self._tokenizer.tokenize(v)
elif k == self._params.tgt_lang:
tokenized_inputs['targets'] = self._tokenizer.tokenize(v)
else:
tokenized_inputs[k] = v
return tokenized_inputs
def _filter_max_length(self, inputs):
return tf.logical_and(
tf.shape(inputs['inputs'])[0] <= self._max_seq_length,
tf.shape(inputs['targets'])[0] <= self._max_seq_length)
def _maybe_truncate(self, inputs):
truncated_inputs = {}
for k, v in inputs.items():
if k == 'inputs' or k == 'targets':
truncated_inputs[k] = tf.pad(
v[:self._max_seq_length - 1], [[0, 1]],
constant_values=1) if tf.shape(v)[0] > self._max_seq_length else v
else:
truncated_inputs[k] = v
return truncated_inputs
def _tokenize_bucketize_and_batch(
self,
dataset,
input_context: Optional[tf.distribute.InputContext] = None):
dataset = dataset.map(
self._tokenize, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if self._params.is_training:
dataset = dataset.filter(self._filter_max_length)
else:
dataset = dataset.map(
self._maybe_truncate,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
per_replica_batch_size = input_context.get_per_replica_batch_size(
self._global_batch_size) if input_context else self._global_batch_size
if self._static_batch:
padded_shapes = {}
for name, _ in dataset.element_spec.items():
if name == 'unique_id':
padded_shapes[name] = []
else:
          padded_shapes[name] = [self._max_seq_length]
batch_size = per_replica_batch_size
if self._params.is_training:
batch_size = int(batch_size // self._max_seq_length)
dataset = dataset.padded_batch(
batch_size,
padded_shapes,
drop_remainder=True)
else:
# Group and batch such that each batch has examples of similar length.
dataset = _batch_examples(dataset, per_replica_batch_size,
self._max_seq_length)
# Prefetch the next element to improve speed of input pipeline.
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
decoder_fn = None
# Only decode for TFRecords.
if self._params.input_path:
decoder_fn = self._decode
def _identity(
dataset, input_context: Optional[tf.distribute.InputContext] = None):
del input_context
return dataset
transform_and_batch_fn = _identity
if self._params.transform_and_batch:
transform_and_batch_fn = self._tokenize_bucketize_and_batch
reader = input_reader.InputReader(
params=self._params,
decoder_fn=decoder_fn,
transform_and_batch_fn=transform_and_batch_fn)
return reader.read(input_context)
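# Minimal usage sketch (the paths below are placeholders, not real files, and
# the helper name is made up): the loader reads parallel 'en'/'de' text
# features from TFRecords, tokenizes them with the given sentencepiece model
# and emits token-budget batches.
def _example_wmt_loader():
  config = WMTDataConfig(
      input_path='/path/to/wmt-train-*.tfrecord',
      sentencepiece_model_path='/path/to/spm.model',
      src_lang='en',
      tgt_lang='de',
      max_seq_length=64,
      global_batch_size=4096,
      is_training=True)
  return WMTDataLoader(config).load()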
| 11,174 | 36.753378 | 80 | py |
models | models-master/official/nlp/data/pretrain_dynamic_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset loader for the pre-training with dynamic sequence length."""
from typing import Optional, Tuple
import dataclasses
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader_factory
from official.nlp.data import pretrain_dataloader
@dataclasses.dataclass
class BertPretrainDataConfig(cfg.DataConfig):
"""Data config for BERT pretraining task (tasks/masked_lm)."""
input_path: str = ''
global_batch_size: int = 512
is_training: bool = True
seq_bucket_lengths: Tuple[int, ...] = (128, 256, 384, 512,)
# TODO(rxsang): `seq_bucket_window_scale` is only useful when round robin
# tf.data service is disabled. Deprecate this flag once we always enable round
# robin tf.data service.
seq_bucket_window_scale: int = 8
use_next_sentence_label: bool = True
use_position_id: bool = False
deterministic: bool = False
enable_tf_data_service: bool = False
enable_round_robin_tf_data_service: bool = False
tf_data_service_job_name: str = 'bert_pretrain'
use_v2_feature_names: bool = False
@data_loader_factory.register_data_loader_cls(BertPretrainDataConfig)
class PretrainingDynamicDataLoader(pretrain_dataloader.BertPretrainDataLoader):
  """Dataset loader for BERT-style pretraining with dynamic sequence length.
  Bucketizes the input id features by the seq_bucket_lengths and pads features
  to the bucket boundaries. The mask features are usually shorter than the
  input id features and can also be dynamic. We require the mask feature
  lengths within a bucket to be the same. For example, with [128, 256] buckets,
  the mask features for bucket 128 should always have length X and the mask
  features for bucket 256 should always have length Y.
The dataloader does not filter out empty masks. Make sure to handle this
in the model.
"""
def __init__(self, params):
self._params = params
if len(params.seq_bucket_lengths) < 1:
raise ValueError('The seq_bucket_lengths cannot be empty.')
self._seq_bucket_lengths = params.seq_bucket_lengths
self._seq_bucket_window_scale = params.seq_bucket_window_scale
self._global_batch_size = params.global_batch_size
self._use_next_sentence_label = params.use_next_sentence_label
self._use_position_id = params.use_position_id
self._drop_remainder = params.drop_remainder
self._enable_tf_data_service = params.enable_tf_data_service
self._enable_round_robin_tf_data_service = (
params.enable_round_robin_tf_data_service)
self._mask_keys = [
'masked_lm_positions', 'masked_lm_ids', 'masked_lm_weights'
]
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
name_to_features = {
'input_mask': tf.io.VarLenFeature(tf.int64),
'masked_lm_positions': tf.io.VarLenFeature(tf.int64),
'masked_lm_ids': tf.io.VarLenFeature(tf.int64),
'masked_lm_weights': tf.io.VarLenFeature(tf.float32),
}
if self._params.use_v2_feature_names:
input_ids_key = 'input_word_ids'
segment_key = 'input_type_ids'
name_to_features.update({
input_ids_key: tf.io.VarLenFeature(tf.int64),
segment_key: tf.io.VarLenFeature(tf.int64),
})
else:
input_ids_key = 'input_ids'
segment_key = 'segment_ids'
name_to_features.update({
input_ids_key: tf.io.VarLenFeature(tf.int64),
segment_key: tf.io.VarLenFeature(tf.int64),
})
if self._use_next_sentence_label:
name_to_features['next_sentence_labels'] = tf.io.FixedLenFeature([1],
tf.int64)
dynamic_keys = [input_ids_key, 'input_mask', segment_key]
if self._use_position_id:
name_to_features['position_ids'] = tf.io.VarLenFeature(tf.int64)
dynamic_keys.append('position_ids')
example = tf.io.parse_single_example(record, name_to_features)
for key in dynamic_keys + self._mask_keys:
example[key] = tf.sparse.to_dense(example[key])
    # Truncate trailing padding after the last non-pad token in the
    # sequence length dimension. Padding that occurs before the last
    # non-pad token is kept.
mask = tf.math.greater(
tf.math.cumsum(example[input_ids_key], reverse=True), 0)
for key in dynamic_keys:
example[key] = tf.boolean_mask(example[key], mask)
# masked_lm_ids should be 0 padded.
# Change mask features to -1 padding so that we can differentiate
# padding from data or from bucketizing.
mask = tf.math.not_equal(example['masked_lm_ids'], 0)
example['masked_lm_ids'] = tf.where(
mask, example['masked_lm_ids'],
        -tf.ones(tf.shape(example['masked_lm_ids']),
                 dtype=example['masked_lm_ids'].dtype))
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
# tf.data service uses dataset graph fingerprint to distinguish input
# pipeline jobs, thus we sort the keys here to make sure they are generated
# in a deterministic order each time the dataset function is traced.
for name in sorted(list(example.keys())):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _bucketize_and_batch(
self,
dataset,
input_context: Optional[tf.distribute.InputContext] = None):
"""Bucketize by sequence length and batch the datasets."""
per_replica_batch_size = input_context.get_per_replica_batch_size(
self._global_batch_size) if input_context else self._global_batch_size
def element_length_func(example, seq_len_dim):
return tf.shape(example['input_word_ids'])[seq_len_dim]
bucket_boundaries = [length + 1 for length in self._seq_bucket_lengths]
bucket_batch_sizes = [per_replica_batch_size] * (len(bucket_boundaries) + 1)
# Bucketize and batch the dataset with per replica batch size first.
dataset = dataset.apply(
tf.data.experimental.bucket_by_sequence_length(
lambda example: tf.cast(element_length_func(example, 0), tf.int32),
bucket_boundaries,
bucket_batch_sizes,
pad_to_bucket_boundary=True,
drop_remainder=self._drop_remainder))
if input_context:
window_size = input_context.num_replicas_in_sync
if self._enable_tf_data_service and (
not self._enable_round_robin_tf_data_service):
# If tf.data service is enabled but round-robin behavior is not enabled,
# different TPU workers may fetch data from one tf.data service worker
# in different speed. We set the window size to be
# `seq_bucket_window_scale` larger to leave buffer if some workers are
# fetching data faster than others, so all the data within the same
# global batch can still have more chances to be in the same bucket.
window_size *= self._seq_bucket_window_scale
# Group `num_replicas_in_sync` batches from same bucket together, so all
# replicas can get the same sequence length for one global step.
dataset = dataset.apply(
tf.data.experimental.group_by_window(
key_func=lambda example: tf.cast( # pylint: disable=g-long-lambda
element_length_func(example, 1), tf.int64),
reduce_func=lambda _, x: tf.data.Dataset.from_tensors(x),
window_size=window_size))
dataset = dataset.flat_map(lambda x: x)
def _remove_pads_from_bucketize(features):
# All mask features must have the same effective length.
# The real masked ids padding token is -1 and 0 comes from
# bucket_by_sequence_length.
mask = tf.math.not_equal(features['masked_lm_ids'], 0)
mask_per_example = tf.math.reduce_sum(tf.cast(mask, tf.int32), axis=1)
normalized = tf.cast(
mask_per_example / tf.math.reduce_max(mask_per_example), tf.int32)
assert_op = tf.debugging.assert_equal(
tf.math.reduce_sum(normalized), per_replica_batch_size,
          'Number of non-padded mask tokens is not the same for every example '
          'in the same sequence length bucket.')
with tf.control_dependencies([assert_op]):
for key in self._mask_keys:
features[key] = tf.reshape(
tf.boolean_mask(
features[key], mask), [per_replica_batch_size, -1])
# Revert masked_lm_ids to be 0-padded.
mask = tf.math.not_equal(features['masked_lm_ids'], -1)
features['masked_lm_ids'] = tf.where(
mask, features['masked_lm_ids'],
tf.zeros(
tf.shape(features['masked_lm_ids']),
dtype=features['masked_lm_ids'].dtype))
return features
dataset = dataset.map(_remove_pads_from_bucketize)
return dataset
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
params=self._params,
decoder_fn=self._decode,
parser_fn=self._parse,
transform_and_batch_fn=self._bucketize_and_batch)
return reader.read(input_context)
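# Standalone sketch of the bucketing mechanism used above (toy values, not
# part of the loader; the helper name is made up): with bucket lengths (4, 8)
# the boundaries become [5, 9], so sequences of length 2 and 3 are padded to
# 4 tokens and batched together, while sequences of length 6 and 7 are padded
# to 8 tokens.
def _example_bucket_by_sequence_length():
  dataset = tf.data.Dataset.from_generator(
      lambda: ([1] * n for n in (2, 3, 6, 7)),
      output_signature=tf.TensorSpec([None], tf.int32))
  return dataset.apply(
      tf.data.experimental.bucket_by_sequence_length(
          lambda x: tf.shape(x)[0],
          bucket_boundaries=[5, 9],
          bucket_batch_sizes=[2, 2, 2],
          pad_to_bucket_boundary=True))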
| 9,814 | 42.816964 | 80 | py |
models | models-master/official/nlp/data/pretrain_text_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads text dataset for the BERT pretraining task."""
import dataclasses
from typing import List, Mapping, Optional, Text
import tensorflow as tf
import tensorflow_text as tf_text
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
from official.nlp.modeling.ops import segment_extractor
@dataclasses.dataclass
class BertPretrainTextDataConfig(cfg.DataConfig):
"""Data config for BERT pretraining task (tasks/masked_lm) from text."""
input_path: str = ""
doc_batch_size: int = 8
global_batch_size: int = 512
is_training: bool = True
seq_length: int = 512
max_predictions_per_seq: int = 76
use_next_sentence_label: bool = True
  # The names of the text feature fields. The text features will be
# concatenated in order.
# Note: More than 1 field name is not compatible with NSP.
text_field_names: Optional[List[str]] = dataclasses.field(
default_factory=lambda: ["text"])
vocab_file_path: str = ""
masking_rate: float = 0.15
use_whole_word_masking: bool = False
file_type: str = "tfrecord"
_CLS_TOKEN = b"[CLS]"
_SEP_TOKEN = b"[SEP]"
_MASK_TOKEN = b"[MASK]"
_NUM_OOV_BUCKETS = 1
# Accounts for [CLS] and 2 x [SEP] tokens
_NUM_SPECIAL_TOKENS = 3
@data_loader_factory.register_data_loader_cls(BertPretrainTextDataConfig)
class BertPretrainTextDataLoader(data_loader.DataLoader):
"""A class to load text dataset for BERT pretraining task."""
def __init__(self, params):
"""Inits `BertPretrainTextDataLoader` class.
Args:
params: A `BertPretrainTextDataConfig` object.
"""
if len(params.text_field_names) > 1 and params.use_next_sentence_label:
      raise ValueError("Currently there is no support for more than one text "
                       "field while generating next sentence labels.")
self._params = params
self._seq_length = params.seq_length
self._max_predictions_per_seq = params.max_predictions_per_seq
self._use_next_sentence_label = params.use_next_sentence_label
self._masking_rate = params.masking_rate
self._use_whole_word_masking = params.use_whole_word_masking
lookup_table_init = tf.lookup.TextFileInitializer(
params.vocab_file_path,
key_dtype=tf.string,
key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
value_dtype=tf.int64,
value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
self._vocab_lookup_table = tf.lookup.StaticVocabularyTable(
lookup_table_init,
num_oov_buckets=_NUM_OOV_BUCKETS,
lookup_key_dtype=tf.string)
self._cls_token = self._vocab_lookup_table.lookup(tf.constant(_CLS_TOKEN))
self._sep_token = self._vocab_lookup_table.lookup(tf.constant(_SEP_TOKEN))
self._mask_token = self._vocab_lookup_table.lookup(tf.constant(_MASK_TOKEN))
# -_NUM_OOV_BUCKETS to offset unused OOV bucket.
self._vocab_size = self._vocab_lookup_table.size() - _NUM_OOV_BUCKETS
def _decode(self, record: tf.Tensor) -> Mapping[Text, tf.Tensor]:
"""Decodes a serialized tf.Example."""
name_to_features = {}
for text_field_name in self._params.text_field_names:
name_to_features[text_field_name] = tf.io.FixedLenFeature([], tf.string)
return tf.io.parse_single_example(record, name_to_features)
def _tokenize(self, segments):
"""Tokenize the input segments."""
# Tokenize segments
tokenizer = tf_text.BertTokenizer(
self._vocab_lookup_table, token_out_type=tf.int64)
if self._use_whole_word_masking:
# tokenize the segments which should have the shape:
# [num_sentence, (num_words), (num_wordpieces)]
segments = [tokenizer.tokenize(s) for s in segments]
else:
# tokenize the segments and merge out the token dimension so that each
# segment has the shape: [num_sentence, (num_wordpieces)]
segments = [tokenizer.tokenize(s).merge_dims(-2, -1) for s in segments]
# Truncate inputs
trimmer = tf_text.WaterfallTrimmer(
self._seq_length - _NUM_SPECIAL_TOKENS, axis=-1)
truncated_segments = trimmer.trim(segments)
# Combine segments, get segment ids and add special tokens
return tf_text.combine_segments(
truncated_segments,
start_of_sequence_id=self._cls_token,
end_of_segment_id=self._sep_token)
def _bert_preprocess(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
if self._use_next_sentence_label:
input_text = record[self._params.text_field_names[0]]
# Split sentences
sentence_breaker = tf_text.RegexSplitter()
sentences = sentence_breaker.split(input_text)
# Extract next-sentence-prediction labels and segments
next_or_random_segment, is_next = (
segment_extractor.get_next_sentence_labels(sentences))
# merge dims to change shape from [num_docs, (num_segments)] to
# [total_num_segments]
is_next = is_next.merge_dims(-2, -1)
# construct segments with shape [(num_sentence)]
segments = [
sentences.merge_dims(-2, -1),
next_or_random_segment.merge_dims(-2, -1)
]
else:
segments = [record[name] for name in self._params.text_field_names]
segments_combined, segment_ids = self._tokenize(segments)
# Dynamic masking
item_selector = tf_text.RandomItemSelector(
self._max_predictions_per_seq,
selection_rate=self._masking_rate,
unselectable_ids=[self._cls_token, self._sep_token],
shuffle_fn=(tf.identity if self._params.deterministic else None))
values_chooser = tf_text.MaskValuesChooser(
vocab_size=self._vocab_size, mask_token=self._mask_token)
masked_input_ids, masked_lm_positions, masked_lm_ids = (
tf_text.mask_language_model(
segments_combined,
item_selector=item_selector,
mask_values_chooser=values_chooser,
))
# Pad out to fixed shape and get input mask.
seq_lengths = {
"input_word_ids": self._seq_length,
"input_type_ids": self._seq_length,
"masked_lm_positions": self._max_predictions_per_seq,
"masked_lm_ids": self._max_predictions_per_seq,
}
model_inputs = {
"input_word_ids": masked_input_ids,
"input_type_ids": segment_ids,
"masked_lm_positions": masked_lm_positions,
"masked_lm_ids": masked_lm_ids,
}
padded_inputs_and_mask = tf.nest.map_structure(tf_text.pad_model_inputs,
model_inputs, seq_lengths)
model_inputs = {
k: padded_inputs_and_mask[k][0] for k in padded_inputs_and_mask
}
model_inputs["masked_lm_weights"] = tf.cast(
padded_inputs_and_mask["masked_lm_ids"][1], tf.float32)
model_inputs["input_mask"] = padded_inputs_and_mask["input_word_ids"][1]
if self._use_next_sentence_label:
model_inputs["next_sentence_labels"] = is_next
for name in model_inputs:
t = model_inputs[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
model_inputs[name] = t
return model_inputs
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
def _batch_docs(dataset, input_context):
per_core_doc_batch_size = (
input_context.get_per_replica_batch_size(self._params.doc_batch_size)
if input_context else self._params.doc_batch_size)
return dataset.batch(per_core_doc_batch_size)
reader = input_reader.InputReader(
params=self._params,
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
decoder_fn=self._decode if self._params.input_path else None,
transform_and_batch_fn=_batch_docs
if self._use_next_sentence_label else None,
postprocess_fn=self._bert_preprocess)
transformed_inputs = reader.read(input_context)
per_core_example_batch_size = (
input_context.get_per_replica_batch_size(self._params.global_batch_size)
if input_context else self._params.global_batch_size)
batched_inputs = transformed_inputs.unbatch().batch(
per_core_example_batch_size, self._params.drop_remainder)
return batched_inputs.prefetch(tf.data.experimental.AUTOTUNE)
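# Minimal config sketch (the paths below are placeholders and the helper name
# is made up): raw-text TFRecords with a single "text" feature are tokenized,
# dynamically masked and padded on the fly by the loader above.
def _example_text_pretrain_config():
  return BertPretrainTextDataConfig(
      input_path="/path/to/corpus-*.tfrecord",
      vocab_file_path="/path/to/vocab.txt",
      global_batch_size=512,
      seq_length=128,
      max_predictions_per_seq=20,
      use_next_sentence_label=True)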
| 8,997 | 38.638767 | 80 | py |
models | models-master/official/nlp/data/create_pretraining_data.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
import collections
import itertools
import random
# Import libraries
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.nlp.tools import tokenization
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None,
"Input raw text file (or comma-separated list of files).")
flags.DEFINE_string(
"output_file", None,
"Output TF example file (or comma-separated list of files).")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"do_whole_word_mask", False,
"Whether to use whole word masking rather than per-WordPiece masking.")
flags.DEFINE_integer(
"max_ngram_size", None,
"Mask contiguous whole words (n-grams) of up to `max_ngram_size` using a "
"weighting scheme to favor shorter n-grams. "
"Note: `--do_whole_word_mask=True` must also be set when n-gram masking.")
flags.DEFINE_bool(
"gzip_compress", False,
"Whether to use `GZIP` compress option to get compressed TFRecord files.")
flags.DEFINE_bool(
"use_v2_feature_names", False,
"Whether to use the feature names consistent with the models.")
flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_integer(
"dupe_factor", 10,
"Number of times to duplicate the input data (with different masks).")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
flags.DEFINE_float(
"short_seq_prob", 0.1,
"Probability of creating sequences which are shorter than the "
"maximum length.")
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files,
gzip_compress, use_v2_feature_names):
"""Creates TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(
tf.io.TFRecordWriter(
output_file, options="GZIP" if gzip_compress else ""))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
if use_v2_feature_names:
features["input_word_ids"] = create_int_feature(input_ids)
features["input_type_ids"] = create_int_feature(segment_ids)
else:
features["input_ids"] = create_int_feature(input_ids)
features["segment_ids"] = create_int_feature(segment_ids)
features["input_mask"] = create_int_feature(input_mask)
features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
features["next_sentence_labels"] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
logging.info("*** Example ***")
logging.info("tokens: %s", " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
logging.info("%s: %s", feature_name, " ".join([str(x) for x in values]))
for writer in writers:
writer.close()
logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def create_training_instances(input_files,
tokenizer,
max_seq_length,
dupe_factor,
short_seq_prob,
masked_lm_prob,
max_predictions_per_seq,
rng,
do_whole_word_mask=False,
max_ngram_size=None):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
with tf.io.gfile.GFile(input_file, "rb") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng,
do_whole_word_mask, max_ngram_size))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng,
do_whole_word_mask=False,
max_ngram_size=None):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng,
do_whole_word_mask, max_ngram_size)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
# A _Gram is a [half-open) interval of token indices which form a word.
# E.g.,
# words: ["The", "doghouse"]
# tokens: ["The", "dog", "##house"]
# grams: [(0,1), (1,3)]
_Gram = collections.namedtuple("_Gram", ["begin", "end"])
def _window(iterable, size):
"""Helper to create a sliding window iterator with a given size.
E.g.,
input = [1, 2, 3, 4]
_window(input, 1) => [1], [2], [3], [4]
_window(input, 2) => [1, 2], [2, 3], [3, 4]
_window(input, 3) => [1, 2, 3], [2, 3, 4]
_window(input, 4) => [1, 2, 3, 4]
_window(input, 5) => None
Args:
iterable: elements to iterate over.
size: size of the window.
Yields:
Elements of `iterable` batched into a sliding window of length `size`.
"""
i = iter(iterable)
window = []
try:
for e in range(0, size):
window.append(next(i))
yield window
except StopIteration:
# handle the case where iterable's length is less than the window size.
return
for e in i:
window = window[1:] + [e]
yield window
def _contiguous(sorted_grams):
"""Test whether a sequence of grams is contiguous.
Args:
sorted_grams: _Grams which are sorted in increasing order.
Returns:
True if `sorted_grams` are touching each other.
E.g.,
_contiguous([(1, 4), (4, 5), (5, 10)]) == True
_contiguous([(1, 2), (4, 5)]) == False
"""
for a, b in _window(sorted_grams, 2):
if a.end != b.begin:
return False
return True
def _masking_ngrams(grams, max_ngram_size, max_masked_tokens, rng):
"""Create a list of masking {1, ..., n}-grams from a list of one-grams.
  This is an extension of 'whole word masking' to mask multiple, contiguous
  words (e.g., "the red boat").
Each input gram represents the token indices of a single word,
words: ["the", "red", "boat"]
tokens: ["the", "red", "boa", "##t"]
grams: [(0,1), (1,2), (2,4)]
For a `max_ngram_size` of three, possible outputs masks include:
1-grams: (0,1), (1,2), (2,4)
2-grams: (0,2), (1,4)
3-grams; (0,4)
  Output masks will not overlap and will contain no more than
  `max_masked_tokens` total tokens. E.g., for the example above with
  `max_masked_tokens` as three, valid outputs are,
[(0,1), (1,2)] # "the", "red" covering two tokens
[(1,2), (2,4)] # "red", "boa", "##t" covering three tokens
The length of the selected n-gram follows a zipf weighting to
favor shorter n-gram sizes (weight(1)=1, weight(2)=1/2, weight(3)=1/3, ...).
Args:
grams: List of one-grams.
max_ngram_size: Maximum number of contiguous one-grams combined to create
an n-gram.
max_masked_tokens: Maximum total number of tokens to be masked.
rng: `random.Random` generator.
Returns:
A list of n-grams to be used as masks.
"""
if not grams:
return None
grams = sorted(grams)
num_tokens = grams[-1].end
# Ensure our grams are valid (i.e., they don't overlap).
for a, b in _window(grams, 2):
if a.end > b.begin:
raise ValueError("overlapping grams: {}".format(grams))
# Build map from n-gram length to list of n-grams.
ngrams = {i: [] for i in range(1, max_ngram_size+1)}
for gram_size in range(1, max_ngram_size+1):
for g in _window(grams, gram_size):
if _contiguous(g):
# Add an n-gram which spans these one-grams.
ngrams[gram_size].append(_Gram(g[0].begin, g[-1].end))
# Shuffle each list of n-grams.
for v in ngrams.values():
rng.shuffle(v)
# Create the weighting for n-gram length selection.
# Stored cummulatively for `random.choices` below.
cummulative_weights = list(
itertools.accumulate([1./n for n in range(1, max_ngram_size+1)]))
output_ngrams = []
# Keep a bitmask of which tokens have been masked.
masked_tokens = [False] * num_tokens
# Loop until we have enough masked tokens or there are no more candidate
# n-grams of any length.
# Each code path should ensure one or more elements from `ngrams` are removed
  # to guarantee this loop terminates.
while (sum(masked_tokens) < max_masked_tokens and
sum(len(s) for s in ngrams.values())):
# Pick an n-gram size based on our weights.
    # Use the seeded `rng` so generation is reproducible with --random_seed.
    sz = rng.choices(range(1, max_ngram_size+1),
                     cum_weights=cummulative_weights)[0]
# Ensure this size doesn't result in too many masked tokens.
# E.g., a two-gram contains _at least_ two tokens.
if sum(masked_tokens) + sz > max_masked_tokens:
# All n-grams of this length are too long and can be removed from
# consideration.
ngrams[sz].clear()
continue
# All of the n-grams of this size have been used.
if not ngrams[sz]:
continue
# Choose a random n-gram of the given size.
gram = ngrams[sz].pop()
num_gram_tokens = gram.end-gram.begin
# Check if this would add too many tokens.
if num_gram_tokens + sum(masked_tokens) > max_masked_tokens:
continue
# Check if any of the tokens in this gram have already been masked.
if sum(masked_tokens[gram.begin:gram.end]):
continue
# Found a usable n-gram! Mark its tokens as masked and add it to return.
masked_tokens[gram.begin:gram.end] = [True] * (gram.end-gram.begin)
output_ngrams.append(gram)
return output_ngrams
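# Illustrative call (not part of the pipeline; the helper name is made up):
# the words of "the red boat" tokenized as ["the", "red", "boa", "##t"] give
# the one-grams below. With n-grams of up to two words and at most three
# masked tokens, the selected masks never overlap and cover no more than
# three tokens in total.
def _example_masking_ngrams():
  one_grams = [_Gram(0, 1), _Gram(1, 2), _Gram(2, 4)]
  return _masking_ngrams(
      one_grams, max_ngram_size=2, max_masked_tokens=3,
      rng=random.Random(12345))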
def _wordpieces_to_grams(tokens):
  """Reconstitute grams (words) from `tokens`.
  E.g.,
     tokens: ['[CLS]', 'That', 'lit', '##tle', 'blue', 'tru', '##ck', '[SEP]']
     grams:  [[1, 2), [2, 4), [4, 5), [5, 7)]
Args:
tokens: list of wordpieces
Returns:
List of _Grams representing spans of whole words
(without "[CLS]" and "[SEP]").
"""
grams = []
gram_start_pos = None
for i, token in enumerate(tokens):
if gram_start_pos is not None and token.startswith("##"):
continue
if gram_start_pos is not None:
grams.append(_Gram(gram_start_pos, i))
if token not in ["[CLS]", "[SEP]"]:
gram_start_pos = i
else:
gram_start_pos = None
if gram_start_pos is not None:
grams.append(_Gram(gram_start_pos, len(tokens)))
return grams
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng,
do_whole_word_mask,
max_ngram_size=None):
"""Creates the predictions for the masked LM objective."""
if do_whole_word_mask:
grams = _wordpieces_to_grams(tokens)
else:
# Here we consider each token to be a word to allow for sub-word masking.
if max_ngram_size:
raise ValueError("cannot use ngram masking without whole word masking")
grams = [_Gram(i, i+1) for i in range(0, len(tokens))
if tokens[i] not in ["[CLS]", "[SEP]"]]
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
  # Generate masks. If `max_ngram_size` is in [0, None] it means we're doing
# whole word masking or token level masking. Both of these can be treated
# as the `max_ngram_size=1` case.
masked_grams = _masking_ngrams(grams, max_ngram_size or 1,
num_to_predict, rng)
masked_lms = []
output_tokens = list(tokens)
for gram in masked_grams:
# 80% of the time, replace all n-gram tokens with [MASK]
if rng.random() < 0.8:
replacement_action = lambda idx: "[MASK]"
else:
# 10% of the time, keep all the original n-gram tokens.
if rng.random() < 0.5:
replacement_action = lambda idx: tokens[idx]
# 10% of the time, replace each n-gram token with a random word.
else:
replacement_action = lambda idx: rng.choice(vocab_words)
for idx in range(gram.begin, gram.end):
output_tokens[idx] = replacement_action(idx)
masked_lms.append(MaskedLmInstance(index=idx, label=tokens[idx]))
assert len(masked_lms) <= num_to_predict
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
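# Illustrative call (not part of the pipeline; the helper name, tokens and
# toy vocabulary are made up): whole-word masking over a tiny token list.
# Returns the possibly-masked tokens plus the masked positions and their
# original labels.
def _example_create_masked_lm_predictions():
  tokens = ["[CLS]", "the", "red", "boa", "##t", "[SEP]"]
  return create_masked_lm_predictions(
      tokens,
      masked_lm_prob=0.15,
      max_predictions_per_seq=2,
      vocab_words=["the", "red", "boat", "blue", "dog"],
      rng=random.Random(12345),
      do_whole_word_mask=True)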
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main(_):
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.io.gfile.glob(input_pattern))
logging.info("*** Reading from input files ***")
for input_file in input_files:
logging.info(" %s", input_file)
rng = random.Random(FLAGS.random_seed)
instances = create_training_instances(
input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
rng, FLAGS.do_whole_word_mask, FLAGS.max_ngram_size)
output_files = FLAGS.output_file.split(",")
logging.info("*** Writing to output files ***")
for output_file in output_files:
logging.info(" %s", output_file)
write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
FLAGS.max_predictions_per_seq, output_files,
FLAGS.gzip_compress,
FLAGS.use_v2_feature_names)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
app.run(main)
| 23,360 | 33.867164 | 80 | py |
models | models-master/official/nlp/data/dual_encoder_dataloader_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.dual_encoder_dataloader."""
import os
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.data import dual_encoder_dataloader
_LEFT_FEATURE_NAME = 'left_input'
_RIGHT_FEATURE_NAME = 'right_input'
def _create_fake_dataset(output_path):
  """Creates a fake dataset of examples for training a dual encoder model.
The created dataset contains examples with two byteslist features keyed by
_LEFT_FEATURE_NAME and _RIGHT_FEATURE_NAME.
Args:
output_path: The output path of the fake dataset.
"""
def create_str_feature(values):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
with tf.io.TFRecordWriter(output_path) as writer:
for _ in range(100):
features = {}
features[_LEFT_FEATURE_NAME] = create_str_feature([b'hello world.'])
features[_RIGHT_FEATURE_NAME] = create_str_feature([b'world hello.'])
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
def _make_vocab_file(vocab, output_path):
with tf.io.gfile.GFile(output_path, 'w') as f:
f.write('\n'.join(vocab + ['']))
class DualEncoderDataTest(tf.test.TestCase, parameterized.TestCase):
def test_load_dataset(self):
seq_length = 16
batch_size = 10
train_data_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
vocab_path = os.path.join(self.get_temp_dir(), 'vocab.txt')
_create_fake_dataset(train_data_path)
_make_vocab_file(
['[PAD]', '[UNK]', '[CLS]', '[SEP]', 'he', '#llo', 'world'], vocab_path)
data_config = dual_encoder_dataloader.DualEncoderDataConfig(
input_path=train_data_path,
seq_length=seq_length,
vocab_file=vocab_path,
lower_case=True,
left_text_fields=(_LEFT_FEATURE_NAME,),
right_text_fields=(_RIGHT_FEATURE_NAME,),
global_batch_size=batch_size)
dataset = dual_encoder_dataloader.DualEncoderDataLoader(
data_config).load()
features = next(iter(dataset))
self.assertCountEqual(
['left_word_ids', 'left_mask', 'left_type_ids', 'right_word_ids',
'right_mask', 'right_type_ids'],
features.keys())
self.assertEqual(features['left_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['left_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['left_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['right_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['right_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['right_type_ids'].shape, (batch_size, seq_length))
@parameterized.parameters(False, True)
def test_load_tfds(self, use_preprocessing_hub):
seq_length = 16
batch_size = 10
if use_preprocessing_hub:
vocab_path = ''
preprocessing_hub = (
'https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3')
else:
vocab_path = os.path.join(self.get_temp_dir(), 'vocab.txt')
_make_vocab_file(
['[PAD]', '[UNK]', '[CLS]', '[SEP]', 'he', '#llo', 'world'],
vocab_path)
preprocessing_hub = ''
data_config = dual_encoder_dataloader.DualEncoderDataConfig(
tfds_name='para_crawl/enmt',
tfds_split='train',
seq_length=seq_length,
vocab_file=vocab_path,
lower_case=True,
left_text_fields=('en',),
right_text_fields=('mt',),
preprocessing_hub_module_url=preprocessing_hub,
global_batch_size=batch_size)
dataset = dual_encoder_dataloader.DualEncoderDataLoader(
data_config).load()
features = next(iter(dataset))
self.assertCountEqual(
['left_word_ids', 'left_mask', 'left_type_ids', 'right_word_ids',
'right_mask', 'right_type_ids'],
features.keys())
self.assertEqual(features['left_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['left_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['left_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['right_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['right_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['right_type_ids'].shape, (batch_size, seq_length))
if __name__ == '__main__':
tf.test.main()
| 5,040 | 37.189394 | 80 | py |
models | models-master/official/nlp/data/train_sentencepiece.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to train a SentencePiece model from TensorFlow Datasets.
Reserved tokens:
pad: 0,
eos: 1,
unk: 2
(bos is not reserved)
"""
import os
import tempfile
from typing import List, Tuple
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
from sentencepiece import SentencePieceTrainer
FLAGS = flags.FLAGS
flags.DEFINE_string("output_model_path", None,
"Path to save the sentencepiece model.")
flags.mark_flag_as_required("output_model_path")
flags.DEFINE_string("tfds_dir", None, "Directory of the tfds.")
flags.DEFINE_string("tfds_name", "wmt14_translate/de-en",
                    "Name of the dataset we generate the vocabulary from.")
flags.DEFINE_string("tfds_split", "train", "Split of the dataset.")
flags.DEFINE_integer("vocab_size", 32000, "Size of vocabulary.")
flags.DEFINE_integer(
"max_char", -1,
"Maximum number of characters to use. "
"If a non-positive number is provided, all sentences are used.")
flags.DEFINE_string("model_type", "bpe",
"Model algorithm: unigram, bpe, word or char.")
flags.DEFINE_float("character_coverage", 0.9995,
"Character coverage to determine the minimum symbols")
flags.DEFINE_list(
"data_keys", ["en", "de"],
"Comma-separated list of keys to use for training the vocabulary.")
def dump_chars_to_textfile(dataset: tf.data.Dataset,
data_keys: Tuple[str],
max_char: int = -1):
"""Write part of a TFDS sentence dataset to lines in a text file.
Args:
dataset: tf.dataset containing string-data.
data_keys: what keys in dataset to dump from.
    max_char: maximum number of characters to dump to the text file.
Returns:
    The name of a temporary file containing the dumped dataset text.
"""
ds_iter = dataset.as_numpy_iterator()
with tempfile.NamedTemporaryFile(delete=False) as outfp:
char_count = 0
while True:
example = next(ds_iter, None)
if example is None or (
max_char > 0 and char_count > max_char):
break
for k in data_keys:
line = example[k] + b"\n"
char_count += len(line)
outfp.write(line)
return outfp.name
def train_sentencepiece(
file_path: str,
model_path: str,
vocab_size: int,
character_coverage: float,
model_type: str):
"""Train SentencePiece tokenizer from subset of tf dataset.
Args:
file_path: path of data to train sentencepiece.
model_path: path of model file to save vocab model to.
vocab_size: size of vocab tokens to train.
    character_coverage: amount of characters covered by the model; good
      defaults are 0.9995 for languages with a rich character set like
      Japanese or Chinese, and 1.0 for languages with a small character set.
    model_type: type of sentencepiece vocab to train.
  Returns:
    None. The trained model and vocab are written to `<model_path>.model`
    and `<model_path>.vocab` by the SentencePiece trainer.
"""
argstr = " ".join([
f"--input={file_path}", f"--vocab_size={vocab_size}",
f"--character_coverage={character_coverage}",
f"--model_prefix={model_path}", f"--model_type={model_type}",
"--bos_id=-1", "--pad_id=0", "--eos_id=1", "--unk_id=2"
])
SentencePieceTrainer.Train(argstr)
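# Sketch (not part of this script; the helper name is made up):
# SentencePieceTrainer writes `<model_path>.model` and `<model_path>.vocab`;
# the .model file can be loaded back to check the reserved ids
# (pad=0, eos=1, unk=2).
def _example_load_trained_model(model_path: str):
  from sentencepiece import SentencePieceProcessor
  sp = SentencePieceProcessor()
  sp.Load(model_path + ".model")
  return sp.pad_id(), sp.eos_id(), sp.unk_id()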
def main(argv: List[str]):
del argv
builder = tfds.builder(FLAGS.tfds_name, data_dir=FLAGS.tfds_dir)
ds = builder.as_dataset(split=FLAGS.tfds_split)
tmp_filename = dump_chars_to_textfile(ds, FLAGS.data_keys, FLAGS.max_char)
logging.info("Sentencepiece model will be placed here: %s",
FLAGS.output_model_path)
train_sentencepiece(tmp_filename,
FLAGS.output_model_path,
FLAGS.vocab_size,
FLAGS.character_coverage,
FLAGS.model_type)
os.remove(tmp_filename)
if __name__ == "__main__":
app.run(main)
| 4,513 | 32.686567 | 80 | py |
models | models-master/official/nlp/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NLP Modeling Library.
This library provides a set of Keras primitives (`tf.keras.Layer` and
`tf.keras.Model`) that can be assembled into transformer-based models.
They are flexible, validated, interoperable, and both TF1 and TF2 compatible.
"""
from official.nlp.modeling import layers
from official.nlp.modeling import losses
from official.nlp.modeling import models
from official.nlp.modeling import networks
from official.nlp.modeling import ops
| 1,062 | 39.884615 | 77 | py |
models | models-master/official/nlp/modeling/networks/classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification and regression network."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from tensorflow.python.util import deprecation
@tf.keras.utils.register_keras_serializable(package='Text')
class Classification(tf.keras.Model):
"""Classification network head for BERT modeling.
This network implements a simple classifier head based on a dense layer. If
num_classes is one, it can be considered as a regression problem.
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
input_width: The innermost dimension of the input tensor to this network.
num_classes: The number of classes that this network should classify to. If
equal to 1, a regression problem is assumed.
initializer: The initializer for the dense layer in this network. Defaults
to a Glorot uniform initializer.
output: The output style for this network. Can be either `logits` or
`predictions`.
"""
@deprecation.deprecated(None, 'Classification as a network is deprecated. '
'Please use the layers.ClassificationHead instead.')
def __init__(self,
input_width,
num_classes,
initializer='glorot_uniform',
output='logits',
**kwargs):
cls_output = tf.keras.layers.Input(
shape=(input_width,), name='cls_output', dtype=tf.float32)
logits = tf.keras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=initializer,
name='predictions/transform/logits')(
cls_output)
if output == 'logits':
output_tensors = logits
elif output == 'predictions':
policy = tf.keras.mixed_precision.global_policy()
if policy.name == 'mixed_bfloat16':
# b/158514794: bf16 is not stable with post-softmax cross-entropy.
policy = tf.float32
output_tensors = tf.keras.layers.Activation(
tf.nn.log_softmax, dtype=policy)(
logits)
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
super().__init__(
inputs=[cls_output], outputs=output_tensors, **kwargs)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
config_dict = {
'input_width': input_width,
'num_classes': num_classes,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.logits = logits
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
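# Minimal usage sketch (deprecated network, shown only for illustration; the
# helper name is made up): classify a batch of two pooled [CLS] vectors of
# width 16 into 3 classes.
def _example_classification_head():
  network = Classification(input_width=16, num_classes=3)
  fake_cls_output = tf.ones((2, 16), dtype=tf.float32)
  return network(fake_cls_output)  # Logits of shape (2, 3).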
| 4,222 | 38.101852 | 80 | py |
models | models-master/official/nlp/modeling/networks/packed_sequence_embedding.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An embedding network supporting packed sequences and position ids."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class PackedSequenceEmbedding(tf.keras.Model):
"""An embedding network supporting packed sequences and position ids.
This network implements an embedding layer similar to the one described in
"BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding" (https://arxiv.org/abs/1810.04805). On top of it, it supports
to (1) pack multiple sequences into one sequence and (2) allow additional
"position_ids" as input.
Args:
vocab_size: The size of the token vocabulary.
type_vocab_size: The size of the type vocabulary.
embedding_width: Width of token embeddings.
hidden_size: The output size for this encoder.
max_seq_length: The maximum sequence length for this encoder.
initializer: The initializer for the embedding portion of this encoder.
dropout_rate: The dropout rate to apply before the encoding layers.
pack_multiple_sequences: If `True`, we can feed multiple sequences into one
sequence for training and inference (they don't impact each other).
use_position_id: Whether to expect `position_ids` as an input to the
network. If False, the `position_ids` will be inferred: (1) when
pack_multiple_sequences is False, we assume the position ids are `0, 1,
2, ..., seq_length - 1`; (2) when `pack_multiple_sequences` is `True`,
there may be multiple sub sequences, and for each sub sequence, its
position ids start from 0, 1, 2, ...
"""
def __init__(self,
vocab_size,
type_vocab_size,
embedding_width,
hidden_size,
max_seq_length,
initializer,
dropout_rate,
use_position_id=False,
pack_multiple_sequences=False,
**kwargs):
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
config_dict = {
'vocab_size': vocab_size,
'type_vocab_size': type_vocab_size,
'embedding_width': embedding_width,
'hidden_size': hidden_size,
'max_seq_length': max_seq_length,
'initializer': tf.keras.initializers.serialize(initializer),
'dropout_rate': dropout_rate,
'use_position_id': use_position_id,
'pack_multiple_sequences': pack_multiple_sequences,
}
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
inputs = [word_ids, mask, type_ids]
if use_position_id:
position_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='position_ids')
inputs.append(position_ids)
else:
position_ids = None
if pack_multiple_sequences:
sub_seq_mask = PackedSequenceMask()(word_ids)
else:
sub_seq_mask = None
embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
word_embeddings = embedding_layer(word_ids)
# Always uses dynamic slicing for simplicity.
position_embedding_layer = PositionEmbeddingWithSubSeqMask(
initializer=tf_utils.clone_initializer(initializer),
use_dynamic_slicing=True,
max_sequence_length=max_seq_length,
name='position_embedding')
position_embeddings = position_embedding_layer(
word_embeddings, position_ids, sub_seq_mask)
type_embeddings = (
layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')(type_ids))
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
embeddings = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(
embeddings)
embeddings = tf.keras.layers.Dropout(
rate=dropout_rate, dtype=tf.float32)(
embeddings)
if embedding_width != hidden_size:
embeddings = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes=None,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')(
embeddings)
attention_mask = layers.SelfAttentionMask()(embeddings, mask)
if sub_seq_mask is not None:
attention_mask = tf.keras.layers.Lambda(
lambda x: x[0] * tf.cast(x[1], x[0].dtype))(
[attention_mask, sub_seq_mask])
outputs = [embeddings, attention_mask]
super().__init__(
inputs=inputs, outputs=outputs, **kwargs)
# TF does not track immutable attrs which do not contain Trackables,
# so by creating a config namedtuple instead of a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self._embedding_layer = embedding_layer
self._position_embedding_layer = position_embedding_layer
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
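# A minimal usage sketch (illustrative only; the vocabulary, sizes and ids
# below are arbitrary): builds a small PackedSequenceEmbedding with two
# sub sequences packed into a single example and inspects its outputs.
def _packed_sequence_embedding_example():
  network = PackedSequenceEmbedding(
      vocab_size=100,
      type_vocab_size=2,
      embedding_width=8,
      hidden_size=16,
      max_seq_length=16,
      initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
      dropout_rate=0.1,
      use_position_id=False,
      pack_multiple_sequences=True)
  # Both packed sub sequences start with the same start token (1 here).
  word_ids = tf.constant([[1, 5, 7, 1, 9, 4, 2, 3]], dtype=tf.int32)
  mask = tf.ones_like(word_ids)
  type_ids = tf.zeros_like(word_ids)
  embeddings, attention_mask = network([word_ids, mask, type_ids])
  # embeddings: [1, 8, 16]; attention_mask: [1, 8, 8], zeroed between
  # positions that belong to different sub sequences.
  return embeddings, attention_mask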
@tf.keras.utils.register_keras_serializable(package='Text')
class PackedSequenceMask(tf.keras.layers.Layer):
"""A layer to create a mask to indicate multiple sub sequences."""
def call(self, input_ids):
"""Implements call() for the layer.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length].
Returns:
boolean Tensor of shape [batch_size, seq_length, seq_length]. [x, y, z]
is True if for x'th instance in a batch, y'th token and z'th token are
from the same sub sequence.
"""
# Suppose
# - the first token in the parent sequence is [CLS].
# - every sequence starts from [CLS].
# - every sequence only contains one [CLS].
seq_start_token = input_ids[:, 0:1]
seq_start_loc = tf.cast(tf.equal(input_ids, seq_start_token), tf.int32)
# Set different ids for different sub sequences.
seq_ids = tf.expand_dims(tf.cumsum(seq_start_loc, -1), -1)
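    # Example (illustrative values): input_ids = [[101, 7, 8, 101, 9]] packs
    # two sub sequences that both start with token 101, so
    #   seq_start_loc = [[1, 0, 0, 1, 0]] and cumsum -> [[1, 1, 1, 2, 2]].
    # Comparing seq_ids with its transpose then yields a block-diagonal
    # boolean mask with one 3x3 block and one 2x2 block.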
return tf.equal(seq_ids, tf.transpose(seq_ids, [0, 2, 1]))
@tf.keras.utils.register_keras_serializable(package='Text')
class PositionEmbeddingWithSubSeqMask(tf.keras.layers.Layer):
"""Creates a positional embedding with sub-sequence masking.
This layer creates a positional embedding as described in "BERT: Pre-training
of Deep Bidirectional Transformers for Language Understanding"
(https://arxiv.org/abs/1810.04805). On top of it, it supports
`position_ids` and `sub_sequence_mask` tensors.
This layer can be set up to either create a statically shaped slice or a
dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor
can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the
input size must be fixed.
Args:
initializer: The initializer to use for the embedding weights. Defaults to
"glorot_uniform".
use_dynamic_slicing: Whether to use the dynamic slicing path.
max_sequence_length: The maximum size of the dynamic sequence. Only
applicable if `use_dynamic_slicing` is True.
"""
def __init__(self,
initializer='glorot_uniform',
use_dynamic_slicing=False,
max_sequence_length=None,
**kwargs):
# We need to have a default dtype of float32, since the inputs (which Keras
# usually uses to infer the dtype) will always be int32.
if 'dtype' not in kwargs:
kwargs['dtype'] = 'float32'
super().__init__(**kwargs)
if use_dynamic_slicing and max_sequence_length is None:
raise ValueError(
'If `use_dynamic_slicing` is True, `max_sequence_length` must be set.'
)
self._max_sequence_length = max_sequence_length
self._initializer = tf.keras.initializers.get(initializer)
self._use_dynamic_slicing = use_dynamic_slicing
def get_config(self):
config = {
'max_sequence_length': self._max_sequence_length,
'initializer': tf.keras.initializers.serialize(self._initializer),
'use_dynamic_slicing': self._use_dynamic_slicing,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
"""Implements build() for the layer."""
dimension_list = input_shape.as_list()
if len(dimension_list) != 3:
raise ValueError('PositionEmbedding expects a 3-dimensional input tensor '
'of shape [batch, sequence, width]')
seq_length = dimension_list[1]
width = dimension_list[2]
# If we are not using dynamic slicing, we must assume that the sequence
# length is fixed and max_sequence_length should not be specified.
if not self._use_dynamic_slicing:
if seq_length is None:
raise ValueError(
'PositionEmbedding must have `use_dynamic_slicing` set '
'to True (and max_sequence_length set) when the '
'sequence (1st) dimension of the input is None.')
if self._max_sequence_length is not None:
raise ValueError(
'When `use_dynamic_slicing` is False, max_sequence_length should '
'not be specified and we ought to use seq_length to get the '
'variable shape.')
if self._max_sequence_length is not None:
weight_sequence_length = self._max_sequence_length
else:
weight_sequence_length = seq_length
self._position_embeddings = self.add_weight(
'embeddings',
shape=[weight_sequence_length, width],
initializer=self._initializer)
super().build(input_shape)
def call(self, inputs, position_ids=None, sub_sequence_mask=None):
"""Implements call() for the layer.
When `position_ids` is specified, it will return the position embeddings
corresponding to this `position_ids`; otherwise, `position_ids` will be
inferred in the following way:
(1) When `sub_sequence_mask` is None, we assume the position ids are
0, 1, 2, ..., seq_length - 1.
(2) When `sub_sequence_mask` is specified, there may be multiple sub
sequences, and for each sub sequence, its position ids start from
0, 1, 2, ...
Args:
inputs: Word embeddings in shape [batch, seq_length, embedding_dim].
position_ids: An optional int32 tensor in shape [batch, seq_length].
sub_sequence_mask: An optional bool tensor in shape [batch, seq_length,
seq_length]. [x, y, z] is True if for x'th instance in a batch, y'th
token and z'th token are from the same sub sequence.
Returns:
The position embeddings in shape [batch, seq_length, embedding_dim].
"""
input_shape = tf_utils.get_shape_list(inputs, expected_rank=3)
if self._use_dynamic_slicing:
position_embeddings = self._position_embeddings[:input_shape[1], :]
else:
position_embeddings = self._position_embeddings
if position_ids is not None:
return tf.gather(position_embeddings, position_ids)
if sub_sequence_mask is None:
return tf.broadcast_to(position_embeddings, input_shape)
else:
sub_sequence_mask = tf.cast(sub_sequence_mask, tf.int32)
# For each sub sequence, its position ids start from 0, 1, 2, ...
position_ids = tf.linalg.diag_part(tf.cumsum(sub_sequence_mask, -1)) - 1
return tf.gather(position_embeddings, position_ids)
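# Illustrative sketch (not part of the library API): recovers per-sub-sequence
# position ids from a packed-sequence mask with the same diag(cumsum) trick
# used in PositionEmbeddingWithSubSeqMask.call above. The mask values are
# made up for the example.
def _position_ids_from_sub_sequence_mask_example():
  # Two packed sub sequences of lengths 2 and 3 give a block-diagonal mask.
  sub_sequence_mask = tf.constant([[
      [1, 1, 0, 0, 0],
      [1, 1, 0, 0, 0],
      [0, 0, 1, 1, 1],
      [0, 0, 1, 1, 1],
      [0, 0, 1, 1, 1],
  ]], dtype=tf.int32)
  position_ids = tf.linalg.diag_part(tf.cumsum(sub_sequence_mask, -1)) - 1
  # position_ids == [[0, 1, 0, 1, 2]]: positions restart at 0 for each
  # sub sequence.
  return position_ids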
| 12,800 | 39.381703 | 80 | py |
models | models-master/official/nlp/modeling/networks/funnel_transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import funnel_transformer
class SingleLayerModel(tf.keras.Model):
def __init__(self, layer):
super().__init__()
self.layer = layer
def call(self, inputs):
return self.layer(inputs)
class FunnelTransformerEncoderTest(parameterized.TestCase, tf.test.TestCase):
def tearDown(self):
super(FunnelTransformerEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
("mix_truncated_avg_rezero", "mixed_float16", tf.float16, "truncated_avg",
"ReZeroTransformer"), ("float32_truncated_avg_rezero", "float32",
tf.float32, "truncated_avg", "ReZeroTransformer"),
("mix_truncated_avg", "mixed_float16", tf.float16, "truncated_avg",
"TransformerEncoderBlock"),
("float32_truncated_avg", "float32", tf.float32, "truncated_avg",
"TransformerEncoderBlock"), ("mix_max", "mixed_float16", tf.float16,
"max", "TransformerEncoderBlock"),
("float32_max", "float32", tf.float32, "max", "TransformerEncoderBlock"),
("mix_avg", "mixed_float16", tf.float16, "avg",
"TransformerEncoderBlock"),
("float32_avg", "float32", tf.float32, "avg", "TransformerEncoderBlock"))
def test_network_creation(self, policy, pooled_dtype, pool_type,
transformer_cls):
tf.keras.mixed_precision.set_global_policy(policy)
hidden_size = 32
sequence_length = 21
pool_stride = 2
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
pool_type=pool_type,
max_sequence_length=sequence_length,
unpool_length=0,
transformer_cls=transformer_cls)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, num_layers)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
# Stride=2 compresses sequence length to half the size at each layer.
    # For pool_type = max or avg, this configuration gives per-layer sequence
    # lengths of 21->11->6->3.
    # For pool_type = truncated_avg, the per-layer lengths are 21->10->5->2.
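    # (max/avg keep ceil(len / stride) positions per layer, while
    # truncated_avg keeps floor(len / stride).)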
if pool_type in ["max", "avg"]:
expected_data_shape = [None, 3, hidden_size]
else:
expected_data_shape = [None, 2, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(pooled_dtype, pooled.dtype)
@parameterized.named_parameters(
("append_dense_inputs", True),
("dense_inputs_at_sequence_begin", False),
)
def test_network_creation_dense(self, append_dense_inputs):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
pool_type = "avg"
hidden_size = 32
sequence_length = 21
dense_sequence_length = 3
pool_stride = 2
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
pool_type=pool_type,
max_sequence_length=sequence_length + dense_sequence_length,
unpool_length=0,
transformer_cls="TransformerEncoderBlock",
append_dense_inputs=append_dense_inputs)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
[word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, num_layers)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
# Stride=2 compresses sequence length to half the size at each layer.
    # For pool_type = max or avg, this configuration gives per-layer sequence
    # lengths of 24->12->6->3 (21 word tokens + 3 dense tokens = 24).
expected_data_shape = [None, 3, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
@parameterized.named_parameters(
("frac_pool_rezero", "ReZeroTransformer"),
("frac_pool_vanilla", "TransformerEncoderBlock"),
)
def test_fractional_pooling(self, transformer_cls):
hidden_size = 16
sequence_length = 32
pool_strides = [1.33333, 3, 2, 1]
num_layers = 4
pool_type = "truncated_avg"
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_strides,
pool_type=pool_type,
max_sequence_length=sequence_length,
unpool_length=0,
transformer_cls=transformer_cls)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
expected_data_shape = [None, 4, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
def test_invalid_stride_and_num_layers(self):
hidden_size = 32
num_layers = 3
pool_stride = [2, 2]
unpool_length = 1
with self.assertRaisesRegex(ValueError,
"pool_stride and num_layers are not equal"):
_ = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
unpool_length=unpool_length)
@parameterized.named_parameters(
("no_stride_no_unpool", 1, 0),
("stride_list_with_unpool", [2, 3, 4], 1),
("large_stride_with_unpool", 3, 1),
("large_stride_with_large_unpool", 5, 10),
("no_stride_with_unpool", 1, 1),
)
def test_all_encoder_outputs_network_creation(self, pool_stride,
unpool_length):
hidden_size = 32
sequence_length = 21
num_layers = 3
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
pool_stride=pool_stride,
unpool_length=unpool_length)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids])
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, num_layers)
if isinstance(pool_stride, int):
pool_stride = [pool_stride] * num_layers
for layer_pool_stride, data in zip(pool_stride, all_encoder_outputs):
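      # Each layer keeps the first `unpool_length` tokens untouched and pools
      # the rest with ceiling division, i.e. the new length is
      # unpool_length + ceil((len - unpool_length) / stride).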
expected_data_shape[1] = unpool_length + (
expected_data_shape[1] + layer_pool_stride - 1 -
unpool_length) // layer_pool_stride
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
@parameterized.named_parameters(
("all_sequence", None, 3, 0),
("output_range", 1, 1, 0),
("all_sequence_with_unpool", None, 4, 1),
("output_range_with_unpool", 1, 1, 1),
("output_range_with_large_unpool", 1, 1, 2),
)
def test_network_invocation(self, output_range, out_seq_len, unpool_length):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
pool_stride = 2
# Create a small FunnelTransformerEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
pool_stride=pool_stride,
unpool_length=unpool_length)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network([word_ids, mask, type_ids],
output_range=output_range)
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len) # output_range
# Creates a FunnelTransformerEncoder with max_sequence_length !=
# sequence_length
max_sequence_length = 128
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
pool_stride=pool_stride)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], 3)
# Creates a FunnelTransformerEncoder with embedding_width != hidden_size
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16,
pool_stride=pool_stride)
dict_outputs = test_network([word_ids, mask, type_ids])
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
def test_embeddings_as_inputs(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = funnel_transformer.FunnelTransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
pool_stride=2,
)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
test_network.build(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)
)
embeddings = test_network.get_embedding_layer()(word_ids)
# Calls with the embeddings.
dict_outputs = test_network(
dict(
input_word_embeddings=embeddings,
input_mask=mask,
input_type_ids=type_ids,
)
)
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
inner_dim=1223,
inner_activation="relu",
output_dropout=0.05,
attention_dropout=0.22,
initializer="glorot_uniform",
output_range=-1,
embedding_width=16,
embedding_layer=None,
norm_first=False,
pool_type="max",
pool_stride=2,
unpool_length=0,
transformer_cls="TransformerEncoderBlock")
network = funnel_transformer.FunnelTransformerEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["inner_activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["inner_activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = funnel_transformer.FunnelTransformerEncoder.from_config(
network.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
# Tests model saving/loading.
model_path = self.get_temp_dir() + "/model"
network_wrapper = SingleLayerModel(network)
# One forward-path to ensure input_shape.
batch_size = 3
sequence_length = 21
vocab_size = 100
num_types = 12
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
_ = network_wrapper.predict([word_id_data, mask_data, type_id_data])
network_wrapper.save(model_path)
_ = tf.keras.models.load_model(model_path)
if __name__ == "__main__":
tf.test.main()
| 17,167 | 39.682464 | 80 | py |
models | models-master/official/nlp/modeling/networks/packed_sequence_embedding_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.modeling.networks.packed_sequence_embedding."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import packed_sequence_embedding
class PackedSequenceEmbeddingTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(PackedSequenceEmbeddingTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
@parameterized.parameters([
(True, True, True),
(False, False, True),
(False, True, False),
(True, False, False),
])
def test_network_creation(self, use_position_id, pack_multiple_sequences,
use_float16):
"""Validate that the Keras object can be created."""
if use_float16:
tf.keras.mixed_precision.set_global_policy('mixed_float16')
seq_length = 16
vocab_size = 100
max_position_embeddings = 32
type_vocab_size = 2
embedding_width = 16
hidden_size = 32
embedding_cfg = dict(
vocab_size=vocab_size,
type_vocab_size=2,
embedding_width=embedding_width,
hidden_size=hidden_size,
max_seq_length=max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
dropout_rate=0.1,
use_position_id=use_position_id,
pack_multiple_sequences=pack_multiple_sequences,
)
test_object = packed_sequence_embedding.PackedSequenceEmbedding(
**embedding_cfg)
input_word_ids = tf.keras.Input(shape=(seq_length,), dtype=tf.int32)
input_mask = tf.keras.Input(shape=(seq_length,), dtype=tf.int32)
input_type_ids = tf.keras.Input(shape=(seq_length,), dtype=tf.int32)
network_inputs = {
'input_word_ids': input_word_ids,
'input_mask': input_mask,
'input_type_ids': input_type_ids,
}
if use_position_id:
network_inputs['position_ids'] = tf.keras.Input(
shape=(seq_length,), dtype=tf.int32)
embedding, mask = test_object(network_inputs)
# Create a model based off of this network:
model = tf.keras.Model(network_inputs, [embedding, mask])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(vocab_size, size=(batch_size, seq_length))
mask_data = np.random.randint(2, size=(batch_size, seq_length))
type_id_data = np.random.randint(
type_vocab_size, size=(batch_size, seq_length))
feed_input = {
'input_word_ids': word_id_data,
'input_mask': mask_data,
'input_type_ids': type_id_data,
}
if use_position_id:
feed_input['position_ids'] = np.random.randint(
seq_length, size=(batch_size, seq_length))
embeddings, attention_mask = model.predict(feed_input)
expected_embeddings_shape = [3, seq_length, hidden_size]
expected_attention_mask_shape = [3, seq_length, seq_length]
self.assertAllEqual(expected_embeddings_shape, embeddings.shape)
self.assertAllEqual(expected_attention_mask_shape, attention_mask.shape)
def test_serialize_deserialize(self):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
# Create a network object that sets all of its config options.
embedding_cfg = dict(
vocab_size=100,
type_vocab_size=2,
embedding_width=64,
hidden_size=64,
max_seq_length=32,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
dropout_rate=0.1,
use_position_id=True,
pack_multiple_sequences=False,
)
network = packed_sequence_embedding.PackedSequenceEmbedding(**embedding_cfg)
expected_config = dict(embedding_cfg)
expected_config['initializer'] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config['initializer']))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = packed_sequence_embedding.PackedSequenceEmbedding.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 5,066 | 36.533333 | 80 | py |
models | models-master/official/nlp/modeling/networks/xlnet_base_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras based XLNet model."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.nlp.modeling.networks import xlnet_base
class RelativePositionEncodingTest(tf.test.TestCase):
def test_positional_embedding(self):
"""A low-dimensional example is tested.
With len(pos_seq)=2 and d_model=4:
pos_seq = [[1.], [0.]]
inv_freq = [1., 0.01]
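    (inv_freq comes from 1 / 10000**(range(0, d_model, 2) / d_model).)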
pos_seq x inv_freq = [[1, 0.01], [0., 0.]]
pos_emb = [[sin(1.), sin(0.01), cos(1.), cos(0.01)],
[sin(0.), sin(0.), cos(0.), cos(0.)]]
= [[0.84147096, 0.00999983, 0.54030228, 0.99994999],
[0., 0., 1., 1.]]
"""
target = np.array([[[0.84147096, 0.00999983, 0.54030228, 0.99994999],
[0., 0., 1., 1.]]])
hidden_size = 4
pos_seq = tf.range(1, -1, -1.0) # [1., 0.]
encoding_layer = xlnet_base.RelativePositionEncoding(
hidden_size=hidden_size)
encoding = encoding_layer(pos_seq, batch_size=None).numpy().astype(float)
self.assertAllClose(encoding, target)
class ComputePositionEncodingTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
attention_type=["uni", "bi"],
bi_data=[False, True],
))
def test_compute_position_encoding_smoke(self, attention_type, bi_data):
hidden_size = 4
batch_size = 4
total_length = 8
seq_length = 4
position_encoding_layer = xlnet_base.RelativePositionEncoding(
hidden_size=hidden_size)
encoding = xlnet_base._compute_positional_encoding(
attention_type=attention_type,
position_encoding_layer=position_encoding_layer,
hidden_size=hidden_size,
batch_size=batch_size,
total_length=total_length,
seq_length=seq_length,
clamp_length=2,
bi_data=bi_data,
dtype=tf.float32)
self.assertEqual(encoding.shape[0], batch_size)
self.assertEqual(encoding.shape[2], hidden_size)
class CausalAttentionMaskTests(tf.test.TestCase):
  def test_causal_attention_mask_with_no_memory(self):
seq_length, memory_length = 3, 0
causal_attention_mask = xlnet_base._create_causal_attention_mask(
seq_length=seq_length,
memory_length=memory_length)
expected_output = np.array([[1, 0, 0],
[1, 1, 0],
[1, 1, 1]])
self.assertAllClose(causal_attention_mask, expected_output)
  def test_causal_attention_mask_with_memory(self):
seq_length, memory_length = 3, 2
causal_attention_mask = xlnet_base._create_causal_attention_mask(
seq_length=seq_length,
memory_length=memory_length)
expected_output = np.array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
self.assertAllClose(causal_attention_mask, expected_output)
def test_causal_attention_mask_with_same_length(self):
seq_length, memory_length = 3, 2
causal_attention_mask = xlnet_base._create_causal_attention_mask(
seq_length=seq_length,
memory_length=memory_length,
same_length=True)
expected_output = np.array([[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1]])
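    # With same_length=True every position sees a fixed-size window of
    # memory_length + 1 = 3 tokens, so the visible span slides instead of
    # growing.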
self.assertAllClose(causal_attention_mask, expected_output)
class MaskComputationTests(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
use_input_mask=[False, True],
use_permutation_mask=[False, True],
attention_type=["uni", "bi"],
memory_length=[0, 4],
))
def test_compute_attention_mask_smoke(self,
use_input_mask,
use_permutation_mask,
attention_type,
memory_length):
"""Tests coverage and functionality for different configurations."""
batch_size = 2
seq_length = 8
if use_input_mask:
input_mask = tf.zeros(shape=(batch_size, seq_length))
else:
input_mask = None
if use_permutation_mask:
permutation_mask = tf.zeros(shape=(batch_size, seq_length, seq_length))
else:
permutation_mask = None
_, content_mask = xlnet_base._compute_attention_mask(
input_mask=input_mask,
permutation_mask=permutation_mask,
attention_type=attention_type,
seq_length=seq_length,
memory_length=memory_length,
batch_size=batch_size,
dtype=tf.float32)
expected_mask_shape = (batch_size, 1,
seq_length, seq_length + memory_length)
if use_input_mask or use_permutation_mask:
self.assertEqual(content_mask.shape, expected_mask_shape)
def test_no_input_masks(self):
query_mask, content_mask = xlnet_base._compute_attention_mask(
input_mask=None,
permutation_mask=None,
attention_type="uni",
seq_length=8,
memory_length=2,
batch_size=2,
dtype=tf.float32)
self.assertIsNone(query_mask)
self.assertIsNone(content_mask)
def test_input_mask_no_permutation(self):
"""Tests if an input mask is provided but not permutation.
In the case that only one of input mask or permutation mask is provided
and the attention type is bidirectional, the query mask should be
a broadcasted version of the provided mask.
Content mask should be a broadcasted version of the query mask, where the
diagonal is 0s.
"""
seq_length = 4
batch_size = 1
memory_length = 0
input_mask = np.array([[1, 1, 0, 0]])
permutation_mask = None
expected_query_mask = input_mask[None, None, :, :]
expected_content_mask = np.array([[[
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 0, 1]]]])
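    # Note: the content mask only differs from the query mask on the
    # diagonal, which is always 1 so that a token can still attend to itself
    # in the content stream.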
query_mask, content_mask = xlnet_base._compute_attention_mask(
input_mask=input_mask,
permutation_mask=permutation_mask,
attention_type="bi",
seq_length=seq_length,
memory_length=memory_length,
batch_size=batch_size,
dtype=tf.float32)
self.assertAllClose(query_mask, expected_query_mask)
self.assertAllClose(content_mask, expected_content_mask)
def test_permutation_mask_no_input_mask(self):
"""Tests if a permutation mask is provided but not input."""
seq_length = 2
batch_size = 1
memory_length = 0
input_mask = None
permutation_mask = np.array([
[[1, 0],
[1, 0]],
])
expected_query_mask = permutation_mask[:, None, :, :]
expected_content_mask = np.array([[[
[1, 0],
[1, 1]]]])
query_mask, content_mask = xlnet_base._compute_attention_mask(
input_mask=input_mask,
permutation_mask=permutation_mask,
attention_type="bi",
seq_length=seq_length,
memory_length=memory_length,
batch_size=batch_size,
dtype=tf.float32)
self.assertAllClose(query_mask, expected_query_mask)
self.assertAllClose(content_mask, expected_content_mask)
def test_permutation_and_input_mask(self):
"""Tests if both an input and permutation mask are provided."""
seq_length = 4
batch_size = 1
memory_length = 0
input_mask = np.array([[1, 1, 0, 0]])
permutation_mask = np.array([[
[0, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 1, 1, 0],
]])
expected_query_mask = np.array([[[
[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0]]]])
expected_content_mask = np.array([[[
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 0, 1]]]])
query_mask, content_mask = xlnet_base._compute_attention_mask(
input_mask=input_mask,
permutation_mask=permutation_mask,
attention_type="bi",
seq_length=seq_length,
memory_length=memory_length,
batch_size=batch_size,
dtype=tf.float32)
self.assertAllClose(query_mask, expected_query_mask)
self.assertAllClose(content_mask, expected_content_mask)
def test_permutation_input_uni_mask(self):
"""Tests if an input, permutation and causal mask are provided."""
seq_length = 4
batch_size = 1
memory_length = 0
input_mask = np.array([[1, 1, 1, 0]])
permutation_mask = np.array([[
[0, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 1, 1, 0],
]])
expected_query_mask = np.array([[[
[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]]]])
expected_content_mask = np.array([[[
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]]]])
query_mask, content_mask = xlnet_base._compute_attention_mask(
input_mask=input_mask,
permutation_mask=permutation_mask,
attention_type="uni",
seq_length=seq_length,
memory_length=memory_length,
batch_size=batch_size,
dtype=tf.float32)
self.assertAllClose(query_mask, expected_query_mask)
self.assertAllClose(content_mask, expected_content_mask)
class SegmentMatrixTests(tf.test.TestCase):
def test_no_segment_ids(self):
segment_matrix = xlnet_base._compute_segment_matrix(
segment_ids=None,
memory_length=2,
batch_size=1,
use_cls_mask=False)
self.assertIsNone(segment_matrix)
def test_basic(self):
batch_size = 1
memory_length = 0
segment_ids = np.array([
[1, 1, 2, 1]
])
expected_segment_matrix = np.array([[
[False, False, True, False],
[False, False, True, False],
[True, True, False, True],
[False, False, True, False]
]])
segment_matrix = xlnet_base._compute_segment_matrix(
segment_ids=segment_ids,
memory_length=memory_length,
batch_size=batch_size,
use_cls_mask=False)
self.assertAllClose(segment_matrix, expected_segment_matrix)
def test_basic_with_memory(self):
batch_size = 1
memory_length = 1
segment_ids = np.array([
[1, 1, 2, 1]
])
expected_segment_matrix = np.array([[
[True, False, False, True, False],
[True, False, False, True, False],
[True, True, True, False, True],
[True, False, False, True, False]
]]).astype(int)
segment_matrix = tf.cast(xlnet_base._compute_segment_matrix(
segment_ids=segment_ids,
memory_length=memory_length,
batch_size=batch_size,
use_cls_mask=False), dtype=tf.uint8)
self.assertAllClose(segment_matrix, expected_segment_matrix)
def dont_test_basic_with_class_mask(self):
# TODO(allencwang) - this test should pass but illustrates the legacy issue
# of using class mask. Enable once addressed.
batch_size = 1
memory_length = 0
segment_ids = np.array([
[1, 1, 2, 1]
])
expected_segment_matrix = np.array([[
[False, False, True, False],
[False, False, True, False],
[True, True, False, True],
[False, False, True, False]
]]).astype(int)
segment_matrix = tf.cast(xlnet_base._compute_segment_matrix(
segment_ids=segment_ids,
memory_length=memory_length,
batch_size=batch_size,
use_cls_mask=True), dtype=tf.uint8)
self.assertAllClose(segment_matrix, expected_segment_matrix)
class XLNetModelTests(tf.test.TestCase):
def _generate_data(self,
batch_size,
seq_length,
num_predictions=None):
"""Generates sample XLNet data for testing."""
    sequence_shape = (batch_size, seq_length)
    # Default to None so the returned dict is well-defined even when
    # `num_predictions` is not given.
    target_mapping = None
    if num_predictions is not None:
      target_mapping = tf.random.uniform(
          shape=(batch_size, num_predictions, seq_length))
return {
"input_ids": np.random.randint(10, size=sequence_shape, dtype="int32"),
"segment_ids":
np.random.randint(2, size=sequence_shape, dtype="int32"),
"input_mask":
np.random.randint(2, size=sequence_shape).astype("float32"),
"permutation_mask":
np.random.randint(
2, size=(batch_size, seq_length, seq_length)).astype("float32"),
"target_mapping": target_mapping,
"masked_tokens": tf.random.uniform(shape=sequence_shape),
}
def test_xlnet_model(self):
batch_size = 2
seq_length = 8
num_predictions = 2
hidden_size = 4
xlnet_model = xlnet_base.XLNetBase(
vocab_size=32000,
num_layers=2,
hidden_size=hidden_size,
num_attention_heads=2,
head_size=2,
inner_size=2,
dropout_rate=0.,
attention_dropout_rate=0.,
attention_type="bi",
bi_data=True,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
two_stream=False,
tie_attention_biases=True,
reuse_length=0,
inner_activation="relu")
input_data = self._generate_data(batch_size=batch_size,
seq_length=seq_length,
num_predictions=num_predictions)
model_output = xlnet_model(**input_data)
self.assertEqual(model_output[0].shape,
(batch_size, seq_length, hidden_size))
def test_get_config(self):
xlnet_model = xlnet_base.XLNetBase(
vocab_size=32000,
num_layers=12,
hidden_size=36,
num_attention_heads=12,
head_size=12,
inner_size=12,
dropout_rate=0.,
attention_dropout_rate=0.,
attention_type="bi",
bi_data=True,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
two_stream=False,
tie_attention_biases=True,
memory_length=0,
reuse_length=0,
inner_activation="relu")
config = xlnet_model.get_config()
new_xlnet = xlnet_base.XLNetBase.from_config(config)
self.assertEqual(config, new_xlnet.get_config())
if __name__ == "__main__":
tf.random.set_seed(0)
tf.test.main()
| 14,778 | 31.696903 | 80 | py |
models | models-master/official/nlp/modeling/networks/bert_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import bert_encoder
class BertEncoderTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(BertEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
("encoder_v2", bert_encoder.BertEncoderV2),
("encoder_v1", bert_encoder.BertEncoder),
)
def test_dict_outputs_network_creation(self, encoder_cls):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
if encoder_cls is bert_encoder.BertEncoderV2:
kwargs = {}
else:
kwargs = dict(dict_outputs=True)
test_network = encoder_cls(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
**kwargs)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
@parameterized.named_parameters(
("encoder_v2", bert_encoder.BertEncoderV2),
("encoder_v1", bert_encoder.BertEncoder),
)
def test_dict_outputs_all_encoder_outputs_network_creation(self, encoder_cls):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = encoder_cls(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
dict_outputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
@parameterized.named_parameters(
("encoder_v2", bert_encoder.BertEncoderV2),
("encoder_v1", bert_encoder.BertEncoder),
)
def test_dict_outputs_network_creation_return_attention_scores(
self, encoder_cls):
hidden_size = 32
sequence_length = 21
num_attention_heads = 5
num_layers = 3
# Create a small BertEncoder for testing.
test_network = encoder_cls(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
num_layers=num_layers,
return_attention_scores=True,
dict_outputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
all_attention_outputs = dict_outputs["attention_scores"]
expected_data_shape = [
None, num_attention_heads, sequence_length, sequence_length
]
self.assertLen(all_attention_outputs, num_layers)
for data in all_attention_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_attention_outputs[-1].dtype)
@parameterized.named_parameters(
("encoder_v2", bert_encoder.BertEncoderV2),
("encoder_v1", bert_encoder.BertEncoder),
)
def test_dict_outputs_network_creation_with_float16_dtype(self, encoder_cls):
hidden_size = 32
sequence_length = 21
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small BertEncoder for testing.
test_network = encoder_cls(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
dict_outputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
@parameterized.named_parameters(
("all_sequence_encoder_v1", bert_encoder.BertEncoder, None, 21),
("output_range_encoder_v1", bert_encoder.BertEncoder, 1, 1),
("all_sequence_encoder_v2", bert_encoder.BertEncoderV2, None, 21),
("output_range_encoder_v2", bert_encoder.BertEncoderV2, 1, 1),
)
def test_dict_outputs_network_invocation(
self, encoder_cls, output_range, out_seq_len):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
# Create a small BertEncoder for testing.
test_network = encoder_cls(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=output_range,
dict_outputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len)
# Creates a BertEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = encoder_cls(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
dict_outputs=True)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], sequence_length)
# Creates a BertEncoder with embedding_width != hidden_size
test_network = encoder_cls(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16,
dict_outputs=True)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
def test_embeddings_as_inputs(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoderV2(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
test_network.build(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
embeddings = test_network.get_embedding_layer()(word_ids)
# Calls with the embeddings.
dict_outputs = test_network(
dict(
input_word_embeddings=embeddings,
input_mask=mask,
input_type_ids=type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
inner_dim=1223,
inner_activation="relu",
output_dropout=0.05,
attention_dropout=0.22,
initializer="glorot_uniform",
output_range=-1,
embedding_width=16,
embedding_layer=None,
norm_first=False)
network = bert_encoder.BertEncoder(**kwargs)
# Validate that the config can be forced to JSON.
_ = network.to_json()
# Tests model saving/loading.
model_path = self.get_temp_dir() + "/model"
network.save(model_path)
_ = tf.keras.models.load_model(model_path)
def test_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
test_network_dict = bert_encoder.BertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
dict_outputs=True)
# Create the inputs (note that the first dimension is implicit).
inputs = dict(
input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)
_ = test_network_dict(inputs)
test_network_dict.set_weights(test_network.get_weights())
batch_size = 2
vocab_size = 100
num_types = 2
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
list_outputs = test_network([word_id_data, mask_data, type_id_data])
dict_outputs = test_network_dict(
dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data))
self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
def test_all_encoder_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
return_all_encoder_outputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
all_encoder_outputs, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_attention_scores_output_network_creation(self):
hidden_size = 32
sequence_length = 21
num_attention_heads = 5
num_layers = 3
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
num_layers=num_layers,
return_attention_scores=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
_, _, all_attention_outputs = test_network([word_ids, mask, type_ids])
expected_data_shape = [
None, num_attention_heads, sequence_length, sequence_length
]
self.assertLen(all_attention_outputs, num_layers)
for data in all_attention_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_attention_outputs[-1].dtype)
def test_network_creation_with_float16_dtype(self):
hidden_size = 32
sequence_length = 21
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # Under the mixed_float16 policy, the sequence output is still float32
    # (it comes from a layer norm) and the pooled output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
@parameterized.named_parameters(
("all_sequence", None, 21),
("output_range", 1, 1),
)
def test_network_invocation(self, output_range, out_seq_len):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=output_range)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len)
# Creates a BertEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = bert_encoder.BertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types)
data, pooled = test_network([word_ids, mask, type_ids])
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], sequence_length)
# Creates a BertEncoder with embedding_width != hidden_size
test_network = bert_encoder.BertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16)
data, pooled = test_network([word_ids, mask, type_ids])
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
class BertEncoderV2CompatibilityTest(tf.test.TestCase):
def tearDown(self):
super().tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
def test_weights_forward_compatible(self):
batch_size = 3
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
kwargs = dict(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
data = dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data)
# Create small BertEncoders for testing.
new_net = bert_encoder.BertEncoderV2(**kwargs)
_ = new_net(data)
kwargs["dict_outputs"] = True
old_net = bert_encoder.BertEncoder(**kwargs)
_ = old_net(data)
new_net._embedding_layer.set_weights(old_net._embedding_layer.get_weights())
new_net._position_embedding_layer.set_weights(
old_net._position_embedding_layer.get_weights())
new_net._type_embedding_layer.set_weights(
old_net._type_embedding_layer.get_weights())
new_net._embedding_norm_layer.set_weights(
old_net._embedding_norm_layer.get_weights())
# embedding_dropout has no weights.
if hasattr(old_net, "_embedding_projection"):
new_net._embedding_projection.set_weights(
old_net._embedding_projection.get_weights())
# attention_mask_layer has no weights.
new_net._pooler_layer.set_weights(old_net._pooler_layer.get_weights())
for otl, ntl in zip(old_net._transformer_layers,
new_net._transformer_layers):
ntl.set_weights(otl.get_weights())
def check_output_close(data, net1, net2):
output1 = net1(data)
output2 = net2(data)
for key in output1:
self.assertAllClose(output1[key], output2[key])
check_output_close(data, old_net, new_net)
def test_checkpoint_forward_compatible(self):
batch_size = 3
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
kwargs = dict(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
data = dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data)
kwargs["dict_outputs"] = True
old_net = bert_encoder.BertEncoder(**kwargs)
old_net_outputs = old_net(data)
ckpt = tf.train.Checkpoint(net=old_net)
path = ckpt.save(self.get_temp_dir())
del kwargs["dict_outputs"]
new_net = bert_encoder.BertEncoderV2(**kwargs)
new_ckpt = tf.train.Checkpoint(net=new_net)
status = new_ckpt.restore(path)
status.assert_existing_objects_matched()
# assert_consumed will fail because the old model has redundant nodes.
new_net_outputs = new_net(data)
self.assertAllEqual(old_net_outputs.keys(), new_net_outputs.keys())
for key in old_net_outputs:
self.assertAllClose(old_net_outputs[key], new_net_outputs[key])
def test_keras_model_checkpoint_forward_compatible(self):
batch_size = 3
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
kwargs = dict(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=None)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
data = dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data)
kwargs["dict_outputs"] = True
old_net = bert_encoder.BertEncoder(**kwargs)
inputs = old_net.inputs
outputs = old_net(inputs)
old_model = tf.keras.Model(inputs=inputs, outputs=outputs)
old_model_outputs = old_model(data)
ckpt = tf.train.Checkpoint(net=old_model)
path = ckpt.save(self.get_temp_dir())
del kwargs["dict_outputs"]
new_net = bert_encoder.BertEncoderV2(**kwargs)
inputs = new_net.inputs
outputs = new_net(inputs)
new_model = tf.keras.Model(inputs=inputs, outputs=outputs)
new_ckpt = tf.train.Checkpoint(net=new_model)
status = new_ckpt.restore(path)
status.assert_existing_objects_matched()
new_model_outputs = new_model(data)
self.assertAllEqual(old_model_outputs.keys(), new_model_outputs.keys())
for key in old_model_outputs:
self.assertAllClose(old_model_outputs[key], new_model_outputs[key])
if __name__ == "__main__":
tf.test.main()
| 26,920 | 38.358187 | 80 | py |
models | models-master/official/nlp/modeling/networks/funnel_transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Funnel Transformer network."""
# pylint: disable=g-classes-have-attributes
import math
from typing import Any, Callable, Optional, Sequence, Union
from absl import logging
import numpy as np
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_Activation = Union[str, Callable[..., Any]]
_MAX = 'max'
_AVG = 'avg'
_TRUNCATED_AVG = 'truncated_avg'
_transformer_cls2str = {
layers.TransformerEncoderBlock: 'TransformerEncoderBlock',
layers.ReZeroTransformer: 'ReZeroTransformer'
}
_str2transformer_cls = {
'TransformerEncoderBlock': layers.TransformerEncoderBlock,
'ReZeroTransformer': layers.ReZeroTransformer
}
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
def _get_policy_dtype():
try:
return tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32
except AttributeError: # tf1 has no attribute 'global_policy'
return tf.float32
def _pool_and_concat(mask, unpool_length: int, strides: Union[Sequence[int],
int],
axes: Union[Sequence[int], int]):
"""Pools the mask along a given axis with stride.
  It also skips the first `unpool_length` elements.
Args:
mask: Tensor to be pooled.
unpool_length: Leading elements to be skipped.
strides: Strides for the given axes.
axes: Axes to pool the Tensor.
Returns:
Pooled and concatenated Tensor.
"""
# Wraps the axes as a list.
if isinstance(axes, int):
axes = [axes]
if isinstance(strides, int):
strides = [strides] * len(axes)
else:
if len(strides) != len(axes):
raise ValueError('The lengths of strides and axes need to match.')
# Bypass no pooling cases.
if np.all(np.array(strides) == 1):
return mask
for axis, stride in zip(axes, strides):
# Skips first `unpool_length` tokens.
unpool_tensor_shape = [slice(None)] * axis + [slice(None, unpool_length)]
unpool_tensor = mask[unpool_tensor_shape]
# Pools the second half.
pool_tensor_shape = [slice(None)] * axis + [
slice(unpool_length, None, stride)
]
pool_tensor = mask[pool_tensor_shape]
mask = tf.concat((unpool_tensor, pool_tensor), axis=axis)
return mask
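# A minimal worked example of `_pool_and_concat` (illustrative only; the mask
# values and arguments below are assumptions, not used elsewhere in this file):
#
#   mask = tf.constant([[1, 1, 1, 1, 0, 0]])  # shape [1, 6]
#   _pool_and_concat(mask, unpool_length=2, strides=2, axes=[1])
#   # -> [[1, 1, 1, 0]]: the first 2 elements are kept as-is, and the rest is
#   #    strided with step 2 (indices 2 and 4 of the original mask).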
def _create_fractional_pool_transform(sl: int, pool_factor: float):
"""Create pooling transform for fractional pooling factor."""
assert pool_factor > 1.0, '`pool_factor` should be > 1.0.'
psl = int(sl / pool_factor)
gcd_ = math.gcd(sl, psl)
# It is expected chunk_sl and chunk_psl are small integers.
# The transform is built by tiling a [chunk_sl, chunk_psl] submatrix
# gcd_ times. The submatrix sums to chunk_psl.
chunk_sl = sl // gcd_
chunk_psl = psl // gcd_
num_one_entries = chunk_psl - 1
num_frac_entries = chunk_sl - (chunk_psl - 1)
# The transform is of shape [sl, psl].
transform = np.zeros((sl, psl))
for i in range(sl // chunk_sl):
row_start = chunk_sl * i
col_start = chunk_psl * i
for idx in range(num_one_entries):
transform[row_start + idx][col_start + idx] = 1.0
for idx in range(num_frac_entries):
transform[row_start + num_one_entries + idx][
col_start + num_one_entries
] = (1.0 / num_frac_entries)
return tf.constant(transform, dtype=_get_policy_dtype())
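# A worked example of the fractional transform above (the numbers are an
# illustrative assumption): for sl=6 and pool_factor=1.5, psl=4 and gcd_=2,
# so a [3, 2] submatrix is tiled twice and the returned transform is
#
#   [[1. , 0. , 0. , 0. ],
#    [0. , 0.5, 0. , 0. ],
#    [0. , 0.5, 0. , 0. ],
#    [0. , 0. , 1. , 0. ],
#    [0. , 0. , 0. , 0.5],
#    [0. , 0. , 0. , 0.5]]
#
# i.e. pooled position 0 copies input 0, pooled position 1 averages inputs 1
# and 2, and the second tile repeats the pattern for inputs 3-5.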
def _create_truncated_avg_transforms(
seq_length: int, pool_strides: Sequence[int]
):
"""Computes pooling transforms.
  The pooling_transform is of shape [seq_length, seq_length // pool_stride],
  where pooling_transform[i, j] = 1.0 / pool_stride if i // pool_stride == j,
  and 0.0 otherwise.
  It is essentially average pooling, but the final window is truncated when
  seq_length % pool_stride != 0.
For seq_length==6 and pool_stride==2, it is
[[ 0.5, 0.0, 0.0 ],
[ 0.5, 0.0, 0.0 ],
[ 0.0, 0.5, 0.0 ],
[ 0.0, 0.5, 0.0 ],
[ 0.0, 0.0, 0.5 ],
[ 0.0, 0.0, 0.5 ]]
Args:
seq_length: int, sequence length.
pool_strides: Sequence of pooling strides for each layer.
Returns:
pooling_transforms: Sequence of pooling transforms (Tensors) for each layer.
"""
pooling_transforms = []
for pool_stride in pool_strides:
if pool_stride == 1:
pooling_transforms.append(None)
else:
pooled_seq_length = int(seq_length / pool_stride)
if (1.0 * pool_stride).is_integer():
pfac, sl, psl = pool_stride, seq_length, pooled_seq_length
transform = [
[1.0 if (i // pfac) == j else 0.0 for j in range(psl)]
for i in range(sl)
]
transform = (
tf.constant(transform, dtype=_get_policy_dtype()) / pool_stride
)
else:
transform = _create_fractional_pool_transform(seq_length, pool_stride)
pooling_transforms.append(transform)
seq_length = pooled_seq_length
return pooling_transforms
def _create_truncated_avg_masks(input_mask: tf.Tensor,
pool_strides: Sequence[int],
transforms: Sequence[tf.Tensor]):
"""Computes attention masks.
  For example, an input mask such as [1, 1, 1, 0, 0] is re-pooled at every
  layer, so a pooled position stays valid whenever it covers at least one
  valid token.
Args:
input_mask: Tensor of shape [batch_size, seq_length].
pool_strides: Sequence of pooling strides for each layer.
    transforms: Sequence of pooling transform matrices, as produced by
      `_create_truncated_avg_transforms` (None for layers with stride 1).
Returns:
attention_masks: Sequence of attention masks for each layer.
"""
def create_2d_mask(from_length, mask):
return tf.einsum('F,BT->BFT', tf.ones([from_length], dtype=mask.dtype),
mask)
attention_masks = []
seq_length = tf.shape(input_mask)[-1]
layer_mask = tf.cast(input_mask, dtype=_get_policy_dtype())
for pool_stride, transform in zip(pool_strides, transforms):
if pool_stride == 1:
attention_masks.append(create_2d_mask(seq_length, layer_mask))
else:
pooled_seq_length = tf.cast(
tf.cast(seq_length, tf.float32) / tf.cast(pool_stride, tf.float32),
tf.int32,
)
attention_masks.append(create_2d_mask(pooled_seq_length, layer_mask))
layer_mask = tf.cast(
tf.einsum('BF,FT->BT', layer_mask, transform) > 0.0,
dtype=layer_mask.dtype,
)
seq_length = pooled_seq_length
del seq_length
return attention_masks
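# A minimal sketch of how the mask shrinks across layers (the concrete values
# are assumptions for illustration): with input_mask=[[1, 1, 1, 0, 0]] and
# pool_strides=[2], the attention mask for the layer has shape [1, 2, 5] with
# every row equal to [1, 1, 1, 0, 0], and the mask carried to the next layer
# would be [[1, 1]] because both pooled positions cover at least one valid
# token.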
@tf.keras.utils.register_keras_serializable(package='Text')
class FunnelTransformerEncoder(tf.keras.layers.Layer):
"""Funnel Transformer-based encoder network.
Funnel Transformer Implementation of https://arxiv.org/abs/2006.03236.
This implementation utilizes the base framework with Bert
(https://arxiv.org/abs/1810.04805).
Its output is compatible with `BertEncoder`.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
pool_type: Pooling type. Choose from ['max', 'avg', 'truncated_avg'].
pool_stride: An int or a list of ints. Pooling stride(s) to compress the
sequence length. If set to int, each layer will have the same stride size.
If set to list, the number of elements needs to match num_layers.
unpool_length: Leading n tokens to be skipped from pooling.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized. This does not apply to ReZero.
transformer_cls: str or a keras Layer. This is the base TransformerBlock the
funnel encoder relies on.
share_rezero: bool. Whether to share ReZero alpha between the attention
layer and the ffn layer. This option is specific to ReZero.
with_dense_inputs: Whether to accept dense embeddings as the input.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: _Activation = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
pool_type: str = _MAX,
pool_stride: Union[int, Sequence[Union[int, float]]] = 2,
unpool_length: int = 0,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02
),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
transformer_cls: Union[
str, tf.keras.layers.Layer
] = layers.TransformerEncoderBlock,
share_rezero: bool = False,
append_dense_inputs: bool = False,
**kwargs
):
super().__init__(**kwargs)
if output_range is not None:
      logging.warning(
          '`output_range` is available as an argument for `call()`. '
          'The `output_range` as an __init__ argument is deprecated.')
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(initializer),
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
# Will raise an error if the string is not supported.
if isinstance(transformer_cls, str):
transformer_cls = _str2transformer_cls[transformer_cls]
self._num_layers = num_layers
for i in range(num_layers):
layer = transformer_cls(
num_attention_heads=num_attention_heads,
intermediate_size=inner_dim,
inner_dim=inner_dim,
intermediate_activation=inner_activation,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
kernel_initializer=tf_utils.clone_initializer(initializer),
share_rezero=share_rezero,
name='transformer/layer_%d' % i)
self._transformer_layers.append(layer)
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
if isinstance(pool_stride, int):
# TODO(b/197133196): Pooling layer can be shared.
pool_strides = [pool_stride] * num_layers
else:
if len(pool_stride) != num_layers:
raise ValueError('Lengths of pool_stride and num_layers are not equal.')
pool_strides = pool_stride
is_fractional_pooling = False in [
(1.0 * pool_stride).is_integer() for pool_stride in pool_strides
]
if is_fractional_pooling and pool_type in [_MAX, _AVG]:
raise ValueError(
'Fractional pooling is only supported for'
          ' `pool_type`=`truncated_avg`'
)
# TODO(crickwu): explore tf.keras.layers.serialize method.
if pool_type == _MAX:
pool_cls = tf.keras.layers.MaxPooling1D
elif pool_type == _AVG:
pool_cls = tf.keras.layers.AveragePooling1D
elif pool_type == _TRUNCATED_AVG:
# TODO(b/203665205): unpool_length should be implemented.
if unpool_length != 0:
raise ValueError('unpool_length is not supported by truncated_avg now.')
else:
raise ValueError('pool_type not supported.')
if pool_type in (_MAX, _AVG):
self._att_input_pool_layers = []
for layer_pool_stride in pool_strides:
att_input_pool_layer = pool_cls(
pool_size=layer_pool_stride,
strides=layer_pool_stride,
padding='same',
name='att_input_pool_layer')
self._att_input_pool_layers.append(att_input_pool_layer)
self._max_sequence_length = max_sequence_length
self._pool_strides = pool_strides # This is a list here.
self._unpool_length = unpool_length
self._pool_type = pool_type
self._append_dense_inputs = append_dense_inputs
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'pool_type': pool_type,
'pool_stride': pool_stride,
'unpool_length': unpool_length,
'transformer_cls': _transformer_cls2str.get(
transformer_cls, str(transformer_cls)
),
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))
def call(self, inputs, output_range: Optional[tf.Tensor] = None):
# inputs are [word_ids, mask, type_ids]
word_embeddings = None
if isinstance(inputs, (list, tuple)):
logging.warning('List inputs to %s are discouraged.', self.__class__)
if len(inputs) == 3:
word_ids, mask, type_ids = inputs
dense_inputs = None
dense_mask = None
dense_type_ids = None
elif len(inputs) == 6:
word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids = (
inputs
)
else:
raise ValueError(
'Unexpected inputs to %s with length at %d.'
% (self.__class__, len(inputs))
)
elif isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids')
word_embeddings = inputs.get('input_word_embeddings', None)
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
      # Append the dense embeddings at the end of the sequence if requested
      # and `unpool_length` is set to zero.
if self._append_dense_inputs:
if self._unpool_length != 0:
raise ValueError(
'unpool_length is not supported by append_dense_inputs now.'
)
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
mask = tf.concat([mask, dense_mask], axis=1)
else:
        # Concatenate the dense embeddings at the beginning of the sequence so
        # that `unpool_length` can keep these embeddings from being pooled.
word_embeddings = tf.concat([dense_inputs, word_embeddings], axis=1)
type_ids = tf.concat([dense_type_ids, type_ids], axis=1)
mask = tf.concat([dense_mask, mask], axis=1)
# absolute position embeddings
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = tf.keras.layers.add(
[word_embeddings, position_embeddings, type_embeddings])
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
# TODO(b/195972228): attention_mask can be co-generated with pooling.
if self._pool_type in (_MAX, _AVG):
attention_mask = _pool_and_concat(
attention_mask,
unpool_length=self._unpool_length,
strides=self._pool_strides[0],
axes=[1])
for i, layer in enumerate(self._transformer_layers):
# Bypass no pooling cases.
if self._pool_strides[i] == 1:
x = layer([x, x, attention_mask])
else:
# Pools layer for compressing the query length.
pooled_inputs = self._att_input_pool_layers[i](
x[:, self._unpool_length:, :])
query_inputs = tf.concat(
values=(tf.cast(
x[:, :self._unpool_length, :],
dtype=pooled_inputs.dtype), pooled_inputs),
axis=1)
x = layer([query_inputs, x, attention_mask],
output_range=output_range if i == self._num_layers -
1 else None)
# Pools the corresponding attention_mask.
if i < len(self._transformer_layers) - 1:
attention_mask = _pool_and_concat(
attention_mask,
unpool_length=self._unpool_length,
strides=[self._pool_strides[i + 1], self._pool_strides[i]],
axes=[1, 2])
encoder_outputs.append(x)
elif self._pool_type == _TRUNCATED_AVG:
# Compute the attention masks and pooling transforms.
# Note we do not compute this in __init__ due to inference converter issue
# b/215659399.
pooling_transforms = _create_truncated_avg_transforms(
self._max_sequence_length, self._pool_strides)
attention_masks = _create_truncated_avg_masks(mask, self._pool_strides,
pooling_transforms)
for i, layer in enumerate(self._transformer_layers):
attention_mask = attention_masks[i]
transformer_output_range = None
if i == self._num_layers - 1:
transformer_output_range = output_range
# Bypass no pooling cases.
if self._pool_strides[i] == 1:
x = layer([x, x, attention_mask],
output_range=transformer_output_range)
else:
pooled_inputs = tf.einsum(
'BFD,FT->BTD',
tf.cast(x[:, self._unpool_length:, :], _get_policy_dtype()
), # extra casting for faster mixed computation.
pooling_transforms[i])
query_inputs = tf.concat(
values=(tf.cast(
x[:, :self._unpool_length, :],
dtype=pooled_inputs.dtype), pooled_inputs),
axis=1)
x = layer([query_inputs, x, attention_mask],
output_range=transformer_output_range)
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
return dict(
word_embeddings=word_embeddings,
embedding_output=embeddings,
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
      logging.warning(warn_string)
return cls(**config)
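# A minimal usage sketch for FunnelTransformerEncoder (not exercised in this
# module; the concrete sizes below are assumptions for illustration):
#
#   encoder = FunnelTransformerEncoder(
#       vocab_size=30522,
#       num_layers=3,
#       max_sequence_length=128,
#       pool_type='truncated_avg',
#       pool_stride=2)
#   outputs = encoder(dict(
#       input_word_ids=tf.ones((2, 128), dtype=tf.int32),
#       input_mask=tf.ones((2, 128), dtype=tf.int32),
#       input_type_ids=tf.zeros((2, 128), dtype=tf.int32)))
#   # With three stride-2 poolings, outputs['sequence_output'] is expected to
#   # have shape [2, 16, 768] and outputs['pooled_output'] shape [2, 768].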
| 23,970 | 37.170382 | 80 | py |
models | models-master/official/nlp/modeling/networks/albert_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ALBERT transformer-based text encoder network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import albert_encoder
class AlbertEncoderTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(AlbertEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="default", expected_dtype=tf.float32),
dict(testcase_name="with_float16_dtype", expected_dtype=tf.float16),
)
def test_network_creation(self, expected_dtype):
hidden_size = 32
sequence_length = 21
kwargs = dict(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
if expected_dtype == tf.float16:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(**kwargs)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # Under the mixed_float16 policy, the sequence output is still float32
    # (it comes from a layer norm) and the pooled output should be float16.
self.assertEqual(tf.float32, data.dtype)
self.assertEqual(expected_dtype, pooled.dtype)
    # ALBERT has additional 'embedding_hidden_mapping_in' weights and
# it shares transformer weights.
self.assertNotEmpty(
[x for x in test_network.weights if "embedding_projection/" in x.name])
self.assertNotEmpty(
[x for x in test_network.weights if "transformer/" in x.name])
self.assertEmpty(
[x for x in test_network.weights if "transformer/layer" in x.name])
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
num_layers = 3
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
list_outputs = model.predict([word_id_data, mask_data, type_id_data])
# Creates a TransformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
_ = model.predict([word_id_data, mask_data, type_id_data])
# Tests dictionary outputs.
test_network_dict = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types,
dict_outputs=True)
_ = test_network_dict([word_ids, mask, type_ids])
test_network_dict.set_weights(test_network.get_weights())
list_outputs = test_network([word_id_data, mask_data, type_id_data])
dict_outputs = test_network_dict(
dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data))
self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
self.assertLen(dict_outputs["pooled_output"], num_layers)
def test_serialize_deserialize(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
embedding_width=8,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
intermediate_size=1223,
activation="relu",
dropout_rate=0.05,
attention_dropout_rate=0.22,
initializer="glorot_uniform")
network = albert_encoder.AlbertEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = (
albert_encoder.AlbertEncoder.from_config(
network.get_config()))
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == "__main__":
tf.test.main()
| 7,107 | 38.270718 | 80 | py |
models | models-master/official/nlp/modeling/networks/span_labeling.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Span labeling network."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.modeling import tf_utils
def _apply_paragraph_mask(logits, paragraph_mask):
"""Applies a position mask to calculated logits."""
masked_logits = logits * (paragraph_mask) - 1e30 * (1 - paragraph_mask)
return tf.nn.log_softmax(masked_logits, -1), masked_logits
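# For instance (an illustrative assumption, not taken from the library): with
# logits=[2.0, 5.0] and paragraph_mask=[1.0, 0.0], the masked logits become
# [2.0, -1e30], so the log-softmax places essentially all probability mass on
# the first position.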
@tf.keras.utils.register_keras_serializable(package='Text')
class SpanLabeling(tf.keras.Model):
"""Span labeling network head for BERT modeling.
This network implements a simple single-span labeler based on a dense layer.
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
input_width: The innermost dimension of the input tensor to this network.
activation: The activation, if any, for the dense layer in this network.
initializer: The initializer for the dense layer in this network. Defaults
to a Glorot uniform initializer.
output: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
input_width,
activation=None,
initializer='glorot_uniform',
output='logits',
**kwargs):
sequence_data = tf.keras.layers.Input(
shape=(None, input_width), name='sequence_data', dtype=tf.float32)
intermediate_logits = tf.keras.layers.Dense(
2, # This layer predicts start location and end location.
activation=activation,
kernel_initializer=initializer,
name='predictions/transform/logits')(
sequence_data)
start_logits, end_logits = self._split_output_tensor(intermediate_logits)
start_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(
start_logits)
end_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(end_logits)
if output == 'logits':
output_tensors = [start_logits, end_logits]
elif output == 'predictions':
output_tensors = [start_predictions, end_predictions]
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super().__init__(
inputs=[sequence_data], outputs=output_tensors, **kwargs)
config_dict = {
'input_width': input_width,
'activation': activation,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.start_logits = start_logits
self.end_logits = end_logits
def _split_output_tensor(self, tensor):
transposed_tensor = tf.transpose(tensor, [2, 0, 1])
return tf.unstack(transposed_tensor)
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
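# A minimal usage sketch for SpanLabeling (illustrative only; the sizes below
# are assumptions):
#
#   span_labeler = SpanLabeling(input_width=768)
#   sequence_data = tf.random.uniform((2, 384, 768))  # [batch, seq, width]
#   start_logits, end_logits = span_labeler(sequence_data)
#   # Both outputs are expected to have shape [2, 384].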
class XLNetSpanLabeling(tf.keras.layers.Layer):
"""Span labeling network head for XLNet on SQuAD2.0.
  This network implements a span-labeler based on dense layers and question
possibility classification. This is the complex version seen in the original
XLNet implementation.
This applies a dense layer to the input sequence data to predict the start
positions, and then uses either the true start positions (if training) or
beam search to predict the end positions.
**Note: `compute_with_beam_search` will not work with the Functional API
(https://www.tensorflow.org/guide/keras/functional).
Args:
input_width: The innermost dimension of the input tensor to this network.
start_n_top: Beam size for span start.
end_n_top: Beam size for span end.
activation: The activation, if any, for the dense layer in this network.
dropout_rate: The dropout rate used for answer classification.
initializer: The initializer for the dense layer in this network. Defaults
to a Glorot uniform initializer.
"""
def __init__(self,
input_width,
start_n_top=5,
end_n_top=5,
activation='tanh',
dropout_rate=0.,
initializer='glorot_uniform',
**kwargs):
super().__init__(**kwargs)
self._config = {
'input_width': input_width,
'activation': activation,
'initializer': initializer,
'start_n_top': start_n_top,
'end_n_top': end_n_top,
'dropout_rate': dropout_rate,
}
if start_n_top <= 1:
raise ValueError('`start_n_top` must be greater than 1.')
self._start_n_top = start_n_top
self._end_n_top = end_n_top
self.start_logits_dense = tf.keras.layers.Dense(
units=1,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='predictions/transform/start_logits')
self.end_logits_inner_dense = tf.keras.layers.Dense(
units=input_width,
kernel_initializer=tf_utils.clone_initializer(initializer),
activation=activation,
name='predictions/transform/end_logits/inner')
self.end_logits_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12,
name='predictions/transform/end_logits/layernorm')
self.end_logits_output_dense = tf.keras.layers.Dense(
units=1,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='predictions/transform/end_logits/output')
self.answer_logits_inner = tf.keras.layers.Dense(
units=input_width,
kernel_initializer=tf_utils.clone_initializer(initializer),
activation=activation,
name='predictions/transform/answer_logits/inner')
self.answer_logits_dropout = tf.keras.layers.Dropout(rate=dropout_rate)
self.answer_logits_output = tf.keras.layers.Dense(
units=1,
kernel_initializer=tf_utils.clone_initializer(initializer),
use_bias=False,
name='predictions/transform/answer_logits/output')
def end_logits(self, inputs):
"""Computes the end logits.
Input shapes into the inner, layer norm, output layers should match.
During training, inputs shape should be
[batch_size, seq_length, input_width].
During inference, input shapes should be
[batch_size, seq_length, start_n_top, input_width].
Args:
inputs: The input for end logits.
Returns:
Calculated end logits.
"""
if len(tf.shape(inputs)) == 3:
# inputs: [B, S, H] -> [B, S, 1, H]
inputs = tf.expand_dims(inputs, axis=2)
end_logits = self.end_logits_inner_dense(inputs)
end_logits = self.end_logits_layer_norm(end_logits)
end_logits = self.end_logits_output_dense(end_logits)
end_logits = tf.squeeze(end_logits)
return end_logits
def call(self,
sequence_data,
class_index,
paragraph_mask=None,
start_positions=None,
training=False):
"""Implements call().
Einsum glossary:
- b: the batch size.
- l: the sequence length.
- h: the hidden size, or input width.
- k: the start/end top n.
Args:
sequence_data: The input sequence data of shape
`(batch_size, seq_length, input_width)`.
class_index: The class indices of the inputs of shape `(batch_size,)`.
paragraph_mask: Invalid position mask such as query and special symbols
        (e.g. PAD, SEP, CLS) of shape `(batch_size, seq_length)`.
start_positions: The start positions of each example of shape
`(batch_size,)`.
training: Whether or not this is the training phase.
Returns:
A dictionary with the keys `start_predictions`, `end_predictions`,
`start_logits`, `end_logits`.
If inference, then `start_top_predictions`, `start_top_index`,
`end_top_predictions`, `end_top_index` are also included.
"""
paragraph_mask = tf.cast(paragraph_mask, dtype=sequence_data.dtype)
class_index = tf.reshape(class_index, [-1])
seq_length = tf.shape(sequence_data)[1]
start_logits = self.start_logits_dense(sequence_data)
start_logits = tf.squeeze(start_logits, -1)
start_predictions, masked_start_logits = _apply_paragraph_mask(
start_logits, paragraph_mask)
compute_with_beam_search = not training or start_positions is None
if compute_with_beam_search:
# Compute end logits using beam search.
start_top_predictions, start_top_index = tf.nn.top_k(
start_predictions, k=self._start_n_top)
start_index = tf.one_hot(
start_top_index, depth=seq_length, axis=-1, dtype=tf.float32)
      # start_index: [batch_size, start_n_top, seq_length]
start_features = tf.einsum('blh,bkl->bkh', sequence_data, start_index)
start_features = tf.tile(start_features[:, None, :, :],
[1, seq_length, 1, 1])
      # start_features: [batch_size, seq_length, start_n_top, input_width]
end_input = tf.tile(sequence_data[:, :, None],
[1, 1, self._start_n_top, 1])
end_input = tf.concat([end_input, start_features], axis=-1)
      # end_input: [batch_size, seq_length, start_n_top, 2*input_width]
paragraph_mask = paragraph_mask[:, None, :]
end_logits = self.end_logits(end_input)
      # Note: this will fail if `start_n_top` is not > 1.
end_logits = tf.transpose(end_logits, [0, 2, 1])
else:
start_positions = tf.reshape(start_positions, [-1])
start_index = tf.one_hot(
start_positions, depth=seq_length, axis=-1, dtype=tf.float32)
# start_index: [batch_size, seq_length]
start_features = tf.einsum('blh,bl->bh', sequence_data, start_index)
start_features = tf.tile(start_features[:, None, :], [1, seq_length, 1])
# start_features: [batch_size, seq_length, input_width]
end_input = tf.concat([sequence_data, start_features],
axis=-1)
# end_input: [batch_size, seq_length, 2*input_width]
end_logits = self.end_logits(end_input)
end_predictions, masked_end_logits = _apply_paragraph_mask(
end_logits, paragraph_mask)
output_dict = dict(
start_predictions=start_predictions,
end_predictions=end_predictions,
start_logits=masked_start_logits,
end_logits=masked_end_logits)
if not training:
end_top_predictions, end_top_index = tf.nn.top_k(
end_predictions, k=self._end_n_top)
end_top_predictions = tf.reshape(
end_top_predictions,
[-1, self._start_n_top * self._end_n_top])
end_top_index = tf.reshape(
end_top_index,
[-1, self._start_n_top * self._end_n_top])
output_dict['start_top_predictions'] = start_top_predictions
output_dict['start_top_index'] = start_top_index
output_dict['end_top_predictions'] = end_top_predictions
output_dict['end_top_index'] = end_top_index
# get the representation of CLS
class_index = tf.one_hot(class_index, seq_length, axis=-1, dtype=tf.float32)
class_feature = tf.einsum('blh,bl->bh', sequence_data, class_index)
# get the representation of START
start_p = tf.nn.softmax(masked_start_logits, axis=-1)
start_feature = tf.einsum('blh,bl->bh', sequence_data, start_p)
answer_feature = tf.concat([start_feature, class_feature], -1)
answer_feature = self.answer_logits_inner(answer_feature)
answer_feature = self.answer_logits_dropout(answer_feature)
class_logits = self.answer_logits_output(answer_feature)
class_logits = tf.squeeze(class_logits, -1)
output_dict['class_logits'] = class_logits
return output_dict
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
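# A minimal usage sketch for XLNetSpanLabeling (illustrative only; the sizes
# and values below are assumptions):
#
#   head = XLNetSpanLabeling(
#       input_width=768, start_n_top=5, end_n_top=5, dropout_rate=0.1)
#   batch_size, seq_length = 2, 384
#   outputs = head(
#       tf.random.uniform((batch_size, seq_length, 768)),
#       class_index=tf.zeros((batch_size,), dtype=tf.int32),
#       paragraph_mask=tf.ones((batch_size, seq_length)),  # 1.0 keeps a position
#       start_positions=tf.zeros((batch_size,), dtype=tf.int32),
#       training=True)
#   # In training mode the true start positions are used, so `outputs` holds
#   # 'start_logits'/'end_logits' and their log-softmax 'start_predictions'/
#   # 'end_predictions' of shape [batch_size, seq_length], plus 'class_logits';
#   # at inference time the beam-search 'start_top_*' and 'end_top_*' entries
#   # are added as well.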
| 13,195 | 37.697947 | 80 | py |
models | models-master/official/nlp/modeling/networks/encoder_scaffold_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EncoderScaffold network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.modeling import activations
from official.nlp.modeling import layers
from official.nlp.modeling.networks import encoder_scaffold
# Test class that wraps a standard transformer layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package="TestOnly")
class ValidatedTransformerLayer(layers.Transformer):
def __init__(self, call_list, call_class=None, **kwargs):
super(ValidatedTransformerLayer, self).__init__(**kwargs)
self.list = call_list
self.call_class = call_class
def call(self, inputs):
self.list.append(True)
return super(ValidatedTransformerLayer, self).call(inputs)
def get_config(self):
config = super(ValidatedTransformerLayer, self).get_config()
config["call_list"] = self.list
config["call_class"] = tf.keras.utils.get_registered_name(self.call_class)
return config
# Test class that wraps a standard self attention mask layer.
# If this layer is called at any point, the list passed to the config
# object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package="TestOnly")
class ValidatedMaskLayer(layers.SelfAttentionMask):
def __init__(self, call_list, call_class=None, **kwargs):
super(ValidatedMaskLayer, self).__init__(**kwargs)
self.list = call_list
self.call_class = call_class
def call(self, inputs, mask):
self.list.append(True)
return super(ValidatedMaskLayer, self).call(inputs, mask)
def get_config(self):
config = super(ValidatedMaskLayer, self).get_config()
config["call_list"] = self.list
config["call_class"] = tf.keras.utils.get_registered_name(self.call_class)
return config
@tf.keras.utils.register_keras_serializable(package="TestLayerOnly")
class TestLayer(tf.keras.layers.Layer):
pass
class EncoderScaffoldLayerClassTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(EncoderScaffoldLayerClassTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="only_final_output", return_all_layer_outputs=False),
dict(testcase_name="all_layer_outputs", return_all_layer_outputs=True))
def test_network_creation(self, return_all_layer_outputs):
hidden_size = 32
sequence_length = 21
num_hidden_instances = 3
embedding_cfg = {
"vocab_size": 100,
"type_vocab_size": 16,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
call_list = []
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
"call_list":
call_list
}
mask_call_list = []
mask_cfg = {
"call_list":
mask_call_list
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=num_hidden_instances,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cls=ValidatedTransformerLayer,
hidden_cfg=hidden_cfg,
mask_cls=ValidatedMaskLayer,
mask_cfg=mask_cfg,
embedding_cfg=embedding_cfg,
layer_norm_before_pooling=True,
return_all_layer_outputs=return_all_layer_outputs)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
output_data, pooled = test_network([word_ids, mask, type_ids])
if return_all_layer_outputs:
self.assertIsInstance(output_data, list)
self.assertLen(output_data, num_hidden_instances)
data = output_data[-1]
else:
data = output_data
self.assertIsInstance(test_network.hidden_layers, list)
self.assertLen(test_network.hidden_layers, num_hidden_instances)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertTrue(hasattr(test_network, "_output_layer_norm"))
def test_network_creation_with_float16_dtype(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
hidden_size = 32
sequence_length = 21
embedding_cfg = {
"vocab_size": 100,
"type_vocab_size": 16,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
    # Under the mixed_float16 policy, the sequence output is still float32
    # (it comes from a layer norm) and the pooled output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg,
dict_outputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
outputs = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], outputs)
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
preds = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(preds["pooled_output"].shape, (3, hidden_size))
# Creates a EncoderScaffold with max_sequence_length != sequence_length
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length * 2,
"initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg)
outputs = test_network([word_ids, mask, type_ids])
model = tf.keras.Model([word_ids, mask, type_ids], outputs)
_ = model.predict([word_id_data, mask_data, type_id_data])
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
hidden_size = 32
sequence_length = 21
embedding_cfg = {
"vocab_size": 100,
"type_vocab_size": 16,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cfg=embedding_cfg)
# Create another network object from the first object's config.
new_network = encoder_scaffold.EncoderScaffold.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
class Embeddings(tf.keras.Model):
def __init__(self, vocab_size, hidden_size):
super().__init__()
self.inputs = [
tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name="input_word_ids"),
tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name="input_mask")
]
self.attention_mask = layers.SelfAttentionMask()
self.embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=hidden_size,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
name="word_embeddings")
def call(self, inputs):
word_ids, mask = inputs
word_embeddings = self.embedding_layer(word_ids)
return word_embeddings, self.attention_mask([word_embeddings, mask])
class EncoderScaffoldEmbeddingNetworkTest(tf.test.TestCase):
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
# Build an embedding network to swap in for the default network. This one
# will have 2 inputs (mask and word_ids) instead of 3, and won't use
# positional embeddings.
network = Embeddings(vocab_size, hidden_size)
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cls=network)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
_ = model.predict([word_id_data, mask_data])
def test_serialize_deserialize(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
# Build an embedding network to swap in for the default network. This one
# will have 2 inputs (mask and word_ids) instead of 3, and won't use
# positional embeddings.
word_ids = tf.keras.layers.Input(
shape=(sequence_length,), dtype=tf.int32, name="input_word_ids")
mask = tf.keras.layers.Input(
shape=(sequence_length,), dtype=tf.int32, name="input_mask")
embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=hidden_size,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
name="word_embeddings")
word_embeddings = embedding_layer(word_ids)
attention_mask = layers.SelfAttentionMask()([word_embeddings, mask])
network = tf.keras.Model([word_ids, mask],
[word_embeddings, attention_mask])
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
}
# Create a small EncoderScaffold for testing.
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cfg=hidden_cfg,
embedding_cls=network,
embedding_data=embedding_layer.embeddings)
# Create another network object from the first object's config.
new_network = encoder_scaffold.EncoderScaffold.from_config(
test_network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_network.get_config(), new_network.get_config())
# Create a model based off of the old and new networks:
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = new_network([word_ids, mask])
new_model = tf.keras.Model([word_ids, mask], [data, pooled])
data, pooled = test_network([word_ids, mask])
model = tf.keras.Model([word_ids, mask], [data, pooled])
# Copy the weights between models.
new_model.set_weights(model.get_weights())
# Invoke the models.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
data, cls = model.predict([word_id_data, mask_data])
new_data, new_cls = new_model.predict([word_id_data, mask_data])
# The output should be equal.
self.assertAllEqual(data, new_data)
self.assertAllEqual(cls, new_cls)
# We should not be able to get a reference to the embedding data.
with self.assertRaisesRegex(RuntimeError, ".*does not have a reference.*"):
new_network.get_embedding_table()
class EncoderScaffoldHiddenInstanceTest(
tf.test.TestCase, parameterized.TestCase):
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
call_list = []
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
"call_list":
call_list
}
mask_call_list = []
mask_cfg = {
"call_list": mask_call_list
}
# Create a small EncoderScaffold for testing. This time, we pass an already-
# instantiated layer object.
xformer = ValidatedTransformerLayer(**hidden_cfg)
xmask = ValidatedMaskLayer(**mask_cfg)
test_network = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cls=xformer,
mask_cls=xmask,
embedding_cfg=embedding_cfg)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
_ = model.predict([word_id_data, mask_data, type_id_data])
# If call_list[0] exists and is True, the passed layer class was
# called as part of the graph creation.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_hidden_cls_list(self):
hidden_size = 32
sequence_length = 10
vocab_size = 57
embedding_network = Embeddings(vocab_size, hidden_size)
call_list = []
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
"call_list":
call_list
}
mask_call_list = []
mask_cfg = {
"call_list": mask_call_list
}
# Create a small EncoderScaffold for testing. This time, we pass an already-
# instantiated layer object.
xformer = ValidatedTransformerLayer(**hidden_cfg)
xmask = ValidatedMaskLayer(**mask_cfg)
test_network_a = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
hidden_cls=xformer,
mask_cls=xmask,
embedding_cls=embedding_network)
# Create a network b with same embedding and hidden layers as network a.
test_network_b = encoder_scaffold.EncoderScaffold(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
mask_cls=xmask,
embedding_cls=test_network_a.embedding_network,
hidden_cls=test_network_a.hidden_layers)
# Create a network c with same embedding but fewer hidden layers compared to
# network a and b.
hidden_layers = test_network_a.hidden_layers
hidden_layers.pop()
test_network_c = encoder_scaffold.EncoderScaffold(
num_hidden_instances=2,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
mask_cls=xmask,
embedding_cls=test_network_a.embedding_network,
hidden_cls=hidden_layers)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
# Create model based off of network a:
data_a, pooled_a = test_network_a([word_ids, mask])
model_a = tf.keras.Model([word_ids, mask], [data_a, pooled_a])
# Create model based off of network b:
data_b, pooled_b = test_network_b([word_ids, mask])
model_b = tf.keras.Model([word_ids, mask], [data_b, pooled_b])
    # Create model based off of network c:
data_c, pooled_c = test_network_c([word_ids, mask])
model_c = tf.keras.Model([word_ids, mask], [data_c, pooled_c])
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
output_a, _ = model_a.predict([word_id_data, mask_data])
output_b, _ = model_b.predict([word_id_data, mask_data])
output_c, _ = model_c.predict([word_id_data, mask_data])
# Outputs from model a and b should be the same since they share the same
# embedding and hidden layers.
self.assertAllEqual(output_a, output_b)
# Outputs from model a and c shouldn't be the same since they share the same
# embedding layer but different number of hidden layers.
self.assertNotAllEqual(output_a, output_c)
@parameterized.parameters(True, False)
def test_serialize_deserialize(self, use_hidden_cls_instance):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
embedding_cfg = {
"vocab_size": vocab_size,
"type_vocab_size": num_types,
"hidden_size": hidden_size,
"seq_length": sequence_length,
"max_seq_length": sequence_length,
"initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
"dropout_rate": 0.1,
}
call_list = []
hidden_cfg = {
"num_attention_heads":
2,
"intermediate_size":
3072,
"intermediate_activation":
activations.gelu,
"dropout_rate":
0.1,
"attention_dropout_rate":
0.1,
"kernel_initializer":
tf.keras.initializers.TruncatedNormal(stddev=0.02),
"call_list":
call_list,
"call_class":
TestLayer
}
mask_call_list = []
mask_cfg = {"call_list": mask_call_list, "call_class": TestLayer}
# Create a small EncoderScaffold for testing. This time, we pass an already-
# instantiated layer object.
kwargs = dict(
num_hidden_instances=3,
pooled_output_dim=hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
embedding_cfg=embedding_cfg)
if use_hidden_cls_instance:
xformer = ValidatedTransformerLayer(**hidden_cfg)
xmask = ValidatedMaskLayer(**mask_cfg)
test_network = encoder_scaffold.EncoderScaffold(
hidden_cls=xformer, mask_cls=xmask, **kwargs)
else:
test_network = encoder_scaffold.EncoderScaffold(
hidden_cls=ValidatedTransformerLayer,
hidden_cfg=hidden_cfg,
mask_cls=ValidatedMaskLayer,
mask_cfg=mask_cfg,
**kwargs)
# Create another network object from the first object's config.
new_network = encoder_scaffold.EncoderScaffold.from_config(
test_network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_network.get_config(), new_network.get_config())
# Create a model based off of the old and new networks:
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = new_network([word_ids, mask, type_ids])
new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
data, pooled = test_network([word_ids, mask, type_ids])
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Copy the weights between models.
new_model.set_weights(model.get_weights())
# Invoke the models.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
data, cls = model.predict([word_id_data, mask_data, type_id_data])
new_data, new_cls = new_model.predict(
[word_id_data, mask_data, type_id_data])
# The output should be equal.
self.assertAllEqual(data, new_data)
self.assertAllEqual(cls, new_cls)
if __name__ == "__main__":
tf.test.main()
| 29,056 | 35.503769 | 80 | py |
models | models-master/official/nlp/modeling/networks/bert_dense_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network with dense features as inputs."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import bert_encoder
class BertEncoderV2Test(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(BertEncoderV2Test, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
def test_dict_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
dense_sequence_length = 20
# Create a small dense BertEncoderV2 for testing.
kwargs = {}
test_network = bert_encoder.BertEncoderV2(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
with_dense_inputs=True,
**kwargs)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(
input_word_ids=word_ids,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [
None, sequence_length + dense_sequence_length, hidden_size
]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_dict_outputs_all_encoder_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
dense_sequence_length = 20
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoderV2(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
dict_outputs=True,
with_dense_inputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(
input_word_ids=word_ids,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [
None, sequence_length + dense_sequence_length, hidden_size
]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_dict_outputs_network_creation_with_float16_dtype(self):
hidden_size = 32
sequence_length = 21
dense_sequence_length = 20
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoderV2(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
dict_outputs=True,
with_dense_inputs=True)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(
input_word_ids=word_ids,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [
None, sequence_length + dense_sequence_length, hidden_size
]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
@parameterized.named_parameters(
("all_sequence_encoder_v2", bert_encoder.BertEncoderV2, None, 41),
("output_range_encoder_v2", bert_encoder.BertEncoderV2, 1, 1),
)
def test_dict_outputs_network_invocation(
self, encoder_cls, output_range, out_seq_len):
hidden_size = 32
sequence_length = 21
dense_sequence_length = 20
vocab_size = 57
num_types = 7
# Create a small BertEncoder for testing.
test_network = encoder_cls(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
dict_outputs=True,
with_dense_inputs=True,
output_range=output_range)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(
input_word_ids=word_ids,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model(
[word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids],
[data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
dense_input_data = np.random.rand(batch_size, dense_sequence_length,
hidden_size)
dense_mask_data = np.random.randint(
2, size=(batch_size, dense_sequence_length))
dense_type_ids_data = np.random.randint(
num_types, size=(batch_size, dense_sequence_length))
outputs = model.predict([
word_id_data, mask_data, type_id_data, dense_input_data,
dense_mask_data, dense_type_ids_data
])
self.assertEqual(outputs[0].shape[1], out_seq_len)
# Creates a BertEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = encoder_cls(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
dict_outputs=True)
dict_outputs = test_network(
dict(
input_word_ids=word_ids,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model(
[word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids],
[data, pooled])
outputs = model.predict([
word_id_data, mask_data, type_id_data, dense_input_data,
dense_mask_data, dense_type_ids_data
])
self.assertEqual(outputs[0].shape[1],
sequence_length + dense_sequence_length)
# Creates a BertEncoder with embedding_width != hidden_size
embedding_width = 16
test_network = bert_encoder.BertEncoderV2(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=embedding_width,
dict_outputs=True)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, embedding_width), dtype=tf.float32)
dense_input_data = np.zeros(
(batch_size, dense_sequence_length, embedding_width), dtype=float)
dict_outputs = test_network(
dict(
input_word_ids=word_ids,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model(
[word_ids, mask, type_ids, dense_inputs, dense_mask, dense_type_ids],
[data, pooled])
outputs = model.predict([
word_id_data, mask_data, type_id_data, dense_input_data,
dense_mask_data, dense_type_ids_data
])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
def test_embeddings_as_inputs(self):
hidden_size = 32
sequence_length = 21
dense_sequence_length = 20
# Create a small BertEncoder for testing.
test_network = bert_encoder.BertEncoderV2(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
with_dense_inputs=True)
# Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dense_inputs = tf.keras.Input(
shape=(dense_sequence_length, hidden_size), dtype=tf.float32)
dense_mask = tf.keras.Input(shape=(dense_sequence_length,), dtype=tf.int32)
dense_type_ids = tf.keras.Input(
shape=(dense_sequence_length,), dtype=tf.int32)
test_network.build(
dict(
input_word_ids=word_ids,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
embeddings = test_network.get_embedding_layer()(word_ids)
# Calls with the embeddings.
dict_outputs = test_network(
dict(
input_word_embeddings=embeddings,
input_mask=mask,
input_type_ids=type_ids,
dense_inputs=dense_inputs,
dense_mask=dense_mask,
dense_type_ids=dense_type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [
None, sequence_length + dense_sequence_length, hidden_size
]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
if __name__ == "__main__":
tf.test.main()
| 14,229 | 37.150134 | 85 | py |
models | models-master/official/nlp/modeling/networks/encoder_scaffold.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based text encoder network."""
# pylint: disable=g-classes-have-attributes
import copy
import inspect
from absl import logging
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
@gin.configurable
class EncoderScaffold(tf.keras.Model):
"""Bi-directional Transformer-based encoder network scaffold.
This network allows users to flexibly implement an encoder similar to the one
described in "BERT: Pre-training of Deep Bidirectional Transformers for
Language Understanding" (https://arxiv.org/abs/1810.04805).
In this network, users can choose to provide a custom embedding subnetwork
(which will replace the standard embedding logic) and/or a custom hidden layer
class (which will replace the Transformer instantiation in the encoder). For
each of these custom injection points, users can pass either a class or a
class instance. If a class is passed, that class will be instantiated using
the `embedding_cfg` or `hidden_cfg` argument, respectively; if an instance
is passed, that instance will be invoked. (In the case of hidden_cls, the
  instance will be invoked 'num_hidden_instances' times.)
If the hidden_cls is not overridden, a default transformer layer will be
instantiated.
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
pooled_output_dim: The dimension of pooled output.
pooler_layer_initializer: The initializer for the classification layer.
embedding_cls: The class or instance to use to embed the input data. This
class or instance defines the inputs to this encoder and outputs (1)
embeddings tensor with shape `(batch_size, seq_length, hidden_size)` and
(2) attention masking with tensor `(batch_size, seq_length, seq_length)`.
If `embedding_cls` is not set, a default embedding network (from the
original BERT paper) will be created.
embedding_cfg: A dict of kwargs to pass to the embedding_cls, if it needs to
be instantiated. If `embedding_cls` is not set, a config dict must be
passed to `embedding_cfg` with the following values:
`vocab_size`: The size of the token vocabulary.
`type_vocab_size`: The size of the type vocabulary.
`hidden_size`: The hidden size for this encoder.
`max_seq_length`: The maximum sequence length for this encoder.
`seq_length`: The sequence length for this encoder.
`initializer`: The initializer for the embedding portion of this encoder.
`dropout_rate`: The dropout rate to apply before the encoding layers.
embedding_data: A reference to the embedding weights that will be used to
train the masked language model, if necessary. This is optional, and only
needed if (1) you are overriding `embedding_cls` and (2) are doing
standard pretraining.
num_hidden_instances: The number of times to instantiate and/or invoke the
hidden_cls.
hidden_cls: Three types of input are supported: (1) class (2) instance
(3) list of classes or instances, to encode the input data. If
`hidden_cls` is not set, a KerasBERT transformer layer will be used as the
encoder class. If `hidden_cls` is a list of classes or instances, these
classes (instances) are sequentially instantiated (invoked) on top of
embedding layer. Mixing classes and instances in the list is allowed.
hidden_cfg: A dict of kwargs to pass to the hidden_cls, if it needs to be
instantiated. If hidden_cls is not set, a config dict must be passed to
`hidden_cfg` with the following values:
`num_attention_heads`: The number of attention heads. The hidden size
must be divisible by `num_attention_heads`.
`intermediate_size`: The intermediate size of the transformer.
        `intermediate_activation`: The activation to apply in the transformer.
`dropout_rate`: The overall dropout rate for the transformer layers.
`attention_dropout_rate`: The dropout rate for the attention layers.
`kernel_initializer`: The initializer for the transformer layers.
mask_cls: The class to generate masks passed into hidden_cls() from inputs
and 2D mask indicating positions we can attend to. It is the caller's job
to make sure the output of the mask_layer can be used by hidden_layer.
A mask_cls is usually mapped to a hidden_cls.
mask_cfg: A dict of kwargs pass to mask_cls.
layer_norm_before_pooling: Whether to add a layer norm before the pooling
layer. You probably want to turn this on if you set `norm_first=True` in
transformer layers.
return_all_layer_outputs: Whether to output sequence embedding outputs of
all encoder transformer layers.
dict_outputs: Whether to use a dictionary as the model outputs.
layer_idx_as_attention_seed: Whether to include layer_idx in
attention_cfg in hidden_cfg.
feed_layer_idx: whether the scaffold should feed layer index to hidden_cls.
    recursive: whether to pass the second output of each hidden layer as the
      last input element to the next hidden layer. `None` is passed as the
      initial state.
"""
def __init__(self,
pooled_output_dim,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
embedding_cls=None,
embedding_cfg=None,
embedding_data=None,
num_hidden_instances=1,
hidden_cls=layers.Transformer,
hidden_cfg=None,
mask_cls=layers.SelfAttentionMask,
mask_cfg=None,
layer_norm_before_pooling=False,
return_all_layer_outputs=False,
dict_outputs=False,
layer_idx_as_attention_seed=False,
feed_layer_idx=False,
recursive=False,
**kwargs):
if embedding_cls:
if inspect.isclass(embedding_cls):
embedding_network = embedding_cls(
**embedding_cfg) if embedding_cfg else embedding_cls()
else:
embedding_network = embedding_cls
inputs = embedding_network.inputs
embeddings, attention_mask = embedding_network(inputs)
embedding_layer = None
position_embedding_layer = None
type_embedding_layer = None
embedding_norm_layer = None
else:
embedding_network = None
seq_length = embedding_cfg.get('seq_length', None)
word_ids = tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_type_ids')
inputs = [word_ids, mask, type_ids]
embedding_layer = layers.OnDeviceEmbedding(
vocab_size=embedding_cfg['vocab_size'],
embedding_width=embedding_cfg['hidden_size'],
initializer=tf_utils.clone_initializer(embedding_cfg['initializer']),
name='word_embeddings')
word_embeddings = embedding_layer(word_ids)
# Always uses dynamic slicing for simplicity.
position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(embedding_cfg['initializer']),
max_length=embedding_cfg['max_seq_length'],
name='position_embedding')
position_embeddings = position_embedding_layer(word_embeddings)
type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=embedding_cfg['type_vocab_size'],
embedding_width=embedding_cfg['hidden_size'],
initializer=tf_utils.clone_initializer(embedding_cfg['initializer']),
use_one_hot=True,
name='type_embeddings')
type_embeddings = type_embedding_layer(type_ids)
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm',
axis=-1,
epsilon=1e-12,
dtype=tf.float32)
embeddings = embedding_norm_layer(embeddings)
embeddings = (
tf.keras.layers.Dropout(
rate=embedding_cfg['dropout_rate'])(embeddings))
mask_cfg = {} if mask_cfg is None else mask_cfg
if inspect.isclass(mask_cls):
mask_layer = mask_cls(**mask_cfg)
else:
mask_layer = mask_cls
attention_mask = mask_layer(embeddings, mask)
data = embeddings
layer_output_data = []
hidden_layers = []
hidden_cfg = hidden_cfg if hidden_cfg else {}
if isinstance(hidden_cls, list) and len(hidden_cls) != num_hidden_instances:
raise RuntimeError(
('When input hidden_cls to EncoderScaffold %s is a list, it must '
'contain classes or instances with size specified by '
           'num_hidden_instances, got %d vs %d.') %
          (self.name, len(hidden_cls), num_hidden_instances))
# Consider supporting customized init states.
recursive_states = None
for i in range(num_hidden_instances):
if isinstance(hidden_cls, list):
cur_hidden_cls = hidden_cls[i]
else:
cur_hidden_cls = hidden_cls
if inspect.isclass(cur_hidden_cls):
if hidden_cfg and 'attention_cfg' in hidden_cfg and (
layer_idx_as_attention_seed):
hidden_cfg = copy.deepcopy(hidden_cfg)
hidden_cfg['attention_cfg']['seed'] = i
if feed_layer_idx:
hidden_cfg['layer_idx'] = i
layer = cur_hidden_cls(**hidden_cfg)
else:
layer = cur_hidden_cls
if recursive:
data, recursive_states = layer([data, attention_mask, recursive_states])
else:
data = layer([data, attention_mask])
layer_output_data.append(data)
hidden_layers.append(layer)
if layer_norm_before_pooling:
# Normalize the final output.
output_layer_norm = tf.keras.layers.LayerNormalization(
name='final_layer_norm',
axis=-1,
epsilon=1e-12)
layer_output_data[-1] = output_layer_norm(layer_output_data[-1])
last_layer_output = layer_output_data[-1]
# Applying a tf.slice op (through subscript notation) to a Keras tensor
# like this will create a SliceOpLambda layer. This is better than a Lambda
# layer with Python code, because that is fundamentally less portable.
first_token_tensor = last_layer_output[:, 0, :]
pooler_layer_initializer = tf.keras.initializers.get(
pooler_layer_initializer)
pooler_layer = tf.keras.layers.Dense(
units=pooled_output_dim,
activation='tanh',
kernel_initializer=pooler_layer_initializer,
name='cls_transform')
cls_output = pooler_layer(first_token_tensor)
if dict_outputs:
outputs = dict(
sequence_output=layer_output_data[-1],
pooled_output=cls_output,
encoder_outputs=layer_output_data,
)
elif return_all_layer_outputs:
outputs = [layer_output_data, cls_output]
else:
outputs = [layer_output_data[-1], cls_output]
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super().__init__(
inputs=inputs, outputs=outputs, **kwargs)
self._hidden_cls = hidden_cls
self._hidden_cfg = hidden_cfg
self._mask_cls = mask_cls
self._mask_cfg = mask_cfg
self._num_hidden_instances = num_hidden_instances
self._pooled_output_dim = pooled_output_dim
self._pooler_layer_initializer = pooler_layer_initializer
self._embedding_cls = embedding_cls
self._embedding_cfg = embedding_cfg
self._embedding_data = embedding_data
self._layer_norm_before_pooling = layer_norm_before_pooling
self._return_all_layer_outputs = return_all_layer_outputs
self._dict_outputs = dict_outputs
self._kwargs = kwargs
self._embedding_layer = embedding_layer
self._embedding_network = embedding_network
self._position_embedding_layer = position_embedding_layer
self._type_embedding_layer = type_embedding_layer
self._embedding_norm_layer = embedding_norm_layer
self._hidden_layers = hidden_layers
if self._layer_norm_before_pooling:
self._output_layer_norm = output_layer_norm
self._pooler_layer = pooler_layer
self._layer_idx_as_attention_seed = layer_idx_as_attention_seed
logging.info('EncoderScaffold configs: %s', self.get_config())
def get_config(self):
config_dict = {
'num_hidden_instances': self._num_hidden_instances,
'pooled_output_dim': self._pooled_output_dim,
'pooler_layer_initializer': tf.keras.initializers.serialize(
self._pooler_layer_initializer),
'embedding_cls': self._embedding_network,
'embedding_cfg': self._embedding_cfg,
'layer_norm_before_pooling': self._layer_norm_before_pooling,
'return_all_layer_outputs': self._return_all_layer_outputs,
'dict_outputs': self._dict_outputs,
'layer_idx_as_attention_seed': self._layer_idx_as_attention_seed
}
cfgs = {
'hidden_cfg': self._hidden_cfg,
'mask_cfg': self._mask_cfg
}
for cfg_name, cfg in cfgs.items():
if cfg:
config_dict[cfg_name] = {}
for k, v in cfg.items():
# `self._hidden_cfg` may contain `class`, e.g., when `hidden_cfg` is
# `TransformerScaffold`, `attention_cls` argument can be a `class`.
if inspect.isclass(v):
config_dict[cfg_name][k] = tf.keras.utils.get_registered_name(v)
else:
config_dict[cfg_name][k] = v
clss = {
'hidden_cls': self._hidden_cls,
'mask_cls': self._mask_cls
}
for cls_name, cls in clss.items():
if inspect.isclass(cls):
key = '{}_string'.format(cls_name)
config_dict[key] = tf.keras.utils.get_registered_name(cls)
else:
config_dict[cls_name] = cls
config_dict.update(self._kwargs)
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
cls_names = ['hidden_cls', 'mask_cls']
for cls_name in cls_names:
cls_string = '{}_string'.format(cls_name)
if cls_string in config:
config[cls_name] = tf.keras.utils.get_registered_object(
config[cls_string], custom_objects=custom_objects)
del config[cls_string]
return cls(**config)
def get_embedding_table(self):
if self._embedding_network is None:
# In this case, we don't have a custom embedding network and can return
# the standard embedding data.
return self._embedding_layer.embeddings
if self._embedding_data is None:
raise RuntimeError(('The EncoderScaffold %s does not have a reference '
'to the embedding data. This is required when you '
'pass a custom embedding network to the scaffold. '
'It is also possible that you are trying to get '
'embedding data from an embedding scaffold with a '
'custom embedding network where the scaffold has '
'been serialized and deserialized. Unfortunately, '
'accessing custom embedding references after '
'serialization is not yet supported.') % self.name)
else:
return self._embedding_data
@property
def embedding_network(self):
if self._embedding_network is None:
raise RuntimeError(
('The EncoderScaffold %s does not have a reference '
'to the embedding network. This is required when you '
'pass a custom embedding network to the scaffold.') % self.name)
return self._embedding_network
@property
def hidden_layers(self):
"""List of hidden layers in the encoder."""
return self._hidden_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
| 17,143 | 42.183879 | 80 | py |
models | models-master/official/nlp/modeling/networks/sparse_mixer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sparse Mixer encoder network.
Based on ["Sparse Mixers: Combining MoE and Mixing to build a more efficient
BERT"](https://arxiv.org/abs/2205.12399).
"""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Sequence, Union
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
_Activation = Union[str, Callable[..., Any]]
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class SparseMixer(tf.keras.layers.Layer):
"""Sparse Mixer encoder network.
Based on ["Sparse Mixers: Combining MoE and Mixing to build a more efficient
BERT"](https://arxiv.org/abs/2205.12399). Sparse Mixer is an efficient
encoder network that replaces typical Transformer encoder blocks with a
combination of linear mixing and sparsely activated Mixture-of-Experts (MoE)
sublayers.
This implementation defaults to the canonical Sparse Mixer Base model. To use
the "Fast Sparse Mixer" configuration, set `*_capacity_factor`=0.5. This
yields a sparser and faster variant of the canonical Sparse Mixer model, in
which each expert processes roughly 50% less tokens.
Notes:
- The underlying MoeLayer uses the Keras add_loss() and add_metric() APIs to
    propagate auxiliary MoE losses and metrics. Any model using this network
    should collect these losses and, if desired, metrics.
  - The input length is fixed to 'max_sequence_length' to accommodate the mixing
mechanisms.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
moe_layers: Specifies which layers, if any, should be sparsely activated
Mixture-of-Experts (MoE) layers. The remaining [0, num_layers) setminus
moe_layers will use the vanilla MLP sublayers. Defaults to placing MoE
layers in the middle of the model.
attention_layers: Specifies which layers, if any, should be attention layers
in the encoder. The remaining [0, num_layers) setminus attention_layers
will use the specified `mixing_mechanism`. If using attention layers, a
good rule of thumb is to place them in the final few layers.
num_experts: Number of experts. Experts are themselves MLP modules, with the
same `inner_dim` and `inner_activation` as the vanilla MLP sublayers.
train_capacity_factor: Scaling factor to increase the expert token capacity
during training. See layers.MoeLayer for further details. The "Fast Sparse
Mixer" increases model sparsity (and speed) by using a capacity factor of
0.5.
eval_capacity_factor: As above, but used during evaluation.
    examples_per_group: Number of examples used to form each routing group.
      Router computations are then performed on a per-group basis. See
      layers.MoeLayer for further details.
mixing_mechanism: Type of mixing mechanism used in place of self-attention
layers. Defaults to 'Linear' mixing.
use_fft: Only used for spectral mixing mechanisms. Determines whether to use
Fast Fourier Transform (True) or the Discrete Fourier Transform (DFT)
matrix (False; default) to compute the Fourier Transform. See
layers.FourierTransformLayer or layers.HartleyTransformLayer for advice.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The only sequence length that this encoder can consume.
This determines the variable shape for positional embeddings and the size
of the mixing matrices.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
with_dense_inputs: Whether to accept dense embeddings as the input.
export_metrics: Whether to export metrics using Keras add_metric API.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 512,
num_layers: int = 14,
moe_layers: Sequence[int] = (5, 6, 7, 8),
attention_layers: Sequence[int] = (10, 11, 12, 13),
num_experts: int = 16,
train_capacity_factor: float = 1.,
eval_capacity_factor: float = 1.,
examples_per_group: float = 1.,
mixing_mechanism: layers.MixingMechanism = layers.MixingMechanism.LINEAR,
use_fft: bool = False,
num_attention_heads: int = 8,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 2048,
inner_activation: _Activation = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
with_dense_inputs: bool = False,
export_metrics: bool = True,
**kwargs):
super().__init__(**kwargs)
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'moe_layers': moe_layers,
'num_experts': num_experts,
'train_capacity_factor': train_capacity_factor,
'eval_capacity_factor': eval_capacity_factor,
'examples_per_group': examples_per_group,
'mixing_mechanism': mixing_mechanism,
'use_fft': use_fft,
'attention_layers': attention_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'with_dense_inputs': with_dense_inputs,
'export_metrics': export_metrics,
}
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(initializer),
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
self._transformer_layers = []
for layer in range(num_layers):
if layer in attention_layers:
mixing_layer = layers.MultiHeadAttention(
num_heads=num_attention_heads,
key_dim=int(hidden_size // num_attention_heads),
dropout=attention_dropout,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='self_attention',
)
else:
mixing_layer = self._init_mixing_sublayer(layer)
if layer in moe_layers:
feedforward_layer = layers.MoeLayer(
experts=layers.FeedForwardExperts(
num_experts=num_experts,
d_ff=inner_dim,
output_dropout=output_dropout,
activation=inner_activation,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='experts'),
router=layers.ExpertsChooseMaskedRouter(
num_experts=num_experts,
kernel_initializer=tf_utils.clone_initializer(initializer),
export_metrics=export_metrics,
name='router'),
train_capacity_factor=train_capacity_factor,
eval_capacity_factor=eval_capacity_factor,
examples_per_group=examples_per_group,
name='moe')
else:
feedforward_layer = None # Fallback to default (dense) MLP class
block = layers.TransformerScaffold(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
attention_cls=mixing_layer,
feedforward_cls=feedforward_layer,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=output_range if layer == num_layers - 1 else None,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % layer)
self._transformer_layers.append(block)
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
if with_dense_inputs:
self.inputs = dict(
# The total length of token ids and dense inputs still has to be
# max_sequence_length. It is checked in call().
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_inputs=tf.keras.Input(
shape=(None, embedding_width), dtype=tf.float32),
dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
else:
self.inputs = dict(
input_word_ids=tf.keras.Input(
shape=(max_sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(
shape=(max_sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(
shape=(max_sequence_length,), dtype=tf.int32))
self._max_sequence_length = max_sequence_length
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids')
word_embeddings = inputs.get('input_word_embeddings', None)
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
else:
raise ValueError('Unexpected inputs type (%s) to %s.' %
(type(inputs), self.__class__))
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
mask = tf.concat([mask, dense_mask], axis=1)
# SparseMixer: Sequence length must be the same as `max_sequence_length`.
word_embeddings = tf.ensure_shape(word_embeddings,
[None, self._max_sequence_length, None])
# Absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = word_embeddings + position_embeddings + type_embeddings
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
for layer in self._transformer_layers:
x = layer([x, attention_mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _init_mixing_sublayer(self, layer: int):
"""Initializes config-dependent mixing sublayer."""
if self._config['mixing_mechanism'] == layers.MixingMechanism.FOURIER:
mixing_sublayer = layers.FourierTransformLayer(
use_fft=self._config['use_fft'], name='fourier_transform')
elif self._config['mixing_mechanism'] == layers.MixingMechanism.HARTLEY:
mixing_sublayer = layers.HartleyTransformLayer(
use_fft=self._config['use_fft'], name='hartley_transform')
elif self._config['mixing_mechanism'] == layers.MixingMechanism.LINEAR:
mixing_sublayer = layers.LinearTransformLayer(
kernel_initializer=tf_utils.clone_initializer(
self._config['initializer']),
name='linear_transform')
else:
raise ValueError('Unsupported mixing mechanism: %s' %
self._config['mixing_mechanism'])
return mixing_sublayer
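# A minimal usage sketch (illustrative only, not part of the library API):
# builds a small SparseMixer encoder and runs one forward pass on toy inputs.
# The hyperparameter values below are arbitrary toy settings that mirror the
# configurations exercised in sparse_mixer_test.py.
if __name__ == '__main__':
  encoder = SparseMixer(
      vocab_size=100,
      hidden_size=16,
      num_attention_heads=2,
      max_sequence_length=32,
      num_layers=3,
      moe_layers=(1,),
      num_experts=8,
      mixing_mechanism=layers.MixingMechanism.LINEAR,
      attention_layers=(2,))
  batch_size, seq_length = 4, 32
  outputs = encoder(
      dict(
          input_word_ids=tf.random.uniform(
              (batch_size, seq_length), maxval=100, dtype=tf.int32),
          input_mask=tf.ones((batch_size, seq_length), dtype=tf.int32),
          input_type_ids=tf.zeros((batch_size, seq_length), dtype=tf.int32)))
  # sequence_output: [batch, seq, hidden]; pooled_output: [batch, hidden].
  print(outputs['sequence_output'].shape, outputs['pooled_output'].shape)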
| 17,450 | 41.87715 | 80 | py |
models | models-master/official/nlp/modeling/networks/xlnet_base.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based XLNet Model."""
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling.layers import transformer_xl
_SEG_ID_CLS = 2
def _create_causal_attention_mask(
seq_length,
memory_length,
dtype=tf.float32,
same_length=False):
"""Creates a causal attention mask with a single-sided context.
When applying the attention mask in `MultiHeadRelativeAttention`, the
attention scores are of shape `[(batch dimensions), S, S + M]`, where:
- S = sequence length.
- M = memory length.
In a simple case where S = 2, M = 1, here is a simple illustration of the
`attention_scores` matrix, where `a` represents an attention function:
token_0 [[a(token_0, mem_0) a(token_0, token_0) a(token_0, token_1)],
token_1 [a(token_1, mem_0) a(token_1, token_0) a(token_1, token_1)]]
mem_0 token_0 token_1
For uni-directional attention, we want to mask out values in the attention
scores that represent a(token_i, token_j) where j > i. We can achieve this by
concatenating 0s (representing memory positions) with a strictly upper
triangular matrix of 1s.
We then flip the matrix values in order to match the representation where
real values are 1s.
Args:
seq_length: int, The length of each sequence.
memory_length: int, The length of memory blocks.
dtype: dtype of the mask.
same_length: bool, whether to use the same attention length for each token.
Returns:
A unidirectional attention mask of shape
`[seq_length, seq_length + memory_length]`. E.g.:
[[1. 1. 1. 0. 0. 0.]
[1. 1. 1. 1. 0. 0.]
[1. 1. 1. 1. 1. 0.]
[1. 1. 1. 1. 1. 1.]]
"""
ones_matrix = tf.ones([seq_length, seq_length], dtype=dtype)
upper_triangular = tf.linalg.band_part(ones_matrix, 0, -1)
diagonal = tf.linalg.band_part(ones_matrix, 0, 0)
padding = tf.zeros([seq_length, memory_length], dtype=dtype)
causal_attention_mask = tf.concat(
[padding, upper_triangular - diagonal], 1)
if same_length:
lower_triangular = tf.linalg.band_part(ones_matrix, -1, 0)
strictly_lower_triangular = lower_triangular - diagonal
causal_attention_mask = tf.concat(
[causal_attention_mask[:, :seq_length] + strictly_lower_triangular,
causal_attention_mask[:, seq_length:]], 1)
return 1 - causal_attention_mask
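# Illustrative example (kept as a comment so that importing this module has no
# side effects): the call below reproduces the mask shown in the docstring
# above, i.e. a `[4, 6]` mask for `seq_length=4` and `memory_length=2` in which
# `1` marks positions that may be attended to.
#
#   mask = _create_causal_attention_mask(seq_length=4, memory_length=2)
#   # mask.numpy() ==
#   # [[1. 1. 1. 0. 0. 0.]
#   #  [1. 1. 1. 1. 0. 0.]
#   #  [1. 1. 1. 1. 1. 0.]
#   #  [1. 1. 1. 1. 1. 1.]]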
def _combine_masks(mask1, mask2, dtype, how="and"):
"""Combines two masks.
Use "and" if trying to combine two existing masks.
Use "or" if trying to flip a few positions to "real".
Args:
mask1: tf.Tensor, input mask 1
mask2: tf.Tensor, input mask 2
dtype: tf.dtype
how: Which logical operation should run.
Returns:
The combined input masks.
"""
if how == "and":
operator = tf.math.logical_and
else:
operator = tf.math.logical_or
return tf.cast(operator(
tf.cast(mask1, tf.bool),
tf.cast(mask2, tf.bool)), dtype=dtype)
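# Illustrative example (kept as a comment to avoid import-time side effects).
# Interpreting the inputs as booleans, "and" marks a position only where both
# masks mark it, while "or" marks a position where either mask does:
#
#   _combine_masks([0, 1, 1], [1, 1, 0], tf.int32, how="and")  # -> [0, 1, 0]
#   _combine_masks([0, 1, 1], [1, 1, 0], tf.int32, how="or")   # -> [1, 1, 1]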
def _compute_attention_mask(
input_mask,
permutation_mask,
attention_type,
seq_length,
memory_length,
batch_size,
dtype=tf.float32):
"""Combines all input attention masks for XLNet.
In XLNet modeling, `0` represents tokens that can be attended, and `1`
represents tokens that cannot be attended.
For XLNet pre-training and fine tuning, there are a few masks used:
- Causal attention mask: If the attention type is unidirectional, then all
tokens after the current position cannot be attended to.
  - Input mask: when generating data, padding is added up to the max sequence
    length to make all sequences the same length. The input mask distinguishes
    real tokens (`0`) from padding tokens (`1`) so that padding is not attended
    to.
- Permutation mask: during XLNet pretraining, the input sequence is factorized
into a factorization sequence `z`. During partial prediction, `z` is split
at a cutting point `c` (an index of the factorization sequence) and
prediction is only applied to all tokens after `c`. Therefore, tokens at
factorization positions `i` > `c` can be attended to and tokens at
factorization positions `i` <= `c` cannot be attended to.
This function broadcasts and combines all attention masks to produce the
query attention mask and the content attention mask.
Args:
input_mask: Tensor, the input mask related to padding. Input shape:
`(B, S)`.
permutation_mask: Tensor, the permutation mask used in partial prediction.
Input shape: `(B, S, S)`.
attention_type: str, the attention type. Can be "uni" (directional) or
"bi" (directional).
seq_length: int, the length of each sequence.
memory_length: int the length of memory blocks.
batch_size: int, the batch size.
dtype: The dtype of the masks.
Returns:
attention_mask, content_attention_mask: The position and context-based
attention masks and content attention masks, respectively.
"""
attention_mask = None
# `1` values mean do not attend to this position.
if attention_type == "uni":
causal_attention_mask = _create_causal_attention_mask(
seq_length=seq_length,
memory_length=memory_length,
dtype=dtype)
causal_attention_mask = causal_attention_mask[None, None, :, :]
# `causal_attention_mask`: [1, 1, S, S + M]
# input_mask: [B, S]
# permutation_mask: [B, S, S]
if input_mask is not None and permutation_mask is not None:
data_mask = _combine_masks(input_mask[:, None, :], permutation_mask, dtype)
elif input_mask is not None and permutation_mask is None:
data_mask = input_mask[:, None, :]
elif input_mask is None and permutation_mask is not None:
data_mask = permutation_mask
else:
data_mask = None
# data_mask: [B, S, S] or [B, 1, S]
if data_mask is not None:
# All positions within state can be attended to.
state_mask = tf.ones([batch_size, tf.shape(data_mask)[1], memory_length],
dtype=dtype)
# state_mask: [B, 1, M] or [B, S, M]
data_mask = tf.concat([state_mask, data_mask], 2)
# data_mask: [B, 1, S + M] or [B, S, S + M]
if attention_type == "uni":
attention_mask = _combine_masks(causal_attention_mask,
data_mask[:, None, :, :],
dtype=dtype)
else:
attention_mask = data_mask[:, None, :, :]
if attention_mask is not None:
# Construct the content attention mask.
# This ensures that the mask allows the model to attend to positions in
# content positions (e.g. the content diagonal).
non_target_mask = tf.concat(
[tf.zeros([seq_length, memory_length], dtype=dtype),
tf.eye(seq_length, dtype=dtype)], axis=-1)
content_attention_mask = _combine_masks(
attention_mask, non_target_mask, how="or", dtype=dtype)
else:
content_attention_mask = None
return attention_mask, content_attention_mask
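# Illustrative example (kept as a comment to avoid import-time side effects):
# with only a padding mask and unidirectional attention, both returned masks
# broadcast to `[B, 1, S, S + M]` and follow the convention described above,
# where `1` marks positions that cannot be attended to.
#
#   input_mask = tf.zeros([2, 4], dtype=tf.float32)  # no padding
#   query_mask, content_mask = _compute_attention_mask(
#       input_mask=input_mask,
#       permutation_mask=None,
#       attention_type="uni",
#       seq_length=4,
#       memory_length=0,
#       batch_size=2,
#       dtype=tf.float32)
#   # query_mask.shape == content_mask.shape == [2, 1, 4, 4]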
def _compute_segment_matrix(
segment_ids,
memory_length,
batch_size,
use_cls_mask):
"""Computes the segment embedding matrix.
XLNet introduced segment-based attention for attention calculations. This
extends the idea of relative encodings in Transformer XL by considering
whether or not two positions are within the same segment, rather than
which segments they come from.
This function generates a segment matrix by broadcasting provided segment IDs
in two different dimensions and checking where values are equal. This output
matrix shows `True` whenever two tokens are NOT in the same segment and
`False` whenever they are.
Args:
segment_ids: A Tensor of size `[B, S]` that represents which segment
each token belongs to.
memory_length: int, the length of memory blocks.
batch_size: int, the batch size.
use_cls_mask: bool, whether or not to introduce cls mask in
input sequences.
Returns:
A boolean Tensor of size `[B, S, S + M]`, where `True` means that two
tokens are NOT in the same segment, and `False` means they are in the same
segment.
"""
if segment_ids is None:
return None
memory_padding = tf.zeros([batch_size, memory_length],
dtype=segment_ids.dtype)
padded_segment_ids = tf.concat([memory_padding, segment_ids], 1)
# segment_ids: [B, S]
# padded_segment_ids: [B, S + M]
if use_cls_mask:
# `1` indicates not in the same segment.
# Target result: [B, S, S + M]
# segment_ids: [B, S]
# padded_segment_ids: [B, S + M]
broadcasted_segment_class_indices = (
tf.equal(segment_ids,
tf.constant([_SEG_ID_CLS]))[:, :, None])
broadcasted_padded_class_indices = (
tf.equal(
padded_segment_ids,
tf.constant([_SEG_ID_CLS]))[:, None, :])
class_index_matrix = tf.logical_or(broadcasted_segment_class_indices,
broadcasted_padded_class_indices)
segment_matrix = tf.equal(segment_ids[:, :, None],
padded_segment_ids[:, None, :])
segment_matrix = tf.logical_or(class_index_matrix, segment_matrix)
else:
# TODO(allencwang) - address this legacy mismatch from `use_cls_mask`.
segment_matrix = tf.logical_not(
tf.equal(segment_ids[:, :, None], padded_segment_ids[:, None, :]))
return segment_matrix
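# Illustrative example (kept as a comment to avoid import-time side effects):
# with `use_cls_mask=False`, positions in different segments map to `True`:
#
#   segment_ids = tf.constant([[0, 0, 1, 1]])
#   _compute_segment_matrix(
#       segment_ids=segment_ids, memory_length=0, batch_size=1,
#       use_cls_mask=False)
#   # -> [[[False, False, True,  True],
#   #      [False, False, True,  True],
#   #      [True,  True,  False, False],
#   #      [True,  True,  False, False]]]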
def _compute_positional_encoding(
attention_type,
position_encoding_layer,
hidden_size,
batch_size,
total_length,
seq_length,
clamp_length,
bi_data,
dtype=tf.float32):
"""Computes the relative position encoding.
Args:
attention_type: str, the attention type. Can be "uni" (directional) or
"bi" (directional).
position_encoding_layer: An instance of `RelativePositionEncoding`.
hidden_size: int, the hidden size.
batch_size: int, the batch size.
total_length: int, the sequence length added to the memory length.
seq_length: int, the length of each sequence.
clamp_length: int, clamp all relative distances larger than clamp_length. -1
means no clamping.
bi_data: bool, whether to use bidirectional input pipeline. Usually set to
True during pretraining and False during finetuning.
dtype: the dtype of the encoding.
Returns:
A Tensor, representing the position encoding.
"""
freq_seq = tf.range(0, hidden_size, 2.0)
if dtype is not None and dtype != tf.float32:
freq_seq = tf.cast(freq_seq, dtype=dtype)
if attention_type == "bi":
beg, end = total_length, -seq_length
elif attention_type == "uni":
beg, end = total_length, -1
else:
raise ValueError("Unknown `attention_type` {}.".format(attention_type))
if bi_data:
forward_position_sequence = tf.range(beg, end, -1.0)
backward_position_sequence = tf.range(-beg, -end, 1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(forward_position_sequence,
dtype=dtype)
backward_position_sequence = tf.cast(backward_position_sequence,
dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
backward_position_sequence = tf.clip_by_value(
backward_position_sequence,
-clamp_length,
clamp_length)
if batch_size is not None:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, batch_size // 2)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, batch_size // 2)
else:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, None)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, None)
relative_position_encoding = tf.concat(
[forward_positional_encoding, backward_positional_encoding], axis=0)
else:
forward_position_sequence = tf.range(beg, end, -1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(
forward_position_sequence, dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
relative_position_encoding = position_encoding_layer(
forward_position_sequence, batch_size)
return relative_position_encoding
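# Illustrative example (kept as a comment to avoid import-time side effects):
# for unidirectional attention without bidirectional data, the helper encodes
# the descending position sequence `[total_length, ..., 0]` and tiles the
# result over the batch:
#
#   encoding = _compute_positional_encoding(
#       attention_type="uni",
#       position_encoding_layer=RelativePositionEncoding(hidden_size=8),
#       hidden_size=8,
#       batch_size=2,
#       total_length=4,  # memory_length (0) + seq_length (4)
#       seq_length=4,
#       clamp_length=-1,
#       bi_data=False)
#   # encoding.shape == [2, 5, 8], covering positions 4, 3, 2, 1, 0.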
class RelativePositionEncoding(tf.keras.layers.Layer):
"""Creates a relative positional encoding.
This layer creates a relative positional encoding as described in
"Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
(https://arxiv.org/abs/1901.02860).
Rather than an absolute position embedding as in Transformer, this
formulation represents position as the relative distance between tokens using
sinusoidal positional embeddings.
Note: This layer is currently experimental.
Attributes:
hidden_size: The dimensionality of the input embeddings.
"""
def __init__(self, hidden_size, **kwargs):
super().__init__(**kwargs)
self._hidden_size = hidden_size
self._inv_freq = 1.0 / (10000.0**(
tf.range(0, self._hidden_size, 2.0) / self._hidden_size))
def call(self, pos_seq, batch_size=None):
"""Implements call() for the layer.
Args:
pos_seq: A 1-D `Tensor`
batch_size: The optionally provided batch size that tiles the relative
positional encoding.
Returns:
The relative positional encoding of shape:
[batch_size, len(pos_seq), hidden_size] if batch_size is provided, else
[1, len(pos_seq), hidden_size].
"""
sinusoid_input = tf.einsum("i,d->id", pos_seq, self._inv_freq)
relative_position_encoding = tf.concat([tf.sin(sinusoid_input),
tf.cos(sinusoid_input)], -1)
relative_position_encoding = relative_position_encoding[None, :, :]
if batch_size is not None:
relative_position_encoding = tf.tile(relative_position_encoding,
[batch_size, 1, 1])
return relative_position_encoding
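# Illustrative example (kept as a comment to avoid import-time side effects):
# the layer maps a 1-D position sequence to sinusoidal encodings, optionally
# tiled over the batch dimension:
#
#   layer = RelativePositionEncoding(hidden_size=8)
#   layer(tf.range(4.0, -1.0, -1.0)).shape                 # [1, 5, 8]
#   layer(tf.range(4.0, -1.0, -1.0), batch_size=2).shape   # [2, 5, 8]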
@tf.keras.utils.register_keras_serializable(package="Text")
class XLNetBase(tf.keras.layers.Layer):
"""Base XLNet model.
Attributes:
vocab_size: int, the number of tokens in vocabulary.
num_layers: int, the number of layers.
hidden_size: int, the hidden size.
num_attention_heads: int, the number of attention heads.
head_size: int, the dimension size of each attention head.
inner_size: int, the hidden size in feed-forward layers.
dropout_rate: float, dropout rate.
attention_dropout_rate: float, dropout rate on attention probabilities.
attention_type: str, "uni" or "bi".
bi_data: bool, whether to use bidirectional input pipeline. Usually set to
True during pretraining and False during finetuning.
initializer: A tf initializer.
two_stream: bool, whether or not to use `TwoStreamRelativeAttention` used
in the XLNet pretrainer. If `False`, then it will use
`MultiHeadRelativeAttention` as in Transformer XL.
tie_attention_biases: bool, whether or not to tie the biases together.
Usually set to `True`. Used for backwards compatibility.
memory_length: int, the number of tokens to cache.
same_length: bool, whether to use the same attention length for each
token.
clamp_length: int, clamp all relative distances larger than clamp_length. -1
means no clamping.
    reuse_length: int, the number of tokens in the current batch to be cached
and reused in the future.
inner_activation: str, "relu" or "gelu".
use_cls_mask: bool, whether or not cls mask is included in the
input sequences.
embedding_width: The width of the word embeddings. If the embedding width
is not equal to hidden size, embedding parameters will be factorized
into two matrices in the shape of ["vocab_size", "embedding_width"] and
["embedding_width", "hidden_size"] ("embedding_width" is usually much
smaller than "hidden_size").
embedding_layer: The word embedding layer. `None` means we will create a
new embedding layer. Otherwise, we will reuse the given embedding layer.
This parameter is originally added for ELECTRA model which needs to tie
the generator embeddings with the discriminator embeddings.
"""
def __init__(self,
vocab_size,
num_layers,
hidden_size,
num_attention_heads,
head_size,
inner_size,
dropout_rate,
attention_dropout_rate,
attention_type,
bi_data,
initializer,
two_stream=False,
tie_attention_biases=True,
memory_length=None,
clamp_length=-1,
reuse_length=None,
inner_activation="relu",
use_cls_mask=False,
embedding_width=None,
**kwargs):
super().__init__(**kwargs)
self._vocab_size = vocab_size
self._initializer = initializer
self._attention_type = attention_type
self._num_layers = num_layers
self._hidden_size = hidden_size
self._num_attention_heads = num_attention_heads
self._head_size = head_size
self._inner_size = inner_size
self._inner_activation = inner_activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._tie_attention_biases = tie_attention_biases
self._two_stream = two_stream
self._memory_length = memory_length
self._reuse_length = reuse_length
self._bi_data = bi_data
self._clamp_length = clamp_length
self._use_cls_mask = use_cls_mask
self._segment_embedding = None
self._mask_embedding = None
self._embedding_width = embedding_width
if embedding_width is None:
embedding_width = hidden_size
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=self._vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(self._initializer),
dtype=tf.float32,
name="word_embedding")
self._dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self.embedding_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self.position_encoding = RelativePositionEncoding(self._hidden_size)
self._transformer_xl = transformer_xl.TransformerXL(
vocab_size=vocab_size,
num_layers=num_layers,
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
head_size=head_size,
inner_size=inner_size,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
initializer=initializer,
two_stream=two_stream,
tie_attention_biases=tie_attention_biases,
memory_length=memory_length,
reuse_length=reuse_length,
inner_activation=inner_activation,
name="transformer_xl")
def get_config(self):
config = {
"vocab_size":
self._vocab_size,
"num_layers":
self._num_layers,
"hidden_size":
self._hidden_size,
"num_attention_heads":
self._num_attention_heads,
"head_size":
self._head_size,
"inner_size":
self._inner_size,
"dropout_rate":
self._dropout_rate,
"attention_dropout_rate":
self._attention_dropout_rate,
"attention_type":
self._attention_type,
"bi_data":
self._bi_data,
"initializer":
self._initializer,
"two_stream":
self._two_stream,
"tie_attention_biases":
self._tie_attention_biases,
"memory_length":
self._memory_length,
"clamp_length":
self._clamp_length,
"reuse_length":
self._reuse_length,
"inner_activation":
self._inner_activation,
"use_cls_mask":
self._use_cls_mask,
"embedding_width":
self._embedding_width,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_embedding_lookup_table(self):
"""Returns the embedding layer weights."""
return self._embedding_layer.embeddings
def __call__(self,
input_ids,
segment_ids=None,
input_mask=None,
state=None,
permutation_mask=None,
target_mapping=None,
masked_tokens=None,
**kwargs):
# Uses dict to feed inputs into call() in order to keep state as a python
# list.
inputs = {
"input_ids": input_ids,
"segment_ids": segment_ids,
"input_mask": input_mask,
"state": state,
"permutation_mask": permutation_mask,
"target_mapping": target_mapping,
"masked_tokens": masked_tokens
}
return super().__call__(inputs, **kwargs)
def call(self, inputs):
"""Implements call() for the layer."""
input_ids = inputs["input_ids"]
segment_ids = inputs["segment_ids"]
input_mask = inputs["input_mask"]
state = inputs["state"]
permutation_mask = inputs["permutation_mask"]
target_mapping = inputs["target_mapping"]
masked_tokens = inputs["masked_tokens"]
batch_size = tf.shape(input_ids)[0]
seq_length = tf.shape(input_ids)[1]
if state is not None:
memory_length = tf.shape(state[0])[1]
else:
memory_length = 0
total_length = memory_length + seq_length
if self._two_stream and masked_tokens is None:
raise ValueError("`masked_tokens` must be provided in order to "
"initialize the query stream in "
"`TwoStreamRelativeAttention`.")
if masked_tokens is not None and not self._two_stream:
logging.warning("`masked_tokens` is provided but `two_stream` is not "
"enabled. Please enable `two_stream` to enable two "
"stream attention.")
if input_mask is not None:
dtype = input_mask.dtype
elif permutation_mask is not None:
dtype = permutation_mask.dtype
else:
dtype = tf.int32
query_attention_mask, content_attention_mask = _compute_attention_mask(
input_mask=input_mask,
permutation_mask=permutation_mask,
attention_type=self._attention_type,
seq_length=seq_length,
memory_length=memory_length,
batch_size=batch_size,
dtype=dtype)
relative_position_encoding = _compute_positional_encoding(
attention_type=self._attention_type,
position_encoding_layer=self.position_encoding,
hidden_size=self._hidden_size,
batch_size=batch_size,
total_length=total_length,
seq_length=seq_length,
clamp_length=self._clamp_length,
bi_data=self._bi_data,
dtype=tf.float32)
relative_position_encoding = self.embedding_dropout(
relative_position_encoding)
if segment_ids is None:
segment_embedding = None
segment_matrix = None
else:
if self._segment_embedding is None:
self._segment_embedding = self.add_weight(
"seg_embed",
shape=[self._num_layers, 2, self._num_attention_heads,
self._head_size],
dtype=tf.float32,
initializer=tf_utils.clone_initializer(self._initializer))
segment_embedding = self._segment_embedding
segment_matrix = _compute_segment_matrix(
segment_ids=segment_ids,
memory_length=memory_length,
batch_size=batch_size,
use_cls_mask=self._use_cls_mask)
word_embeddings = self._embedding_layer(input_ids)
content_stream = self._dropout(word_embeddings)
if self._two_stream:
if self._mask_embedding is None:
self._mask_embedding = self.add_weight(
"mask_emb/mask_emb",
shape=[1, 1, self._hidden_size],
dtype=tf.float32)
if target_mapping is None:
masked_tokens = masked_tokens[:, :, None]
masked_token_embedding = (
masked_tokens * self._mask_embedding +
(1 - masked_tokens) * word_embeddings)
else:
masked_token_embedding = tf.tile(
self._mask_embedding,
[batch_size, tf.shape(target_mapping)[1], 1])
query_stream = self._dropout(masked_token_embedding)
else:
query_stream = None
return self._transformer_xl(
content_stream=content_stream,
query_stream=query_stream,
target_mapping=target_mapping,
state=state,
relative_position_encoding=relative_position_encoding,
segment_matrix=segment_matrix,
segment_embedding=segment_embedding,
content_attention_mask=content_attention_mask,
query_attention_mask=query_attention_mask)
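# A minimal usage sketch (illustrative only, not part of the library API):
# builds a small XLNetBase layer and runs one forward pass on toy inputs. All
# hyperparameter values below are arbitrary toy settings, not recommended
# configurations.
if __name__ == "__main__":
  xlnet = XLNetBase(
      vocab_size=32,
      num_layers=2,
      hidden_size=16,
      num_attention_heads=2,
      head_size=8,
      inner_size=32,
      dropout_rate=0.0,
      attention_dropout_rate=0.0,
      attention_type="bi",
      bi_data=False,
      initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
  batch_size, seq_length = 2, 4
  outputs = xlnet(
      input_ids=tf.random.uniform(
          (batch_size, seq_length), maxval=32, dtype=tf.int32),
      segment_ids=tf.zeros((batch_size, seq_length), dtype=tf.int32),
      # `0` marks positions that can be attended to.
      input_mask=tf.zeros((batch_size, seq_length), dtype=tf.float32))
  # The return value is whatever the underlying TransformerXL block produces.
  print(type(outputs))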
| 25,857 | 35.368495 | 80 | py |
models | models-master/official/nlp/modeling/networks/sparse_mixer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Sparse Mixer encoder network."""
from typing import Sequence
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling.networks import sparse_mixer
class SparseMixerTest(parameterized.TestCase, tf.test.TestCase):
def tearDown(self):
super().tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(
testcase_name="sparse_mixer",
mixing_mechanism=layers.MixingMechanism.LINEAR,
moe_layers=(1,),
attention_layers=(2,)),
dict(
testcase_name="fnet",
mixing_mechanism=layers.MixingMechanism.FOURIER,
moe_layers=(),
attention_layers=()),
dict(
testcase_name="sparse_hnet",
mixing_mechanism=layers.MixingMechanism.HARTLEY,
moe_layers=(0, 1, 2),
attention_layers=(1, 2)),
dict(
testcase_name="sparse_bert",
mixing_mechanism=layers.MixingMechanism.LINEAR,
moe_layers=(0, 1, 2), # All layers use MoE
attention_layers=(0, 1, 2)), # All layers use attention
)
def test_network(self, mixing_mechanism: layers.MixingMechanism,
attention_layers: Sequence[int], moe_layers: Sequence[int]):
num_layers = 3
hidden_size = 16
sequence_length = 32
test_network = sparse_mixer.SparseMixer(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
max_sequence_length=sequence_length,
num_layers=num_layers,
moe_layers=moe_layers,
num_experts=8,
mixing_mechanism=mixing_mechanism,
attention_layers=attention_layers)
batch_size = 4
word_ids = tf.keras.Input(
shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32)
mask = tf.keras.Input(
shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32)
type_ids = tf.keras.Input(
shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [batch_size, sequence_length, hidden_size]
expected_pooled_shape = [batch_size, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_embeddings_as_inputs(self):
hidden_size = 32
sequence_length = 8
test_network = sparse_mixer.SparseMixer(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
max_sequence_length=sequence_length,
num_layers=3,
moe_layers=(1,),
num_experts=4,
attention_layers=(2,))
batch_size = 2
word_ids = tf.keras.Input(
shape=(sequence_length), batch_size=batch_size, dtype=tf.int32)
mask = tf.keras.Input(
shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32)
type_ids = tf.keras.Input(
shape=(sequence_length,), batch_size=batch_size, dtype=tf.int32)
test_network.build(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
embeddings = test_network.get_embedding_layer()(word_ids)
# Calls with the embeddings.
dict_outputs = test_network(
dict(
input_word_embeddings=embeddings,
input_mask=mask,
input_type_ids=type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [batch_size, sequence_length, hidden_size]
expected_pooled_shape = [batch_size, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
if __name__ == "__main__":
tf.test.main()
| 5,192 | 35.0625 | 80 | py |
models | models-master/official/nlp/modeling/networks/fnet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FNet encoder network."""
from typing import Sequence
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling.networks import fnet
class FNetTest(parameterized.TestCase, tf.test.TestCase):
def tearDown(self):
super(FNetTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
("fnet", layers.MixingMechanism.FOURIER, ()),
("fnet_hybrid", layers.MixingMechanism.FOURIER, (1, 2)),
("hnet", layers.MixingMechanism.HARTLEY, ()),
("hnet_hybrid", layers.MixingMechanism.HARTLEY, (1, 2)),
("linear", layers.MixingMechanism.LINEAR, ()),
("linear_hybrid", layers.MixingMechanism.LINEAR, (0,)),
("bert", layers.MixingMechanism.FOURIER, (0, 1, 2)),
)
def test_network(self, mixing_mechanism: layers.MixingMechanism,
attention_layers: Sequence[int]):
num_layers = 3
hidden_size = 32
sequence_length = 21
test_network = fnet.FNet(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
max_sequence_length=sequence_length,
num_layers=num_layers,
mixing_mechanism=mixing_mechanism,
attention_layers=attention_layers)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_embeddings_as_inputs(self):
hidden_size = 32
sequence_length = 21
test_network = fnet.FNet(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
max_sequence_length=sequence_length,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
test_network.build(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
embeddings = test_network.get_embedding_layer()(word_ids)
# Calls with the embeddings.
dict_outputs = test_network(
dict(
input_word_embeddings=embeddings,
input_mask=mask,
input_type_ids=type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
if __name__ == "__main__":
tf.test.main()
| 4,543 | 36.866667 | 80 | py |
models | models-master/official/nlp/modeling/networks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks are combinations of `tf.keras` layers (and possibly other networks).
They are `tf.keras` models that would not be trained alone. It encapsulates
common network structures like a transformer encoder into an easily
handled object with a standardized configuration.
"""
from official.nlp.modeling.networks.albert_encoder import AlbertEncoder
from official.nlp.modeling.networks.bert_encoder import BertEncoder
from official.nlp.modeling.networks.bert_encoder import BertEncoderV2
from official.nlp.modeling.networks.classification import Classification
from official.nlp.modeling.networks.encoder_scaffold import EncoderScaffold
from official.nlp.modeling.networks.fnet import FNet
from official.nlp.modeling.networks.funnel_transformer import FunnelTransformerEncoder
from official.nlp.modeling.networks.mobile_bert_encoder import MobileBERTEncoder
from official.nlp.modeling.networks.packed_sequence_embedding import PackedSequenceEmbedding
from official.nlp.modeling.networks.span_labeling import SpanLabeling
from official.nlp.modeling.networks.span_labeling import XLNetSpanLabeling
from official.nlp.modeling.networks.sparse_mixer import SparseMixer
from official.nlp.modeling.networks.xlnet_base import XLNetBase
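# Example usage (kept as a comment; this module only re-exports the networks):
#
#   from official.nlp.modeling import networks
#
#   encoder = networks.FNet(vocab_size=30522)  # other arguments use defaults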
| 1,839 | 53.117647 | 92 | py |
models | models-master/official/nlp/modeling/networks/fnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FNet encoder network.
Based on ["FNet: Mixing Tokens with Fourier Transforms"]
(https://aclanthology.org/2022.naacl-main.319/).
"""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Sequence, Union
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
_Activation = Union[str, Callable[..., Any]]
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class FNet(tf.keras.layers.Layer):
"""FNet encoder network.
Based on ["FNet: Mixing Tokens with Fourier Transforms"]
(https://aclanthology.org/2022.naacl-main.319/). FNet is an efficient
Transformer-like encoder network that replaces self-attention sublayers with
Fourier sublayers.
This implementation defaults to the canonical FNet Base model, but the network
also supports more general mixing models (e.g. 'Linear', 'HNet') and hybrid
  models (e.g. 'FNet-Hybrid') that use both mixing and self-attention
layers. The input length is fixed to 'max_sequence_length'.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
mixing_mechanism: Type of mixing mechanism used in place of self-attention
layers. Defaults to FNet ('Fourier') mixing.
use_fft: Only used for spectral mixing mechanisms. Determines whether to use
Fast Fourier Transform (True) or the Discrete Fourier Transform (DFT)
matrix (False; default) to compute the Fourier Transform. See
layers.FourierTransformLayer or layers.HartleyTransformLayer for advice.
attention_layers: Specifies which layers, if any, should be attention layers
in the encoder. The remaining [0, num_layers) setminus attention_layers
will use the specified `mixing_mechanism`. If using attention layers, a
good rule of thumb is to place them in the final few layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The only sequence length that this encoder can
consume. This determines the variable shape for positional embeddings and
the size of the mixing matrices.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
with_dense_inputs: Whether to accept dense embeddings as the input.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
mixing_mechanism: layers.MixingMechanism = layers.MixingMechanism.FOURIER,
use_fft: bool = False,
attention_layers: Sequence[int] = (),
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: _Activation = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
with_dense_inputs: bool = False,
**kwargs):
super().__init__(**kwargs)
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'mixing_mechanism': mixing_mechanism,
'use_fft': use_fft,
'attention_layers': attention_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'with_dense_inputs': with_dense_inputs,
}
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(initializer),
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
self._transformer_layers = []
for layer in range(num_layers):
if layer in attention_layers:
mixing_layer = layers.MultiHeadAttention(
num_heads=num_attention_heads,
key_dim=int(hidden_size // num_attention_heads),
dropout=attention_dropout,
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='self_attention',
)
else:
mixing_layer = self._init_mixing_sublayer(layer)
block = layers.TransformerScaffold(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
attention_cls=mixing_layer,
feedforward_cls=None, # Fallback to default FeedForward class
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=output_range if layer == num_layers - 1 else None,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % layer)
self._transformer_layers.append(block)
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
if with_dense_inputs:
self.inputs = dict(
# The total length of token ids and dense inputs still has to be
# max_sequence_length. It is checked in call().
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_inputs=tf.keras.Input(
shape=(None, embedding_width), dtype=tf.float32),
dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
else:
self.inputs = dict(
input_word_ids=tf.keras.Input(
shape=(max_sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(
shape=(max_sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(
shape=(max_sequence_length,), dtype=tf.int32))
self._max_sequence_length = max_sequence_length
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids')
word_embeddings = inputs.get('input_word_embeddings', None)
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
else:
raise ValueError('Unexpected inputs type (%s) to %s.' %
(type(inputs), self.__class__))
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
mask = tf.concat([mask, dense_mask], axis=1)
# FNet: Sequence length must be the same as `max_sequence_length`.
word_embeddings = tf.ensure_shape(word_embeddings,
[None, self._max_sequence_length, None])
# Absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = word_embeddings + position_embeddings + type_embeddings
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
for layer in self._transformer_layers:
x = layer([x, attention_mask])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _init_mixing_sublayer(self, layer: int):
"""Initializes config-dependent mixing sublayer."""
if self._config['mixing_mechanism'] == layers.MixingMechanism.FOURIER:
mixing_sublayer = layers.FourierTransformLayer(
use_fft=self._config['use_fft'], name='fourier_transform')
elif self._config['mixing_mechanism'] == layers.MixingMechanism.HARTLEY:
mixing_sublayer = layers.HartleyTransformLayer(
use_fft=self._config['use_fft'], name='hartley_transform')
elif self._config['mixing_mechanism'] == layers.MixingMechanism.LINEAR:
mixing_sublayer = layers.LinearTransformLayer(
kernel_initializer=tf_utils.clone_initializer(
self._config['initializer']),
name='linear_transform')
else:
raise ValueError('Unsupported mixing mechanism: %s' %
self._config['mixing_mechanism'])
return mixing_sublayer
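# A minimal usage sketch (illustrative only, not part of the library API):
# builds a small FNet encoder and runs one forward pass. The sizes below are
# arbitrary toy values; note that the input length must equal
# `max_sequence_length`.
if __name__ == '__main__':
  encoder = FNet(
      vocab_size=100,
      hidden_size=16,
      num_layers=2,
      num_attention_heads=2,
      max_sequence_length=8,
      inner_dim=32)
  batch_size, seq_length = 2, 8
  outputs = encoder(
      dict(
          input_word_ids=tf.random.uniform(
              (batch_size, seq_length), maxval=100, dtype=tf.int32),
          input_mask=tf.ones((batch_size, seq_length), dtype=tf.int32),
          input_type_ids=tf.zeros((batch_size, seq_length), dtype=tf.int32)))
  # sequence_output: [batch, seq, hidden]; pooled_output: [batch, hidden].
  print(outputs['sequence_output'].shape, outputs['pooled_output'].shape)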
| 14,576 | 40.411932 | 80 | py |
models | models-master/official/nlp/modeling/networks/mobile_bert_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling import models
from official.nlp.modeling.networks import mobile_bert_encoder
def generate_fake_input(batch_size=1, seq_len=5, vocab_size=10000, seed=0):
"""Generate consistent fake integer input sequences."""
np.random.seed(seed)
fake_input = []
for _ in range(batch_size):
fake_input.append([])
for _ in range(seq_len):
fake_input[-1].append(np.random.randint(0, vocab_size))
fake_input = np.asarray(fake_input)
return fake_input
class MobileBertEncoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
('default_setting', 'relu', True, 'no_norm', False),
('gelu', 'gelu', True, 'no_norm', False),
('kq_not_shared', 'relu', False, 'no_norm', False),
('layer_norm', 'relu', True, 'layer_norm', False),
('use_pooler', 'relu', True, 'no_norm', True),
('with_pooler_layer', 'relu', True, 'layer_norm', False))
def test_mobilebert_encoder(self, act_fn, kq_shared_bottleneck,
normalization_type, use_pooler):
hidden_size = 32
sequence_length = 16
num_blocks = 3
test_network = mobile_bert_encoder.MobileBERTEncoder(
word_vocab_size=100,
hidden_size=hidden_size,
num_blocks=num_blocks,
intermediate_act_fn=act_fn,
key_query_shared_bottleneck=kq_shared_bottleneck,
normalization_type=normalization_type,
classifier_activation=use_pooler)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
outputs = test_network([word_ids, mask, type_ids])
layer_output, pooler_output = outputs['sequence_output'], outputs[
'pooled_output']
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, num_blocks)
layer_output_shape = [None, sequence_length, hidden_size]
self.assertAllEqual(layer_output.shape.as_list(), layer_output_shape)
pooler_output_shape = [None, hidden_size]
self.assertAllEqual(pooler_output.shape.as_list(), pooler_output_shape)
self.assertAllEqual(tf.float32, layer_output.dtype)
def test_mobilebert_encoder_return_all_layer_output(self):
hidden_size = 32
sequence_length = 16
num_blocks = 3
test_network = mobile_bert_encoder.MobileBERTEncoder(
word_vocab_size=100,
hidden_size=hidden_size,
num_blocks=num_blocks)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
outputs = test_network([word_ids, mask, type_ids])
all_layer_output = outputs['encoder_outputs']
self.assertIsInstance(all_layer_output, list)
self.assertLen(all_layer_output, num_blocks + 1)
@parameterized.parameters('int32', 'float32')
def test_mobilebert_encoder_invocation(self, input_mask_dtype):
vocab_size = 100
hidden_size = 32
sequence_length = 16
num_blocks = 3
test_network = mobile_bert_encoder.MobileBERTEncoder(
word_vocab_size=vocab_size,
hidden_size=hidden_size,
num_blocks=num_blocks,
input_mask_dtype=input_mask_dtype)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=input_mask_dtype)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
outputs = test_network([word_ids, mask, type_ids])
model = tf.keras.Model([word_ids, mask, type_ids], outputs)
input_seq = generate_fake_input(
batch_size=1, seq_len=sequence_length, vocab_size=vocab_size)
input_mask = generate_fake_input(
batch_size=1, seq_len=sequence_length, vocab_size=2)
token_type = generate_fake_input(
batch_size=1, seq_len=sequence_length, vocab_size=2)
outputs = model.predict([input_seq, input_mask, token_type])
sequence_output_shape = [1, sequence_length, hidden_size]
self.assertAllEqual(outputs['sequence_output'].shape, sequence_output_shape)
pooled_output_shape = [1, hidden_size]
self.assertAllEqual(outputs['pooled_output'].shape, pooled_output_shape)
def test_mobilebert_encoder_invocation_with_attention_score(self):
vocab_size = 100
hidden_size = 32
sequence_length = 16
num_blocks = 3
test_network = mobile_bert_encoder.MobileBERTEncoder(
word_vocab_size=vocab_size,
hidden_size=hidden_size,
num_blocks=num_blocks)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
outputs = test_network([word_ids, mask, type_ids])
model = tf.keras.Model([word_ids, mask, type_ids], outputs)
input_seq = generate_fake_input(
batch_size=1, seq_len=sequence_length, vocab_size=vocab_size)
input_mask = generate_fake_input(
batch_size=1, seq_len=sequence_length, vocab_size=2)
token_type = generate_fake_input(
batch_size=1, seq_len=sequence_length, vocab_size=2)
outputs = model.predict([input_seq, input_mask, token_type])
self.assertLen(outputs['attention_scores'], num_blocks)
@parameterized.named_parameters(
('sequence_classification', models.BertClassifier, [None, 5]),
('token_classification', models.BertTokenClassifier, [None, 16, 5]))
def test_mobilebert_encoder_for_downstream_task(self, task, prediction_shape):
hidden_size = 32
sequence_length = 16
mobilebert_encoder = mobile_bert_encoder.MobileBERTEncoder(
word_vocab_size=100, hidden_size=hidden_size)
num_classes = 5
classifier = task(network=mobilebert_encoder, num_classes=num_classes)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
prediction = classifier([word_ids, mask, type_ids])
if task == models.BertTokenClassifier:
prediction = prediction['logits']
self.assertAllEqual(prediction.shape.as_list(), prediction_shape)
if __name__ == '__main__':
tf.test.main()
| 7,105 | 40.8 | 80 | py |
models | models-master/official/nlp/modeling/networks/albert_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ALBERT (https://arxiv.org/abs/1810.04805) text encoder network."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.modeling import activations
from official.modeling import tf_utils
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class AlbertEncoder(tf.keras.Model):
"""ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.
This network implements the encoder described in the paper "ALBERT: A Lite
BERT for Self-supervised Learning of Language Representations"
(https://arxiv.org/abs/1909.11942).
  Compared with BERT (https://arxiv.org/abs/1810.04805), ALBERT factorizes
embedding parameters into two smaller matrices and shares parameters
across layers.
The default values for this object are taken from the ALBERT-Base
implementation described in the paper.
*Note* that the network is constructed by Keras Functional API.
Args:
vocab_size: The size of the token vocabulary.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of `(vocab_size, embedding_width)` and
`(embedding_width, hidden_size)`, where `embedding_width` is usually much
smaller than `hidden_size`.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
intermediate_size: The intermediate size for the transformer layers.
activation: The activation to use for the transformer layers.
dropout_rate: The dropout rate to use for the transformer layers.
attention_dropout_rate: The dropout rate to use for the attention layers
within the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
dict_outputs: Whether to use a dictionary as the model outputs.
"""
def __init__(self,
vocab_size,
embedding_width=128,
hidden_size=768,
num_layers=12,
num_attention_heads=12,
max_sequence_length=512,
type_vocab_size=16,
intermediate_size=3072,
activation=activations.gelu,
dropout_rate=0.1,
attention_dropout_rate=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
dict_outputs=False,
**kwargs):
activation = tf.keras.activations.get(activation)
initializer = tf.keras.initializers.get(initializer)
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
if embedding_width is None:
embedding_width = hidden_size
embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
word_embeddings = embedding_layer(word_ids)
# Always uses dynamic slicing for simplicity.
position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(initializer),
max_length=max_sequence_length,
name='position_embedding')
position_embeddings = position_embedding_layer(word_embeddings)
type_embeddings = (
layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')(type_ids))
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
embeddings = (
tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm',
axis=-1,
epsilon=1e-12,
dtype=tf.float32)(embeddings))
embeddings = (tf.keras.layers.Dropout(rate=dropout_rate)(embeddings))
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
if embedding_width != hidden_size:
embeddings = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')(
embeddings)
data = embeddings
attention_mask = layers.SelfAttentionMask()(data, mask)
shared_layer = layers.TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=intermediate_size,
inner_activation=activation,
output_dropout=dropout_rate,
attention_dropout=attention_dropout_rate,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer')
encoder_outputs = []
for _ in range(num_layers):
data = shared_layer([data, attention_mask])
encoder_outputs.append(data)
# Applying a tf.slice op (through subscript notation) to a Keras tensor
# like this will create a SliceOpLambda layer. This is better than a Lambda
# layer with Python code, because that is fundamentally less portable.
first_token_tensor = data[:, 0, :]
cls_output = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')(
first_token_tensor)
if dict_outputs:
outputs = dict(
sequence_output=data,
encoder_outputs=encoder_outputs,
pooled_output=cls_output,
)
else:
outputs = [data, cls_output]
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super().__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
config_dict = {
'vocab_size': vocab_size,
'embedding_width': embedding_width,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'intermediate_size': intermediate_size,
'activation': tf.keras.activations.serialize(activation),
'dropout_rate': dropout_rate,
'attention_dropout_rate': attention_dropout_rate,
'initializer': tf.keras.initializers.serialize(initializer),
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self._embedding_layer = embedding_layer
self._position_embedding_layer = position_embedding_layer
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config):
return cls(**config)
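# A minimal usage sketch, not part of the original module. The batch size,
# sequence length and hyperparameters below are illustrative assumptions:
#
#   encoder = AlbertEncoder(
#       vocab_size=30000, embedding_width=128, hidden_size=768, num_layers=12)
#   word_ids = tf.ones((2, 16), dtype=tf.int32)
#   mask = tf.ones((2, 16), dtype=tf.int32)
#   type_ids = tf.zeros((2, 16), dtype=tf.int32)
#   # With the default `dict_outputs=False` the model returns a two-element
#   # list: the per-token sequence output and the pooled `[CLS]` output.
#   sequence_output, pooled_output = encoder([word_ids, mask, type_ids])
#   # sequence_output.shape == (2, 16, 768); pooled_output.shape == (2, 768)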
| 8,877 | 40.877358 | 80 | py |
models | models-master/official/nlp/modeling/networks/mobile_bert_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileBERT text encoder network."""
import gin
import tensorflow as tf
from official.nlp.modeling import layers
@gin.configurable
class MobileBERTEncoder(tf.keras.Model):
"""A Keras functional API implementation for MobileBERT encoder."""
def __init__(self,
word_vocab_size=30522,
word_embed_size=128,
type_vocab_size=2,
max_sequence_length=512,
num_blocks=24,
hidden_size=512,
num_attention_heads=4,
intermediate_size=512,
intermediate_act_fn='relu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
intra_bottleneck_size=128,
initializer_range=0.02,
use_bottleneck_attention=False,
key_query_shared_bottleneck=True,
num_feedforward_networks=4,
normalization_type='no_norm',
classifier_activation=False,
input_mask_dtype='int32',
**kwargs):
"""Class initialization.
Args:
word_vocab_size: Number of words in the vocabulary.
word_embed_size: Word embedding size.
type_vocab_size: Number of word types.
max_sequence_length: Maximum length of input sequence.
num_blocks: Number of transformer blocks in the encoder model.
hidden_size: Hidden size for the transformer block.
num_attention_heads: Number of attention heads in the transformer block.
intermediate_size: The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: Dropout probability for the hidden layers.
attention_probs_dropout_prob: Dropout probability of the attention
probabilities.
intra_bottleneck_size: Size of bottleneck.
initializer_range: The stddev of the `truncated_normal_initializer` for
initializing all weight matrices.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: Whether to share linear transformation for
keys and queries.
num_feedforward_networks: Number of stacked feed-forward networks.
normalization_type: The type of normalization_type, only `no_norm` and
`layer_norm` are supported. `no_norm` represents the element-wise linear
transformation for the student model, as suggested by the original
MobileBERT paper. `layer_norm` is used for the teacher model.
classifier_activation: Whether to use the tanh activation for the final
representation of the `[CLS]` token in fine-tuning.
input_mask_dtype: The dtype of `input_mask` tensor, which is one of the
input tensors of this encoder. Defaults to `int32`. If you want
to use `tf.lite` quantization, which does not support `Cast` op,
please set this argument to `tf.float32` and feed `input_mask`
tensor with values in `float32` to avoid `tf.cast` in the computation.
**kwargs: Other keyword arguments.
"""
self._self_setattr_tracking = False
initializer = tf.keras.initializers.TruncatedNormal(
stddev=initializer_range)
# layer instantiation
self.embedding_layer = layers.MobileBertEmbedding(
word_vocab_size=word_vocab_size,
word_embed_size=word_embed_size,
type_vocab_size=type_vocab_size,
output_embed_size=hidden_size,
max_sequence_length=max_sequence_length,
normalization_type=normalization_type,
initializer=initializer,
dropout_rate=hidden_dropout_prob)
self._transformer_layers = []
for layer_idx in range(num_blocks):
transformer = layers.MobileBertTransformer(
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
intermediate_act_fn=intermediate_act_fn,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
intra_bottleneck_size=intra_bottleneck_size,
use_bottleneck_attention=use_bottleneck_attention,
key_query_shared_bottleneck=key_query_shared_bottleneck,
num_feedforward_networks=num_feedforward_networks,
normalization_type=normalization_type,
initializer=initializer,
name=f'transformer_layer_{layer_idx}')
self._transformer_layers.append(transformer)
# input tensor
input_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
input_mask = tf.keras.layers.Input(
shape=(None,), dtype=input_mask_dtype, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
self.inputs = [input_ids, input_mask, type_ids]
# The dtype of `attention_mask` will be the same as that of `input_mask`.
attention_mask = layers.SelfAttentionMask()(input_mask, input_mask)
# build the computation graph
all_layer_outputs = []
all_attention_scores = []
embedding_output = self.embedding_layer(input_ids, type_ids)
all_layer_outputs.append(embedding_output)
prev_output = embedding_output
for layer_idx in range(num_blocks):
layer_output, attention_score = self._transformer_layers[layer_idx](
prev_output,
attention_mask,
return_attention_scores=True)
all_layer_outputs.append(layer_output)
all_attention_scores.append(attention_score)
prev_output = layer_output
first_token = tf.squeeze(prev_output[:, 0:1, :], axis=1)
if classifier_activation:
self._pooler_layer = tf.keras.layers.EinsumDense(
'ab,bc->ac',
output_shape=hidden_size,
activation=tf.tanh,
bias_axes='c',
kernel_initializer=initializer,
name='pooler')
first_token = self._pooler_layer(first_token)
else:
self._pooler_layer = None
outputs = dict(
sequence_output=prev_output,
pooled_output=first_token,
encoder_outputs=all_layer_outputs,
attention_scores=all_attention_scores)
super().__init__(
inputs=self.inputs, outputs=outputs, **kwargs)
def get_embedding_table(self):
return self.embedding_layer.word_embedding.embeddings
def get_embedding_layer(self):
return self.embedding_layer.word_embedding
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
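# A minimal usage sketch, not part of the original module. The batch size,
# sequence length and `num_blocks` below are illustrative assumptions:
#
#   encoder = MobileBERTEncoder(word_vocab_size=30522, num_blocks=4)
#   word_ids = tf.ones((2, 16), dtype=tf.int32)
#   # `input_mask` defaults to int32; construct the encoder with
#   # `input_mask_dtype='float32'` (and feed a float mask) to avoid a Cast op
#   # when converting with tf.lite.
#   mask = tf.ones((2, 16), dtype=tf.int32)
#   type_ids = tf.zeros((2, 16), dtype=tf.int32)
#   outputs = encoder([word_ids, mask, type_ids])
#   # `outputs` is a dict with `sequence_output`, `pooled_output`,
#   # `encoder_outputs` and `attention_scores`.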
| 7,533 | 39.945652 | 80 | py |
models | models-master/official/nlp/modeling/networks/bert_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based BERT encoder network."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_Activation = Union[str, Callable[..., Any]]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class BertEncoderV2(tf.keras.layers.Layer):
"""Bi-directional Transformer-based encoder network.
This network implements a bi-directional Transformer-based encoder as
described in "BERT: Pre-training of Deep Bidirectional Transformers for
Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the
embedding lookups and transformer layers, but not the masked language model
or classification task networks.
The default values for this object are taken from the BERT-Base implementation
in "BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding".
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set to False, the outputs of the attention and intermediate dense
layers are normalized.
with_dense_inputs: Whether to accept dense embeddings as the input.
return_attention_scores: Whether to add an additional output containing the
attention scores of all transformer layers. This will be a list of length
`num_layers`, and each element will be in the shape [batch_size,
num_attention_heads, seq_dim, seq_dim].
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: _Activation = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
with_dense_inputs: bool = False,
return_attention_scores: bool = False,
**kwargs):
# Pops kwargs that are used in V1 implementation.
if 'dict_outputs' in kwargs:
kwargs.pop('dict_outputs')
if 'return_all_encoder_outputs' in kwargs:
kwargs.pop('return_all_encoder_outputs')
if 'intermediate_size' in kwargs:
inner_dim = kwargs.pop('intermediate_size')
if 'activation' in kwargs:
inner_activation = kwargs.pop('activation')
if 'dropout_rate' in kwargs:
output_dropout = kwargs.pop('dropout_rate')
if 'attention_dropout_rate' in kwargs:
attention_dropout = kwargs.pop('attention_dropout_rate')
super().__init__(**kwargs)
self._output_range = output_range
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(initializer),
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
self._num_layers = num_layers
for i in range(num_layers):
layer = layers.TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
return_attention_scores=return_attention_scores,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % i)
self._transformer_layers.append(layer)
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf_utils.serialize_activation(
activation, use_legacy_format=True
),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf_utils.serialize_initializer(
initializer, use_legacy_format=True
),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'with_dense_inputs': with_dense_inputs,
'return_attention_scores': return_attention_scores,
}
if with_dense_inputs:
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_inputs=tf.keras.Input(
shape=(None, embedding_width), dtype=tf.float32),
dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
else:
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids')
word_embeddings = inputs.get('input_word_embeddings', None)
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
mask = tf.concat([mask, dense_mask], axis=1)
embeddings = self._get_embeddings(word_ids, type_ids, word_embeddings,
dense_inputs, dense_type_ids)
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
attention_outputs = []
x = embeddings
for i, layer in enumerate(self._transformer_layers):
transformer_output_range = None
if i == self._num_layers - 1:
transformer_output_range = self._output_range
x = layer([x, attention_mask], output_range=transformer_output_range)
if self._config['return_attention_scores']:
x, attention_scores = x
attention_outputs.append(attention_scores)
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
output = dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
if self._config['return_attention_scores']:
output['attention_scores'] = attention_outputs
return output
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
def _get_embeddings(self, word_ids: tf.Tensor, type_ids: tf.Tensor,
word_embeddings: Optional[tf.Tensor],
dense_inputs: Optional[tf.Tensor],
dense_type_ids: Optional[tf.Tensor]) -> tf.Tensor:
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
type_embeddings = self._type_embedding_layer(type_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
return word_embeddings + position_embeddings + type_embeddings
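# A minimal usage sketch for `BertEncoderV2`, not part of the original module.
# The batch size, sequence length and `num_layers` below are illustrative
# assumptions:
#
#   encoder = BertEncoderV2(vocab_size=30522, num_layers=2)
#   encoder_inputs = dict(
#       input_word_ids=tf.ones((2, 16), dtype=tf.int32),
#       input_mask=tf.ones((2, 16), dtype=tf.int32),
#       input_type_ids=tf.zeros((2, 16), dtype=tf.int32))
#   outputs = encoder(encoder_inputs)
#   # `outputs` is a dict with `sequence_output` of shape (2, 16, 768),
#   # `pooled_output` of shape (2, 768) and the per-layer `encoder_outputs`.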
@tf.keras.utils.register_keras_serializable(package='Text')
class BertEncoder(tf.keras.Model):
"""Bi-directional Transformer-based encoder network.
This network implements a bi-directional Transformer-based encoder as
described in "BERT: Pre-training of Deep Bidirectional Transformers for
Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the
embedding lookups and transformer layers, but not the masked language model
or classification task networks.
The default values for this object are taken from the BERT-Base implementation
in "BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding".
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set to False, the outputs of the attention and intermediate dense
layers are normalized.
dict_outputs: Whether to use a dictionary as the model outputs.
return_all_encoder_outputs: Whether to output sequence embedding outputs of
all encoder transformer layers. Note: when the `dict_outputs` argument is
True, all encoder outputs are always returned in the dict,
keyed by `encoder_outputs`.
return_attention_scores: Whether to add an additional output containing the
attention scores of all transformer layers. This will be a list of length
`num_layers`, and each element will be in the shape [batch_size,
num_attention_heads, seq_dim, seq_dim].
"""
def __init__(
self,
vocab_size,
hidden_size=768,
num_layers=12,
num_attention_heads=12,
max_sequence_length=512,
type_vocab_size=16,
inner_dim=3072,
inner_activation=lambda x: tf.keras.activations.gelu(x, approximate=True),
output_dropout=0.1,
attention_dropout=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
output_range=None,
embedding_width=None,
embedding_layer=None,
norm_first=False,
dict_outputs=False,
return_all_encoder_outputs=False,
return_attention_scores: bool = False,
**kwargs):
if 'sequence_length' in kwargs:
kwargs.pop('sequence_length')
logging.warning('`sequence_length` is a deprecated argument to '
'`BertEncoder`, which has no effect for a while. Please '
'remove `sequence_length` argument.')
# Handles backward compatible kwargs.
if 'intermediate_size' in kwargs:
inner_dim = kwargs.pop('intermediate_size')
if 'activation' in kwargs:
inner_activation = kwargs.pop('activation')
if 'dropout_rate' in kwargs:
output_dropout = kwargs.pop('dropout_rate')
if 'attention_dropout_rate' in kwargs:
attention_dropout = kwargs.pop('attention_dropout_rate')
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
word_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
embedding_layer_inst = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
embedding_layer_inst = embedding_layer
word_embeddings = embedding_layer_inst(word_ids)
# Always uses dynamic slicing for simplicity.
position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(initializer),
max_length=max_sequence_length,
name='position_embedding')
position_embeddings = position_embedding_layer(word_embeddings)
type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
type_embeddings = type_embedding_layer(type_ids)
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
embeddings = embedding_norm_layer(embeddings)
embeddings = (tf.keras.layers.Dropout(rate=output_dropout)(embeddings))
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
if embedding_width != hidden_size:
embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
embeddings = embedding_projection(embeddings)
else:
embedding_projection = None
transformer_layers = []
data = embeddings
attention_mask = layers.SelfAttentionMask()(data, mask)
encoder_outputs = []
attention_outputs = []
for i in range(num_layers):
transformer_output_range = None
if i == num_layers - 1:
transformer_output_range = output_range
layer = layers.TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
return_attention_scores=return_attention_scores,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % i)
transformer_layers.append(layer)
data = layer([data, attention_mask],
output_range=transformer_output_range)
if return_attention_scores:
data, attention_scores = data
attention_outputs.append(attention_scores)
encoder_outputs.append(data)
last_encoder_output = encoder_outputs[-1]
# Applying a tf.slice op (through subscript notation) to a Keras tensor
# like this will create a SliceOpLambda layer. This is better than a Lambda
# layer with Python code, because that is fundamentally less portable.
first_token_tensor = last_encoder_output[:, 0, :]
pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
cls_output = pooler_layer(first_token_tensor)
outputs = dict(
sequence_output=encoder_outputs[-1],
pooled_output=cls_output,
encoder_outputs=encoder_outputs,
)
if return_attention_scores:
outputs['attention_scores'] = attention_outputs
if dict_outputs:
super().__init__(
inputs=[word_ids, mask, type_ids], outputs=outputs, **kwargs)
else:
cls_output = outputs['pooled_output']
if return_all_encoder_outputs:
encoder_outputs = outputs['encoder_outputs']
outputs = [encoder_outputs, cls_output]
else:
sequence_output = outputs['sequence_output']
outputs = [sequence_output, cls_output]
if return_attention_scores:
outputs.append(attention_outputs)
super().__init__( # pylint: disable=bad-super-call
inputs=[word_ids, mask, type_ids],
outputs=outputs,
**kwargs)
self._pooler_layer = pooler_layer
self._transformer_layers = transformer_layers
self._embedding_norm_layer = embedding_norm_layer
self._embedding_layer = embedding_layer_inst
self._position_embedding_layer = position_embedding_layer
self._type_embedding_layer = type_embedding_layer
if embedding_projection is not None:
self._embedding_projection = embedding_projection
config_dict = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf_utils.serialize_activation(
activation, use_legacy_format=True
),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf_utils.serialize_initializer(
initializer, use_legacy_format=True
),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'dict_outputs': dict_outputs,
'return_attention_scores': return_attention_scores,
}
# pylint: disable=protected-access
self._setattr_tracking = False
self._config = config_dict
self._setattr_tracking = True
# pylint: enable=protected-access
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return self._config
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
logging.warn(warn_string)
return cls(**config)
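# A minimal usage sketch for the functional-API `BertEncoder`, not part of the
# original module. The batch size, sequence length and `num_layers` below are
# illustrative assumptions:
#
#   encoder = BertEncoder(vocab_size=30522, num_layers=2, dict_outputs=True)
#   word_ids = tf.ones((2, 16), dtype=tf.int32)
#   mask = tf.ones((2, 16), dtype=tf.int32)
#   type_ids = tf.zeros((2, 16), dtype=tf.int32)
#   outputs = encoder([word_ids, mask, type_ids])
#   # With `dict_outputs=True` the result is a dict keyed by `sequence_output`,
#   # `pooled_output` and `encoder_outputs`; with the default `False` it is the
#   # two-element list [sequence_output, pooled_output].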
| 25,604 | 39.837321 | 80 | py |
models | models-master/official/nlp/modeling/networks/classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classification network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import classification
class ClassificationTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(1, 10)
def test_network_creation(self, num_classes):
"""Validate that the Keras object can be created."""
input_width = 512
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes)
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
# Validate that the outputs are of the expected shape.
expected_output_shape = [None, num_classes]
self.assertEqual(expected_output_shape, output.shape.as_list())
@parameterized.parameters(1, 10)
def test_network_invocation(self, num_classes):
"""Validate that the Keras object can be invoked."""
input_width = 512
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='predictions')
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
# Invoke the network as part of a Model.
model = tf.keras.Model(cls_data, output)
input_data = 10 * np.random.random_sample((3, input_width))
_ = model.predict(input_data)
def test_network_invocation_with_internal_logits(self):
"""Validate that the logit outputs are correct."""
input_width = 512
num_classes = 10
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='predictions')
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
model = tf.keras.Model(cls_data, output)
logits_model = tf.keras.Model(test_object.inputs, test_object.logits)
batch_size = 3
input_data = 10 * np.random.random_sample((batch_size, input_width))
outputs = model.predict(input_data)
logits = logits_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_classes)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
@parameterized.parameters(1, 10)
def test_network_invocation_with_internal_and_external_logits(
self, num_classes):
"""Validate that the logit outputs are correct."""
input_width = 512
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='logits')
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
model = tf.keras.Model(cls_data, output)
logits_model = tf.keras.Model(test_object.inputs, test_object.logits)
batch_size = 3
input_data = 10 * np.random.random_sample((batch_size, input_width))
outputs = model.predict(input_data)
logits = logits_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_classes)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
self.assertAllClose(outputs, logits)
def test_network_invocation_with_logit_output(self):
"""Validate that the logit outputs are correct."""
input_width = 512
num_classes = 10
test_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='predictions')
logit_object = classification.Classification(
input_width=input_width, num_classes=num_classes, output='logits')
logit_object.set_weights(test_object.get_weights())
# Create a 2-dimensional input (the first dimension is implicit).
cls_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
output = test_object(cls_data)
logit_output = logit_object(cls_data)
model = tf.keras.Model(cls_data, output)
logits_model = tf.keras.Model(cls_data, logit_output)
batch_size = 3
input_data = 10 * np.random.random_sample((batch_size, input_width))
outputs = model.predict(input_data)
logits = logits_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_classes)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
network = classification.Classification(
input_width=128,
num_classes=10,
initializer='zeros',
output='predictions')
# Create another network object from the first object's config.
new_network = classification.Classification.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = classification.Classification(
input_width=128, num_classes=10, output='bad')
if __name__ == '__main__':
tf.test.main()
| 7,061 | 39.586207 | 79 | py |
models | models-master/official/nlp/modeling/networks/span_labeling_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for span_labeling network."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import span_labeling
class SpanLabelingTest(tf.test.TestCase):
def test_network_creation(self):
"""Validate that the Keras object can be created."""
sequence_length = 15
input_width = 512
test_network = span_labeling.SpanLabeling(
input_width=input_width, output='predictions')
# Create a 3-dimensional input (the first dimension is implicit).
sequence_data = tf.keras.Input(
shape=(sequence_length, input_width), dtype=tf.float32)
start_outputs, end_outputs = test_network(sequence_data)
# Validate that the outputs are of the expected shape.
expected_output_shape = [None, sequence_length]
self.assertEqual(expected_output_shape, start_outputs.shape.as_list())
self.assertEqual(expected_output_shape, end_outputs.shape.as_list())
def test_network_invocation(self):
"""Validate that the Keras object can be invoked."""
sequence_length = 15
input_width = 512
test_network = span_labeling.SpanLabeling(input_width=input_width)
# Create a 3-dimensional input (the first dimension is implicit).
sequence_data = tf.keras.Input(
shape=(sequence_length, input_width), dtype=tf.float32)
outputs = test_network(sequence_data)
model = tf.keras.Model(sequence_data, outputs)
# Invoke the network as part of a Model.
batch_size = 3
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, input_width))
start_outputs, end_outputs = model.predict(input_data)
# Validate that the outputs are of the expected shape.
expected_output_shape = (batch_size, sequence_length)
self.assertEqual(expected_output_shape, start_outputs.shape)
self.assertEqual(expected_output_shape, end_outputs.shape)
def test_network_invocation_with_internal_logit_output(self):
"""Validate that the logit outputs are correct."""
sequence_length = 15
input_width = 512
test_network = span_labeling.SpanLabeling(
input_width=input_width, output='predictions')
# Create a 3-dimensional input (the first dimension is implicit).
sequence_data = tf.keras.Input(
shape=(sequence_length, input_width), dtype=tf.float32)
output = test_network(sequence_data)
model = tf.keras.Model(sequence_data, output)
logit_model = tf.keras.Model(
test_network.inputs,
[test_network.start_logits, test_network.end_logits])
batch_size = 3
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, input_width))
start_outputs, end_outputs = model.predict(input_data)
start_logits, end_logits = logit_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, sequence_length)
self.assertEqual(expected_output_shape, start_outputs.shape)
self.assertEqual(expected_output_shape, end_outputs.shape)
self.assertEqual(expected_output_shape, start_logits.shape)
self.assertEqual(expected_output_shape, end_logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
start_softmax = softmax_model.predict(start_logits)
self.assertAllClose(start_outputs, start_softmax)
end_softmax = softmax_model.predict(end_logits)
self.assertAllClose(end_outputs, end_softmax)
def test_network_invocation_with_external_logit_output(self):
"""Validate that the logit outputs are correct."""
sequence_length = 15
input_width = 512
test_network = span_labeling.SpanLabeling(
input_width=input_width, output='predictions')
logit_network = span_labeling.SpanLabeling(
input_width=input_width, output='logits')
logit_network.set_weights(test_network.get_weights())
# Create a 3-dimensional input (the first dimension is implicit).
sequence_data = tf.keras.Input(
shape=(sequence_length, input_width), dtype=tf.float32)
output = test_network(sequence_data)
logit_output = logit_network(sequence_data)
model = tf.keras.Model(sequence_data, output)
logit_model = tf.keras.Model(sequence_data, logit_output)
batch_size = 3
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, input_width))
start_outputs, end_outputs = model.predict(input_data)
start_logits, end_logits = logit_model.predict(input_data)
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, sequence_length)
self.assertEqual(expected_output_shape, start_outputs.shape)
self.assertEqual(expected_output_shape, end_outputs.shape)
self.assertEqual(expected_output_shape, start_logits.shape)
self.assertEqual(expected_output_shape, end_logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
start_softmax = softmax_model.predict(start_logits)
self.assertAllClose(start_outputs, start_softmax)
end_softmax = softmax_model.predict(end_logits)
self.assertAllClose(end_outputs, end_softmax)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
network = span_labeling.SpanLabeling(
input_width=128,
activation='relu',
initializer='zeros',
output='predictions')
# Create another network object from the first object's config.
new_network = span_labeling.SpanLabeling.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = span_labeling.SpanLabeling(input_width=10, output='bad')
class XLNetSpanLabelingTest(tf.test.TestCase):
def test_basic_invocation_train(self):
batch_size = 2
seq_length = 8
hidden_size = 4
sequence_data = np.random.uniform(
size=(batch_size, seq_length, hidden_size)).astype('float32')
paragraph_mask = np.random.uniform(
size=(batch_size, seq_length)).astype('float32')
class_index = np.random.uniform(size=(batch_size)).astype('uint8')
start_positions = np.zeros(shape=(batch_size)).astype('uint8')
layer = span_labeling.XLNetSpanLabeling(
input_width=hidden_size,
start_n_top=2,
end_n_top=2,
activation='tanh',
dropout_rate=0.,
initializer='glorot_uniform')
output = layer(sequence_data=sequence_data,
class_index=class_index,
paragraph_mask=paragraph_mask,
start_positions=start_positions,
training=True)
expected_keys = {
'start_logits', 'end_logits', 'class_logits', 'start_predictions',
'end_predictions',
}
self.assertSetEqual(expected_keys, set(output.keys()))
def test_basic_invocation_beam_search(self):
batch_size = 2
seq_length = 8
hidden_size = 4
top_n = 5
sequence_data = np.random.uniform(
size=(batch_size, seq_length, hidden_size)).astype('float32')
paragraph_mask = np.random.uniform(
size=(batch_size, seq_length)).astype('float32')
class_index = np.random.uniform(size=(batch_size)).astype('uint8')
layer = span_labeling.XLNetSpanLabeling(
input_width=hidden_size,
start_n_top=top_n,
end_n_top=top_n,
activation='tanh',
dropout_rate=0.,
initializer='glorot_uniform')
output = layer(sequence_data=sequence_data,
class_index=class_index,
paragraph_mask=paragraph_mask,
training=False)
expected_keys = {
'start_top_predictions', 'end_top_predictions', 'class_logits',
'start_top_index', 'end_top_index', 'start_logits',
'end_logits', 'start_predictions', 'end_predictions'
}
self.assertSetEqual(expected_keys, set(output.keys()))
def test_subclass_invocation(self):
"""Tests basic invocation of this layer wrapped in a subclass."""
seq_length = 8
hidden_size = 4
batch_size = 2
sequence_data = tf.keras.Input(shape=(seq_length, hidden_size),
dtype=tf.float32)
class_index = tf.keras.Input(shape=(), dtype=tf.uint8)
paragraph_mask = tf.keras.Input(shape=(seq_length), dtype=tf.float32)
start_positions = tf.keras.Input(shape=(), dtype=tf.int32)
layer = span_labeling.XLNetSpanLabeling(
input_width=hidden_size,
start_n_top=5,
end_n_top=5,
activation='tanh',
dropout_rate=0.,
initializer='glorot_uniform')
output = layer(sequence_data=sequence_data,
class_index=class_index,
paragraph_mask=paragraph_mask,
start_positions=start_positions)
model = tf.keras.Model(
inputs={
'sequence_data': sequence_data,
'class_index': class_index,
'paragraph_mask': paragraph_mask,
'start_positions': start_positions,
},
outputs=output)
sequence_data = tf.random.uniform(
shape=(batch_size, seq_length, hidden_size), dtype=tf.float32)
paragraph_mask = tf.random.uniform(
shape=(batch_size, seq_length), dtype=tf.float32)
class_index = tf.ones(shape=(batch_size,), dtype=tf.uint8)
start_positions = tf.random.uniform(
shape=(batch_size,), maxval=5, dtype=tf.int32)
inputs = dict(sequence_data=sequence_data,
paragraph_mask=paragraph_mask,
class_index=class_index,
start_positions=start_positions)
output = model(inputs)
self.assertIsInstance(output, dict)
# Test `call` without training flag.
output = model(inputs, training=False)
self.assertIsInstance(output, dict)
# Test `call` with training flag.
# Note: this fails due to incompatibility with the functional API.
with self.assertRaisesRegex(AssertionError,
'Could not compute output KerasTensor'):
model(inputs, training=True)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
network = span_labeling.XLNetSpanLabeling(
input_width=128,
start_n_top=5,
end_n_top=1,
activation='tanh',
dropout_rate=0.34,
initializer='zeros')
# Create another network object from the first object's config.
new_network = span_labeling.XLNetSpanLabeling.from_config(
network.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 11,974 | 38.391447 | 79 | py |
models | models-master/official/nlp/modeling/models/bert_pretrainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Pre-training model."""
# pylint: disable=g-classes-have-attributes
import collections
import copy
from typing import List, Optional
from absl import logging
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import networks
@tf.keras.utils.register_keras_serializable(package='Text')
class BertPretrainer(tf.keras.Model):
"""BERT pretraining model.
[Note] Please use the new `BertPretrainerV2` for your projects.
The BertPretrainer allows a user to pass in a transformer stack, and
instantiates the masked language model and classification networks that are
used to create the training objectives.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
network: A transformer network. This network should output a sequence output
and a classification output.
num_classes: Number of classes to predict from the classification network.
num_token_predictions: Number of tokens to predict from the masked LM.
embedding_table: Embedding table of a network. If None,
`network.get_embedding_table()` is used.
activation: The activation (if any) to use in the masked LM network. If
None, no activation will be used.
initializer: The initializer (if any) to use in the masked LM and
classification networks. Defaults to a Glorot uniform initializer.
output: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
network,
num_classes,
num_token_predictions,
embedding_table=None,
activation=None,
initializer='glorot_uniform',
output='logits',
**kwargs):
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a copy of the network inputs for use
# when we construct the Model object at the end of init. (We keep a copy
# because we'll be adding another tensor to the copy later.)
network_inputs = network.inputs
inputs = copy.copy(network_inputs)
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
# Note that, because of how deferred construction happens, we can't use
# the copy of the list here - by the time the network is invoked, the list
# object contains the additional input added below.
sequence_output, cls_output = network(network_inputs)
# The encoder network may get outputs from all layers.
if isinstance(sequence_output, list):
sequence_output = sequence_output[-1]
if isinstance(cls_output, list):
cls_output = cls_output[-1]
sequence_output_length = sequence_output.shape.as_list()[1]
if sequence_output_length is not None and (sequence_output_length <
num_token_predictions):
raise ValueError(
"The passed network's output length is %s, which is less than the "
'requested num_token_predictions %s.' %
(sequence_output_length, num_token_predictions))
masked_lm_positions = tf.keras.layers.Input(
shape=(num_token_predictions,),
name='masked_lm_positions',
dtype=tf.int32)
inputs.append(masked_lm_positions)
if embedding_table is None:
embedding_table = network.get_embedding_table()
masked_lm = layers.MaskedLM(
embedding_table=embedding_table,
activation=activation,
initializer=tf_utils.clone_initializer(initializer),
output=output,
name='cls/predictions')
lm_outputs = masked_lm(
sequence_output, masked_positions=masked_lm_positions)
classification = networks.Classification(
input_width=cls_output.shape[-1],
num_classes=num_classes,
initializer=tf_utils.clone_initializer(initializer),
output=output,
name='classification')
sentence_outputs = classification(cls_output)
super(BertPretrainer, self).__init__(
inputs=inputs,
outputs=dict(masked_lm=lm_outputs, classification=sentence_outputs),
**kwargs)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
config_dict = {
'network': network,
'num_classes': num_classes,
'num_token_predictions': num_token_predictions,
'activation': activation,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.encoder = network
self.classification = classification
self.masked_lm = masked_lm
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
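# A minimal usage sketch, not part of the original module. The encoder
# configuration and the shapes below are illustrative assumptions:
#
#   encoder = networks.BertEncoder(vocab_size=30522, num_layers=2)
#   pretrainer = BertPretrainer(
#       network=encoder, num_classes=2, num_token_predictions=8)
#   word_ids = tf.ones((2, 16), dtype=tf.int32)
#   mask = tf.ones((2, 16), dtype=tf.int32)
#   type_ids = tf.zeros((2, 16), dtype=tf.int32)
#   lm_positions = tf.zeros((2, 8), dtype=tf.int32)
#   outputs = pretrainer([word_ids, mask, type_ids, lm_positions])
#   # `outputs['masked_lm']` has shape (2, 8, 30522) and
#   # `outputs['classification']` has shape (2, 2).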
@tf.keras.utils.register_keras_serializable(package='Text')
@gin.configurable
class BertPretrainerV2(tf.keras.Model):
"""BERT pretraining model V2.
Adds the masked language model head and optional classification heads on top
of the transformer encoder.
Args:
encoder_network: A transformer network. This network should output a
sequence output and a classification output.
mlm_activation: The activation (if any) to use in the masked LM network. If
None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM. Defaults
to a Glorot uniform initializer.
classification_heads: A list of optional head layers to apply to the encoder
sequence outputs.
customized_masked_lm: A customized masked_lm layer. If None, will create
a standard layer from `layers.MaskedLM`; if not None, will use the
specified masked_lm layer. Above arguments `mlm_activation` and
`mlm_initializer` will be ignored.
name: The name of the model.
Inputs: A dictionary of the inputs defined by the encoder network, plus
`masked_lm_positions`.
Outputs: A dictionary of `mlm_logits`, classification head outputs keyed by
head names, and also outputs from `encoder_network`, keyed by
`sequence_output` and `encoder_outputs` (if any).
"""
def __init__(
self,
encoder_network: tf.keras.Model,
mlm_activation=None,
mlm_initializer='glorot_uniform',
classification_heads: Optional[List[tf.keras.layers.Layer]] = None,
customized_masked_lm: Optional[tf.keras.layers.Layer] = None,
name: str = 'bert',
**kwargs):
    super().__init__(name=name, **kwargs)
self._config = {
'encoder_network': encoder_network,
'mlm_initializer': mlm_initializer,
'mlm_activation': mlm_activation,
'classification_heads': classification_heads,
'name': name,
}
self.encoder_network = encoder_network
# Makes sure the weights are built.
_ = self.encoder_network(self.encoder_network.inputs)
inputs = copy.copy(self.encoder_network.inputs)
self.classification_heads = classification_heads or []
if len(set([cls.name for cls in self.classification_heads])) != len(
self.classification_heads):
raise ValueError('Classification heads should have unique names.')
self.masked_lm = customized_masked_lm or layers.MaskedLM(
embedding_table=self.encoder_network.get_embedding_table(),
activation=mlm_activation,
initializer=mlm_initializer,
name='cls/predictions')
masked_lm_positions = tf.keras.layers.Input(
shape=(None,), name='masked_lm_positions', dtype=tf.int32)
if isinstance(inputs, dict):
inputs['masked_lm_positions'] = masked_lm_positions
else:
inputs.append(masked_lm_positions)
self.inputs = inputs
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if isinstance(inputs, list):
      logging.warning('List inputs to BertPretrainerV2 are discouraged.')
inputs = dict([
(ref.name, tensor) for ref, tensor in zip(self.inputs, inputs)
])
outputs = dict()
encoder_network_outputs = self.encoder_network(inputs)
if isinstance(encoder_network_outputs, list):
outputs['pooled_output'] = encoder_network_outputs[1]
# When `encoder_network` was instantiated with return_all_encoder_outputs
# set to True, `encoder_network_outputs[0]` is a list containing
# all transformer layers' output.
if isinstance(encoder_network_outputs[0], list):
outputs['encoder_outputs'] = encoder_network_outputs[0]
outputs['sequence_output'] = encoder_network_outputs[0][-1]
else:
outputs['sequence_output'] = encoder_network_outputs[0]
elif isinstance(encoder_network_outputs, dict):
outputs = encoder_network_outputs
else:
raise ValueError('encoder_network\'s output should be either a list '
'or a dict, but got %s' % encoder_network_outputs)
sequence_output = outputs['sequence_output']
    # Inference may not have masked_lm_positions, in which case mlm_logits is
    # not needed.
if 'masked_lm_positions' in inputs:
masked_lm_positions = inputs['masked_lm_positions']
outputs['mlm_logits'] = self.masked_lm(
sequence_output, masked_positions=masked_lm_positions)
for cls_head in self.classification_heads:
cls_outputs = cls_head(sequence_output)
if isinstance(cls_outputs, dict):
outputs.update(cls_outputs)
else:
outputs[cls_head.name] = cls_outputs
return outputs
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.encoder_network, masked_lm=self.masked_lm)
for head in self.classification_heads:
for key, item in head.checkpoint_items.items():
items['.'.join([head.name, key])] = item
return items
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 11,480 | 39.712766 | 100 | py |
models | models-master/official/nlp/modeling/models/bert_pretrainer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BERT pretrainer model."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_pretrainer
class BertPretrainerTest(tf.test.TestCase, parameterized.TestCase):
def test_bert_pretrainer(self):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=2,
max_sequence_length=sequence_length)
# Create a BERT trainer with the created network.
num_classes = 3
num_token_predictions = 2
bert_trainer_model = bert_pretrainer.BertPretrainer(
test_network,
num_classes=num_classes,
num_token_predictions=num_token_predictions)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
masked_lm_positions = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model(
[word_ids, mask, type_ids, masked_lm_positions])
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
expected_classification_shape = [None, num_classes]
self.assertAllEqual(expected_lm_shape, outputs['masked_lm'].shape.as_list())
self.assertAllEqual(expected_classification_shape,
outputs['classification'].shape.as_list())
def test_bert_trainer_tensor_call(self):
"""Validate that the Keras object can be invoked."""
# Build a transformer network to use within the BERT trainer.
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_pretrainer.BertPretrainer(
test_network, num_classes=2, num_token_predictions=2)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
lm_mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = bert_trainer_model([word_ids, mask, type_ids, lm_mask])
def test_serialize_deserialize(self):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.BertEncoder(
vocab_size=100, num_layers=2, max_sequence_length=5)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = bert_pretrainer.BertPretrainer(
test_network, num_classes=4, num_token_predictions=3)
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = bert_pretrainer.BertPretrainer.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
class BertPretrainerV2Test(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product(
(False, True),
(False, True),
(False, True),
(False, True),
))
def test_bert_pretrainerv2(self, dict_outputs, return_all_encoder_outputs,
use_customized_masked_lm, has_masked_lm_positions):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
del dict_outputs, return_all_encoder_outputs
vocab_size = 100
sequence_length = 512
hidden_size = 48
num_layers = 2
test_network = networks.BertEncoderV2(
vocab_size=vocab_size,
num_layers=num_layers,
hidden_size=hidden_size,
max_sequence_length=sequence_length)
_ = test_network(test_network.inputs)
# Create a BERT trainer with the created network.
if use_customized_masked_lm:
customized_masked_lm = layers.MaskedLM(
embedding_table=test_network.get_embedding_table())
else:
customized_masked_lm = None
bert_trainer_model = bert_pretrainer.BertPretrainerV2(
encoder_network=test_network, customized_masked_lm=customized_masked_lm)
num_token_predictions = 20
# Create a set of 2-dimensional inputs (the first dimension is implicit).
inputs = dict(
input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32))
if has_masked_lm_positions:
inputs['masked_lm_positions'] = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model(inputs)
has_encoder_outputs = True # dict_outputs or return_all_encoder_outputs
expected_keys = ['sequence_output', 'pooled_output']
if has_encoder_outputs:
expected_keys.append('encoder_outputs')
if has_masked_lm_positions:
expected_keys.append('mlm_logits')
self.assertSameElements(outputs.keys(), expected_keys)
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
if has_masked_lm_positions:
self.assertAllEqual(expected_lm_shape,
outputs['mlm_logits'].shape.as_list())
expected_sequence_output_shape = [None, sequence_length, hidden_size]
self.assertAllEqual(expected_sequence_output_shape,
outputs['sequence_output'].shape.as_list())
expected_pooled_output_shape = [None, hidden_size]
self.assertAllEqual(expected_pooled_output_shape,
outputs['pooled_output'].shape.as_list())
def test_multiple_cls_outputs(self):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
hidden_size = 48
num_layers = 2
test_network = networks.BertEncoderV2(
vocab_size=vocab_size,
num_layers=num_layers,
hidden_size=hidden_size,
max_sequence_length=sequence_length)
bert_trainer_model = bert_pretrainer.BertPretrainerV2(
encoder_network=test_network,
classification_heads=[layers.MultiClsHeads(
inner_dim=5, cls_list=[('foo', 2), ('bar', 3)])])
num_token_predictions = 20
# Create a set of 2-dimensional inputs (the first dimension is implicit).
inputs = dict(
input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
masked_lm_positions=tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32))
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model(inputs)
self.assertEqual(outputs['foo'].shape.as_list(), [None, 2])
self.assertEqual(outputs['bar'].shape.as_list(), [None, 3])
def test_v2_serialize_deserialize(self):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer.
test_network = networks.BertEncoderV2(vocab_size=100, num_layers=2)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = bert_pretrainer.BertPretrainerV2(
encoder_network=test_network)
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = bert_pretrainer.BertPretrainerV2.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 9,900 | 41.676724 | 80 | py |
models | models-master/official/nlp/modeling/models/bert_classifier_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BERT trainer network."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_classifier
class BertClassifierTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('single_cls', 1, False), ('3_cls', 3, False),
('3_cls_dictoutputs', 3, True))
def test_bert_trainer(self, num_classes, dict_outputs):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
test_network = networks.BertEncoder(
vocab_size=vocab_size, num_layers=2, dict_outputs=dict_outputs)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_classifier.BertClassifier(
test_network, num_classes=num_classes)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
cls_outs = bert_trainer_model([word_ids, mask, type_ids])
# Validate that the outputs are of the expected shape.
expected_classification_shape = [None, num_classes]
self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list())
@parameterized.named_parameters(
('single_cls', 1, False),
('2_cls', 2, False),
('single_cls_custom_head', 1, True),
('2_cls_custom_head', 2, True))
def test_bert_trainer_tensor_call(self, num_classes, use_custom_head):
"""Validate that the Keras object can be invoked."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
cls_head = layers.GaussianProcessClassificationHead(
inner_dim=0, num_classes=num_classes) if use_custom_head else None
# Create a BERT trainer with the created network.
bert_trainer_model = bert_classifier.BertClassifier(
test_network, num_classes=num_classes, cls_head=cls_head)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = bert_trainer_model([word_ids, mask, type_ids])
@parameterized.named_parameters(
('default_cls_head', None),
('sngp_cls_head', layers.GaussianProcessClassificationHead(
inner_dim=0, num_classes=4)))
def test_serialize_deserialize(self, cls_head):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer.
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = bert_classifier.BertClassifier(
test_network, num_classes=4, initializer='zeros', cls_head=cls_head)
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = bert_classifier.BertClassifier.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,815 | 43.592593 | 80 | py |
models | models-master/official/nlp/modeling/models/bert_span_labeler.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Question Answering model."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.nlp.modeling import networks
@tf.keras.utils.register_keras_serializable(package='Text')
class BertSpanLabeler(tf.keras.Model):
"""Span labeler model based on a BERT-style transformer-based encoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertSpanLabeler allows a user to pass in a transformer encoder, and
instantiates a span labeling network based on a single dense layer.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a `get_embedding_table` method.
initializer: The initializer (if any) to use in the span labeling network.
Defaults to a Glorot uniform initializer.
    output: The output style for this network. Can be either `logits` or
      `predictions`.
"""
def __init__(self,
network,
initializer='glorot_uniform',
output='logits',
**kwargs):
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a handle to the network inputs for use
# when we construct the Model object at the end of init.
inputs = network.inputs
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
outputs = network(inputs)
if isinstance(outputs, list):
sequence_output = outputs[0]
else:
sequence_output = outputs['sequence_output']
# The input network (typically a transformer model) may get outputs from all
# layers. When this case happens, we retrieve the last layer output.
if isinstance(sequence_output, list):
sequence_output = sequence_output[-1]
# This is an instance variable for ease of access to the underlying task
# network.
span_labeling = networks.SpanLabeling(
input_width=sequence_output.shape[-1],
initializer=initializer,
output=output,
name='span_labeling')
start_logits, end_logits = span_labeling(sequence_output)
# Use identity layers wrapped in lambdas to explicitly name the output
# tensors. This allows us to use string-keyed dicts in Keras fit/predict/
# evaluate calls.
start_logits = tf.keras.layers.Lambda(
tf.identity, name='start_positions')(
start_logits)
end_logits = tf.keras.layers.Lambda(
tf.identity, name='end_positions')(
end_logits)
logits = [start_logits, end_logits]
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super(BertSpanLabeler, self).__init__(
inputs=inputs, outputs=logits, **kwargs)
self._network = network
config_dict = {
'network': network,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.span_labeling = span_labeling
@property
def checkpoint_items(self):
return dict(encoder=self._network)
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 4,997 | 38.666667 | 80 | py |
models | models-master/official/nlp/modeling/models/bert_token_classifier_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BERT token classifier."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_token_classifier
class BertTokenClassifierTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters((True, True), (False, False))
def test_bert_trainer(self, dict_outputs, output_encoder_outputs):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
hidden_size = 768
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=2,
max_sequence_length=sequence_length,
dict_outputs=dict_outputs,
hidden_size=hidden_size)
# Create a BERT trainer with the created network.
num_classes = 3
bert_trainer_model = bert_token_classifier.BertTokenClassifier(
test_network,
num_classes=num_classes,
output_encoder_outputs=output_encoder_outputs)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model([word_ids, mask, type_ids])
if output_encoder_outputs:
logits = outputs['logits']
encoder_outputs = outputs['encoder_outputs']
self.assertAllEqual(encoder_outputs.shape.as_list(),
[None, sequence_length, hidden_size])
else:
logits = outputs['logits']
# Validate that the outputs are of the expected shape.
expected_classification_shape = [None, sequence_length, num_classes]
self.assertAllEqual(expected_classification_shape, logits.shape.as_list())
def test_bert_trainer_tensor_call(self):
"""Validate that the Keras object can be invoked."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.BertEncoder(
vocab_size=100, num_layers=2, max_sequence_length=2)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_token_classifier.BertTokenClassifier(
test_network, num_classes=2)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = bert_trainer_model([word_ids, mask, type_ids])
def test_serialize_deserialize(self):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.BertEncoder(
vocab_size=100, num_layers=2, max_sequence_length=5)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = bert_token_classifier.BertTokenClassifier(
test_network, num_classes=4, initializer='zeros', output='predictions')
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = (
bert_token_classifier.BertTokenClassifier.from_config(config))
# Validate that the config can be forced to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,835 | 41.421053 | 80 | py |
models | models-master/official/nlp/modeling/models/seq2seq_transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Transformer model."""
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.nlp.modeling.models import seq2seq_transformer
class Seq2SeqTransformerTest(tf.test.TestCase, parameterized.TestCase):
def _build_model(self, padded_decode, decode_max_length, embedding_width):
num_layers = 1
num_attention_heads = 2
intermediate_size = 32
vocab_size = 100
encdec_kwargs = dict(
num_layers=num_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
activation="relu",
dropout_rate=0.01,
attention_dropout_rate=0.01,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.01)
encoder_layer = seq2seq_transformer.TransformerEncoder(**encdec_kwargs)
decoder_layer = seq2seq_transformer.TransformerDecoder(**encdec_kwargs)
return seq2seq_transformer.Seq2SeqTransformer(
vocab_size=vocab_size,
embedding_width=embedding_width,
dropout_rate=0.01,
padded_decode=padded_decode,
decode_max_length=decode_max_length,
beam_size=4,
alpha=0.6,
encoder_layer=encoder_layer,
decoder_layer=decoder_layer)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
],
embed=[True, False],
is_training=[True, False],
mode="eager"))
def test_create_model_with_ds(self, distribution, embed, is_training):
with distribution.scope():
padded_decode = isinstance(
distribution,
(tf.distribute.TPUStrategy, tf.distribute.experimental.TPUStrategy))
decode_max_length = 10
batch_size = 4
embedding_width = 16
model = self._build_model(
padded_decode, decode_max_length, embedding_width)
@tf.function
def step(inputs):
def _step_fn(inputs):
return model(inputs)
outputs = distribution.run(_step_fn, args=(inputs,))
return tf.nest.map_structure(distribution.experimental_local_results,
outputs)
if embed:
fake_inputs = dict(
embedded_inputs=np.zeros(
(batch_size, decode_max_length, embedding_width),
dtype=np.float32),
input_masks=np.ones((batch_size, decode_max_length), dtype=bool))
else:
fake_inputs = dict(
inputs=np.zeros((batch_size, decode_max_length), dtype=np.int32))
if is_training:
fake_inputs["targets"] = np.zeros((batch_size, 8), dtype=np.int32)
local_outputs = step(fake_inputs)
logging.info("local_outputs=%s", local_outputs)
self.assertEqual(local_outputs[0].shape, (4, 8, 100))
else:
local_outputs = step(fake_inputs)
logging.info("local_outputs=%s", local_outputs)
self.assertEqual(local_outputs["outputs"][0].shape, (4, 10))
@parameterized.parameters(True, False)
def test_create_savedmodel(self, padded_decode):
decode_max_length = 10
embedding_width = 16
model = self._build_model(
padded_decode, decode_max_length, embedding_width)
class SaveModule(tf.Module):
def __init__(self, model):
super(SaveModule, self).__init__()
self.model = model
@tf.function
def serve(self, inputs):
return self.model.call(dict(inputs=inputs))
@tf.function
def embedded_serve(self, embedded_inputs, input_masks):
return self.model.call(
dict(embedded_inputs=embedded_inputs, input_masks=input_masks))
save_module = SaveModule(model)
if padded_decode:
tensor_shape = (4, decode_max_length)
embedded_tensor_shape = (4, decode_max_length, embedding_width)
else:
tensor_shape = (None, None)
embedded_tensor_shape = (None, None, embedding_width)
signatures = dict(
serving_default=save_module.serve.get_concrete_function(
tf.TensorSpec(shape=tensor_shape, dtype=tf.int32, name="inputs")),
embedded_serving=save_module.embedded_serve.get_concrete_function(
tf.TensorSpec(
shape=embedded_tensor_shape, dtype=tf.float32,
name="embedded_inputs"),
tf.TensorSpec(
shape=tensor_shape, dtype=tf.bool, name="input_masks"),
))
tf.saved_model.save(save_module, self.get_temp_dir(), signatures=signatures)
if __name__ == "__main__":
tf.test.main()
| 5,399 | 34.294118 | 80 | py |
models | models-master/official/nlp/modeling/models/electra_pretrainer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ELECTRA pre trainer network."""
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import electra_pretrainer
class ElectraPretrainerTest(tf.test.TestCase):
def test_electra_pretrainer(self):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the ELECTRA trainer.
vocab_size = 100
sequence_length = 512
test_generator_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=2,
max_sequence_length=sequence_length,
dict_outputs=True)
test_discriminator_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=2,
max_sequence_length=sequence_length,
dict_outputs=True)
    # Create an ELECTRA trainer with the created network.
num_classes = 3
num_token_predictions = 2
    electra_trainer_model = electra_pretrainer.ElectraPretrainer(
generator_network=test_generator_network,
discriminator_network=test_discriminator_network,
vocab_size=vocab_size,
num_classes=num_classes,
num_token_predictions=num_token_predictions,
disallow_correct=True)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
lm_positions = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
lm_ids = tf.keras.Input(shape=(num_token_predictions,), dtype=tf.int32)
inputs = {
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids,
'masked_lm_positions': lm_positions,
'masked_lm_ids': lm_ids
}
# Invoke the trainer model on the inputs. This causes the layer to be built.
    outputs = electra_trainer_model(inputs)
lm_outs = outputs['lm_outputs']
cls_outs = outputs['sentence_outputs']
disc_logits = outputs['disc_logits']
disc_label = outputs['disc_label']
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
expected_classification_shape = [None, num_classes]
expected_disc_logits_shape = [None, sequence_length]
expected_disc_label_shape = [None, sequence_length]
self.assertAllEqual(expected_lm_shape, lm_outs.shape.as_list())
self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list())
self.assertAllEqual(expected_disc_logits_shape, disc_logits.shape.as_list())
self.assertAllEqual(expected_disc_label_shape, disc_label.shape.as_list())
def test_electra_trainer_tensor_call(self):
"""Validate that the Keras object can be invoked."""
# Build a transformer network to use within the ELECTRA trainer. (Here, we
# use a short sequence_length for convenience.)
test_generator_network = networks.BertEncoder(
vocab_size=100, num_layers=4, max_sequence_length=3, dict_outputs=True)
test_discriminator_network = networks.BertEncoder(
vocab_size=100, num_layers=4, max_sequence_length=3, dict_outputs=True)
    # Create an ELECTRA trainer with the created network.
    electra_trainer_model = electra_pretrainer.ElectraPretrainer(
generator_network=test_generator_network,
discriminator_network=test_discriminator_network,
vocab_size=100,
num_classes=2,
num_token_predictions=2)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1, 1], [1, 0, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1, 1], [2, 2, 2]], dtype=tf.int32)
lm_positions = tf.constant([[0, 1], [0, 2]], dtype=tf.int32)
lm_ids = tf.constant([[10, 20], [20, 30]], dtype=tf.int32)
inputs = {
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids,
'masked_lm_positions': lm_positions,
'masked_lm_ids': lm_ids
}
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
    _ = electra_trainer_model(inputs)
def test_serialize_deserialize(self):
"""Validate that the ELECTRA trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_generator_network = networks.BertEncoder(
vocab_size=100, num_layers=4, max_sequence_length=3)
test_discriminator_network = networks.BertEncoder(
vocab_size=100, num_layers=4, max_sequence_length=3)
    # Create an ELECTRA trainer with the created network. (Note that all the
    # args are different, so we can catch any serialization mismatches.)
electra_trainer_model = electra_pretrainer.ElectraPretrainer(
generator_network=test_generator_network,
discriminator_network=test_discriminator_network,
vocab_size=100,
num_classes=2,
num_token_predictions=2)
# Create another BERT trainer via serialization and deserialization.
config = electra_trainer_model.get_config()
new_electra_trainer_model = electra_pretrainer.ElectraPretrainer.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_electra_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(electra_trainer_model.get_config(),
new_electra_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 6,503 | 41.509804 | 81 | py |
models | models-master/official/nlp/modeling/models/dual_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for dual encoder style models."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class DualEncoder(tf.keras.Model):
"""A dual encoder model based on a transformer-based encoder.
This is an implementation of the dual encoder network structure based on the
  transformer stack, as described in ["Language-agnostic BERT Sentence
Embedding"](https://arxiv.org/abs/2007.01852)
The DualEncoder allows a user to pass in a transformer stack, and build a dual
encoder model based on the transformer stack.
Args:
network: A transformer network which should output an encoding output.
max_seq_length: The maximum allowed sequence length for transformer.
    normalize: If set to True, normalize the encoding produced by the
      transformer.
logit_scale: The scaling factor of dot products when doing training.
logit_margin: The margin between positive and negative when doing training.
output: The output style for this network. Can be either `logits` or
`predictions`. If set to `predictions`, it will output the embedding
      produced by the transformer network.
"""
def __init__(self,
network: tf.keras.Model,
max_seq_length: int = 32,
normalize: bool = True,
logit_scale: float = 1.0,
logit_margin: float = 0.0,
output: str = 'logits',
**kwargs) -> None:
if output == 'logits':
left_word_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='left_word_ids')
left_mask = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='left_mask')
left_type_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='left_type_ids')
else:
      # Keep consistent with legacy BERT hub module input names.
left_word_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
left_mask = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
left_type_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')
left_inputs = [left_word_ids, left_mask, left_type_ids]
left_outputs = network(left_inputs)
if isinstance(left_outputs, list):
left_sequence_output, left_encoded = left_outputs
else:
left_sequence_output = left_outputs['sequence_output']
left_encoded = left_outputs['pooled_output']
if normalize:
left_encoded = tf.keras.layers.Lambda(
lambda x: tf.nn.l2_normalize(x, axis=1))(
left_encoded)
if output == 'logits':
right_word_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='right_word_ids')
right_mask = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='right_mask')
right_type_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='right_type_ids')
right_inputs = [right_word_ids, right_mask, right_type_ids]
right_outputs = network(right_inputs)
if isinstance(right_outputs, list):
_, right_encoded = right_outputs
else:
right_encoded = right_outputs['pooled_output']
if normalize:
right_encoded = tf.keras.layers.Lambda(
lambda x: tf.nn.l2_normalize(x, axis=1))(
right_encoded)
dot_products = layers.MatMulWithMargin(
logit_scale=logit_scale,
logit_margin=logit_margin,
name='dot_product')
inputs = [
left_word_ids, left_mask, left_type_ids, right_word_ids, right_mask,
right_type_ids
]
left_logits, right_logits = dot_products(left_encoded, right_encoded)
outputs = dict(left_logits=left_logits, right_logits=right_logits)
elif output == 'predictions':
inputs = [left_word_ids, left_mask, left_type_ids]
# To keep consistent with legacy BERT hub modules, the outputs are
# "pooled_output" and "sequence_output".
outputs = dict(
sequence_output=left_sequence_output, pooled_output=left_encoded)
else:
raise ValueError('output type %s is not supported' % output)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super(DualEncoder, self).__init__(inputs=inputs, outputs=outputs, **kwargs)
config_dict = {
'network': network,
'max_seq_length': max_seq_length,
'normalize': normalize,
'logit_scale': logit_scale,
'logit_margin': logit_margin,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.network = network
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.network)
return items
| 6,583 | 39.392638 | 80 | py |
models | models-master/official/nlp/modeling/models/seq2seq_transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement Seq2Seq Transformer model by TF official NLP library.
Model paper: https://arxiv.org/pdf/1706.03762.pdf
"""
import math
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling.ops import beam_search
EOS_ID = 1
class Seq2SeqTransformer(tf.keras.Model):
"""Transformer model with Keras.
Implemented as described in: https://arxiv.org/pdf/1706.03762.pdf
The Transformer model consists of an encoder and decoder. The input is an int
sequence (or a batch of sequences). The encoder produces a continuous
representation, and the decoder uses the encoder output to generate
probabilities for the output sequence.
"""
def __init__(self,
vocab_size=33708,
embedding_width=512,
dropout_rate=0.0,
padded_decode=False,
decode_max_length=None,
extra_decode_length=0,
beam_size=4,
alpha=0.6,
encoder_layer=None,
decoder_layer=None,
eos_id=EOS_ID,
**kwargs):
"""Initialize layers to build Transformer model.
Args:
vocab_size: Size of vocabulary.
embedding_width: Size of hidden layer for embedding.
dropout_rate: Dropout probability.
      padded_decode: Whether max_sequence_length padding is used. If set to
        False, max_sequence_length padding is not used.
decode_max_length: maximum number of steps to decode a sequence.
extra_decode_length: Beam search will run extra steps to decode.
beam_size: Number of beams for beam search
alpha: The strength of length normalization for beam search.
encoder_layer: An initialized encoder layer.
decoder_layer: An initialized decoder layer.
eos_id: Id of end of sentence token.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._vocab_size = vocab_size
self._embedding_width = embedding_width
self._dropout_rate = dropout_rate
self._padded_decode = padded_decode
self._decode_max_length = decode_max_length
self._extra_decode_length = extra_decode_length
self._beam_size = beam_size
self._alpha = alpha
self._eos_id = eos_id
self.embedding_lookup = layers.OnDeviceEmbedding(
vocab_size=self._vocab_size,
embedding_width=self._embedding_width,
initializer=tf.random_normal_initializer(
mean=0., stddev=self._embedding_width**-0.5),
scale_factor=self._embedding_width**0.5)
self.encoder_layer = encoder_layer
self.decoder_layer = decoder_layer
self.position_embedding = layers.RelativePositionEmbedding(
hidden_size=self._embedding_width)
self.encoder_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self.decoder_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
def get_config(self):
config = {
"vocab_size": self._vocab_size,
"hidden_size": self._embedding_width,
"dropout_rate": self._dropout_rate,
"padded_decode": self._padded_decode,
"decode_max_length": self._decode_max_length,
"eos_id": self._eos_id,
"extra_decode_length": self._extra_decode_length,
"beam_size": self._beam_size,
"alpha": self._alpha,
"encoder_layer": self.encoder_layer,
"decoder_layer": self.decoder_layer,
}
base_config = super(Seq2SeqTransformer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _embedding_linear(self, embedding_matrix, x):
"""Uses embeddings as linear transformation weights."""
embedding_matrix = tf.cast(embedding_matrix, dtype=self.compute_dtype)
x = tf.cast(x, dtype=self.compute_dtype)
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
hidden_size = tf.shape(x)[2]
vocab_size = tf.shape(embedding_matrix)[0]
x = tf.reshape(x, [-1, hidden_size])
logits = tf.matmul(x, embedding_matrix, transpose_b=True)
return tf.reshape(logits, [batch_size, length, vocab_size])
def _parse_inputs(self, inputs):
"""Parses the `call` inputs and returns an uniformed output."""
sources = inputs.get("inputs", None)
input_mask = inputs.get("input_masks", None)
embedded = inputs.get("embedded_inputs", None)
if sources is None and embedded is not None:
embedded_inputs = embedded
boolean_mask = input_mask
input_shape = tf_utils.get_shape_list(embedded, expected_rank=3)
source_dtype = embedded.dtype
elif sources is not None:
embedded_inputs = self.embedding_lookup(sources)
boolean_mask = tf.not_equal(sources, 0)
input_shape = tf_utils.get_shape_list(sources, expected_rank=2)
source_dtype = sources.dtype
else:
raise KeyError(
"The call method expects either `inputs` or `embedded_inputs` and "
"`input_masks` as input features.")
return embedded_inputs, boolean_mask, input_shape, source_dtype
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Calculate target logits or inferred target sequences.
Args:
inputs: a dictionary of tensors.
Feature `inputs` (optional): int tensor with shape
`[batch_size, input_length]`.
Feature `embedded_inputs` (optional): float tensor with shape
`[batch_size, input_length, embedding_width]`.
Feature `targets` (optional): None or int tensor with shape
`[batch_size, target_length]`.
Feature `input_masks` (optional): When providing the `embedded_inputs`,
the dictionary must provide a boolean mask marking the filled time
steps. The shape of the tensor is `[batch_size, input_length]`.
Either `inputs` or `embedded_inputs` and `input_masks` must be present
in the input dictionary. In the second case the projection of the
integer tokens to the transformer embedding space is skipped and
`input_masks` is expected to be present.
Returns:
If targets is defined, then return logits for each word in the target
sequence, which is a float tensor with shape
`(batch_size, target_length, vocab_size)`. If target is `None`, then
generate output sequence one token at a time and
returns a dictionary {
outputs: `(batch_size, decoded_length)`
scores: `(batch_size, 1)`}
Even when `float16` is used, the output tensor(s) are always `float32`.
Raises:
      NotImplementedError: If trying to use the padded decode method on
        CPU/GPUs.
"""
# Prepare inputs to the layer stack by adding positional encodings and
# applying dropout.
targets = inputs.get("targets", None)
(embedded_inputs, boolean_mask,
input_shape, source_dtype) = self._parse_inputs(inputs)
embedding_mask = tf.cast(boolean_mask, embedded_inputs.dtype)
embedded_inputs *= tf.expand_dims(embedding_mask, -1)
# Attention_mask generation.
attention_mask = tf.cast(
tf.reshape(boolean_mask, [input_shape[0], 1, input_shape[1]]),
dtype=source_dtype)
broadcast_ones = tf.ones(
shape=[input_shape[0], input_shape[1], 1], dtype=source_dtype)
attention_mask = broadcast_ones * attention_mask
pos_encoding = self.position_embedding(embedded_inputs)
pos_encoding = tf.cast(pos_encoding, embedded_inputs.dtype)
encoder_inputs = embedded_inputs + pos_encoding
encoder_inputs = self.encoder_dropout(encoder_inputs)
encoder_outputs = self.encoder_layer(
encoder_inputs, attention_mask=attention_mask)
if targets is None:
if self._padded_decode:
max_decode_length = self._decode_max_length
else:
max_decode_length = self._decode_max_length or (
tf.shape(encoder_outputs)[1] + self._extra_decode_length)
symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length)
batch_size = tf.shape(encoder_outputs)[0]
# Create initial set of IDs that will be passed to symbols_to_logits_fn.
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
# Create cache storing decoder attention values for each layer.
init_decode_length = (max_decode_length if self._padded_decode else 0)
num_heads = self.decoder_layer.num_attention_heads
dim_per_head = self._embedding_width // num_heads
# Cache dtype needs to match beam_search dtype.
# pylint: disable=g-complex-comprehension
cache = {
str(layer): {
"key":
tf.zeros(
[batch_size, init_decode_length, num_heads, dim_per_head],
dtype=self.compute_dtype),
"value":
tf.zeros(
[batch_size, init_decode_length, num_heads, dim_per_head],
dtype=self.compute_dtype)
} for layer in range(self.decoder_layer.num_layers)
}
# pylint: enable=g-complex-comprehension
# Add encoder output and attention bias to the cache.
encoder_outputs = tf.cast(encoder_outputs, dtype=self.compute_dtype)
attention_mask = tf.cast(
tf.reshape(boolean_mask, [input_shape[0], 1, input_shape[1]]),
dtype=self.compute_dtype)
cache["encoder_outputs"] = encoder_outputs
cache["encoder_decoder_attention_mask"] = attention_mask
# Use beam search to find the top beam_size sequences and scores.
decoded_ids, scores = beam_search.sequence_beam_search(
symbols_to_logits_fn=symbols_to_logits_fn,
initial_ids=initial_ids,
initial_cache=cache,
vocab_size=self._vocab_size,
beam_size=self._beam_size,
alpha=self._alpha,
max_decode_length=max_decode_length,
eos_id=self._eos_id,
padded_decode=self._padded_decode,
dtype=self.compute_dtype)
# Get the top sequence for each batch element
top_decoded_ids = decoded_ids[:, 0, 1:]
top_scores = scores[:, 0]
return {"outputs": top_decoded_ids, "scores": top_scores}
# Shift targets to the right, and remove the last element
targets = tf.pad(targets, [[0, 0], [1, 0]])[:, :-1]
decoder_inputs = self.embedding_lookup(targets)
length = tf.shape(decoder_inputs)[1]
pos_encoding = self.position_embedding(decoder_inputs)
pos_encoding = tf.cast(pos_encoding, embedded_inputs.dtype)
decoder_inputs += pos_encoding
decoder_inputs = self.decoder_dropout(decoder_inputs)
decoder_shape = tf_utils.get_shape_list(decoder_inputs, expected_rank=3)
batch_size = decoder_shape[0]
decoder_length = decoder_shape[1]
self_attention_mask = tf.linalg.band_part(tf.ones([length, length]), -1, 0)
self_attention_mask = tf.reshape(self_attention_mask, [1, length, length])
self_attention_mask = tf.tile(self_attention_mask, [batch_size, 1, 1])
attention_mask = tf.cast(
tf.expand_dims(boolean_mask, axis=1), dtype=source_dtype)
attention_mask = tf.tile(attention_mask, [1, decoder_length, 1])
outputs = self.decoder_layer(
decoder_inputs,
encoder_outputs,
self_attention_mask=self_attention_mask,
cross_attention_mask=attention_mask)
logits = self._embedding_linear(self.embedding_lookup.embeddings, outputs)
# Model outputs should be float32 to avoid numeric issues.
# https://www.tensorflow.org/guide/mixed_precision#building_the_model
logits = tf.cast(logits, tf.float32)
return logits
def _get_symbols_to_logits_fn(self, max_decode_length):
"""Returns a decoding function that calculates logits of the next tokens."""
timing_signal = self.position_embedding(
inputs=None, length=max_decode_length + 1)
timing_signal = tf.cast(timing_signal, dtype=self.compute_dtype)
decoder_self_attention_mask = tf.linalg.band_part(
tf.ones([max_decode_length, max_decode_length],
dtype=self.compute_dtype), -1, 0)
decoder_self_attention_mask = tf.reshape(
decoder_self_attention_mask, [1, max_decode_length, max_decode_length])
def symbols_to_logits_fn(ids, i, cache):
"""Generate logits for next potential IDs.
Args:
ids: Current decoded sequences. int tensor with shape `(batch_size *
beam_size, i + 1)`.
i: Loop index.
cache: Dictionary of values storing the encoder output, encoder-decoder
attention bias, and previous decoder attention values.
Returns:
Tuple of
(logits with shape `(batch_size * beam_size, vocab_size)`,
updated cache values)
"""
# Set decoder input to the last generated IDs
decoder_input = ids[:, -1:]
# Preprocess decoder input by getting embeddings and adding timing signal.
decoder_input = self.embedding_lookup(decoder_input)
decoder_input += timing_signal[i]
if self._padded_decode:
# indexing does not work on TPU.
bias_shape = decoder_self_attention_mask.shape.as_list()
self_attention_mask = tf.slice(decoder_self_attention_mask, [0, i, 0],
[bias_shape[0], 1, bias_shape[2]])
else:
self_attention_mask = decoder_self_attention_mask[:, i:i + 1, :i + 1]
decoder_shape = tf_utils.get_shape_list(decoder_input, expected_rank=3)
batch_size = decoder_shape[0]
decoder_length = decoder_shape[1]
self_attention_mask = tf.tile(self_attention_mask, [batch_size, 1, 1])
attention_mask = cache.get("encoder_decoder_attention_mask")
attention_mask = tf.tile(attention_mask, [1, decoder_length, 1])
decoder_outputs = self.decoder_layer(
decoder_input,
cache.get("encoder_outputs"),
self_attention_mask=self_attention_mask,
cross_attention_mask=attention_mask,
cache=cache,
decode_loop_step=i if self._padded_decode else None)
decoder_outputs = tf.cast(decoder_outputs, dtype=self.compute_dtype)
logits = self._embedding_linear(self.embedding_lookup.embeddings,
decoder_outputs)
logits = tf.squeeze(logits, axis=[1])
return logits, cache
return symbols_to_logits_fn
class TransformerEncoder(tf.keras.layers.Layer):
"""Transformer encoder.
Transformer encoder is made up of N identical layers. Each layer is composed
of the sublayers:
1. Self-attention layer
2. Feedforward network (which is 2 fully-connected layers)
"""
def __init__(self,
num_layers=6,
num_attention_heads=8,
intermediate_size=2048,
activation="relu",
dropout_rate=0.0,
attention_dropout_rate=0.0,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.0,
**kwargs):
"""Initialize a Transformer encoder.
Args:
num_layers: Number of layers.
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate (Feedforward) layer.
activation: Activation for the intermediate layer.
dropout_rate: Dropout probability.
attention_dropout_rate: Dropout probability for attention layers.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super(TransformerEncoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._activation = activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
def build(self, input_shape):
"""Implements build() for the layer."""
self.encoder_layers = []
for i in range(self.num_layers):
self.encoder_layers.append(
layers.TransformerEncoderBlock(
num_attention_heads=self.num_attention_heads,
inner_dim=self._intermediate_size,
inner_activation=self._activation,
output_dropout=self._dropout_rate,
attention_dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
norm_first=self._norm_first,
norm_epsilon=self._norm_epsilon,
inner_dropout=self._intermediate_dropout,
attention_initializer=attention_initializer(input_shape[2]),
name=("layer_%d" % i)))
self.output_normalization = tf.keras.layers.LayerNormalization(
epsilon=self._norm_epsilon, dtype="float32")
super(TransformerEncoder, self).build(input_shape)
def get_config(self):
config = {
"num_layers": self.num_layers,
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self._intermediate_size,
"activation": self._activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout
}
base_config = super(TransformerEncoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, encoder_inputs, attention_mask=None):
"""Return the output of the encoder.
Args:
encoder_inputs: A tensor with shape `(batch_size, input_length,
hidden_size)`.
attention_mask: A mask for the encoder self-attention layer with shape
`(batch_size, input_length, input_length)`.
Returns:
Output of encoder which is a `float32` tensor with shape
`(batch_size, input_length, hidden_size)`.
"""
for layer_idx in range(self.num_layers):
encoder_inputs = self.encoder_layers[layer_idx](
[encoder_inputs, attention_mask])
output_tensor = encoder_inputs
output_tensor = self.output_normalization(output_tensor)
return output_tensor
class TransformerDecoder(tf.keras.layers.Layer):
"""Transformer decoder.
Like the encoder, the decoder is made up of N identical layers.
Each layer is composed of the sublayers:
1. Self-attention layer
2. Multi-headed attention layer combining encoder outputs with results from
the previous self-attention layer.
3. Feedforward network (2 fully-connected layers)
"""
def __init__(self,
num_layers=6,
num_attention_heads=8,
intermediate_size=2048,
activation="relu",
dropout_rate=0.0,
attention_dropout_rate=0.0,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.0,
**kwargs):
"""Initialize a Transformer decoder.
Args:
num_layers: Number of layers.
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate (Feedforward) layer.
activation: Activation for the intermediate layer.
dropout_rate: Dropout probability.
attention_dropout_rate: Dropout probability for attention layers.
use_bias: Whether to use a bias in the attention layers. If set to
`False`, the attention layers are built without bias terms.
norm_first: Whether to normalize the inputs to the attention and
intermediate dense layers (pre-norm). If set to `False`, the outputs of
the attention and intermediate dense layers are normalized instead.
norm_epsilon: Epsilon value used to initialize normalization layers.
intermediate_dropout: Dropout probability for the intermediate dropout
layer.
**kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super(TransformerDecoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._activation = activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
def build(self, input_shape):
"""Implements build() for the layer."""
self.decoder_layers = []
for i in range(self.num_layers):
self.decoder_layers.append(
layers.TransformerDecoderBlock(
num_attention_heads=self.num_attention_heads,
intermediate_size=self._intermediate_size,
intermediate_activation=self._activation,
dropout_rate=self._dropout_rate,
attention_dropout_rate=self._attention_dropout_rate,
use_bias=self._use_bias,
norm_first=self._norm_first,
norm_epsilon=self._norm_epsilon,
intermediate_dropout=self._intermediate_dropout,
attention_initializer=attention_initializer(input_shape[2]),
name=("layer_%d" % i)))
self.output_normalization = tf.keras.layers.LayerNormalization(
epsilon=1e-6, dtype="float32")
super(TransformerDecoder, self).build(input_shape)
def get_config(self):
config = {
"num_layers": self.num_layers,
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self._intermediate_size,
"activation": self._activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout
}
base_config = super(TransformerDecoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
target,
memory,
self_attention_mask=None,
cross_attention_mask=None,
cache=None,
decode_loop_step=None,
return_all_decoder_outputs=False):
"""Return the output of the decoder layer stacks.
Args:
target: A tensor with shape `(batch_size, target_length, hidden_size)`.
memory: A tensor with shape `(batch_size, input_length, hidden_size)`.
self_attention_mask: A tensor with shape `(batch_size, target_length,
target_length)`, the mask for the decoder self-attention layer.
cross_attention_mask: A tensor with shape `(batch_size, target_length,
input_length)` which is the mask for encoder-decoder attention layer.
cache: (Used for fast decoding) A nested dictionary storing previous
decoder self-attention values. The items are:
{layer_n: {"k": A tensor with shape `(batch_size, i, key_channels)`,
"v": A tensor with shape `(batch_size, i, value_channels)`},
...}
decode_loop_step: An integer, the step number of the decoding loop. Used
only for autoregressive inference on TPU.
return_all_decoder_outputs: Return all decoder layer outputs.
Note that the outputs are layer normed.
This is useful when introducing per layer auxiliary loss.
Returns:
Output of decoder.
float32 tensor with shape `(batch_size, target_length, hidden_size)`.
"""
output_tensor = target
decoder_outputs = []
for layer_idx in range(self.num_layers):
transformer_inputs = [
output_tensor, memory, cross_attention_mask, self_attention_mask
]
# Gets the cache for decoding.
if cache is None:
output_tensor, _ = self.decoder_layers[layer_idx](transformer_inputs)
else:
cache_layer_idx = str(layer_idx)
output_tensor, cache[cache_layer_idx] = self.decoder_layers[layer_idx](
transformer_inputs,
cache=cache[cache_layer_idx],
decode_loop_step=decode_loop_step)
if return_all_decoder_outputs:
decoder_outputs.append(self.output_normalization(output_tensor))
if return_all_decoder_outputs:
return decoder_outputs
else:
return self.output_normalization(output_tensor)
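# Illustrative sketch: a training-style call to `TransformerDecoder`, with no
# cache and explicitly built masks. All sizes below are placeholders chosen
# only to make the shapes concrete.
def _transformer_decoder_usage_sketch():
  batch, target_len, source_len, hidden_size = 2, 5, 7, 16
  decoder = TransformerDecoder(num_layers=2, num_attention_heads=4,
                               intermediate_size=32)
  target = tf.random.uniform([batch, target_len, hidden_size])
  memory = tf.random.uniform([batch, source_len, hidden_size])
  # Causal mask so that target position i only attends to positions <= i.
  self_mask = tf.linalg.band_part(
      tf.ones([batch, target_len, target_len]), -1, 0)
  # Allow every target position to attend to every memory position.
  cross_mask = tf.ones([batch, target_len, source_len])
  return decoder(target, memory, self_attention_mask=self_mask,
                 cross_attention_mask=cross_mask)  # (2, 5, 16)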
def attention_initializer(hidden_size):
"""Initializer for attention layers in Seq2SeqTransformer."""
hidden_size = int(hidden_size)
limit = math.sqrt(6.0 / (hidden_size + hidden_size))
return tf.keras.initializers.RandomUniform(minval=-limit, maxval=limit)
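# A quick numeric check of the bound above (illustrative): with
# fan_in == fan_out == hidden_size, sqrt(6 / (hidden_size + hidden_size)) is
# exactly the Glorot-uniform limit, so this initializer behaves like Glorot
# uniform specialized to square attention projections.
def _attention_initializer_limit_sketch(hidden_size=512):
  limit = math.sqrt(6.0 / (2.0 * hidden_size))
  sample = attention_initializer(hidden_size)((hidden_size, hidden_size))
  tf.debugging.assert_less_equal(tf.reduce_max(tf.abs(sample)), limit)
  return limit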
| 25,791 | 40.333333 | 100 | py |
models | models-master/official/nlp/modeling/models/bert_token_classifier.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT token classifier."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
class BertTokenClassifier(tf.keras.Model):
"""Token classifier model based on a BERT-style transformer-based encoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertTokenClassifier allows a user to pass in a transformer stack, and
instantiates a token classification network based on the passed `num_classes`
argument.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a `get_embedding_table` method.
num_classes: Number of classes to predict from the classification network.
initializer: The initializer (if any) to use in the classification networks.
Defaults to a Glorot uniform initializer.
output: The output style for this network. Can be either `logits` or
`predictions`.
dropout_rate: The dropout probability of the token classification head.
output_encoder_outputs: Whether to include intermediate sequence output
in the final output.
"""
def __init__(self,
network,
num_classes,
initializer='glorot_uniform',
output='logits',
dropout_rate=0.1,
output_encoder_outputs=False,
**kwargs):
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a handle to the network inputs for use
# when we construct the Model object at the end of init.
inputs = network.inputs
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
outputs = network(inputs)
if isinstance(outputs, list):
sequence_output = outputs[0]
else:
sequence_output = outputs['sequence_output']
sequence_output = tf.keras.layers.Dropout(rate=dropout_rate)(
sequence_output)
classifier = tf.keras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=initializer,
name='predictions/transform/logits')
logits = classifier(sequence_output)
if output == 'logits':
output_tensors = {'logits': logits}
elif output == 'predictions':
output_tensors = {
'predictions': tf.keras.layers.Activation(tf.nn.log_softmax)(logits)
}
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
if output_encoder_outputs:
output_tensors['encoder_outputs'] = sequence_output
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super(BertTokenClassifier, self).__init__(
inputs=inputs, outputs=output_tensors, **kwargs)
self._network = network
config_dict = {
'network': network,
'num_classes': num_classes,
'initializer': initializer,
'output': output,
'output_encoder_outputs': output_encoder_outputs
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.classifier = classifier
self.logits = logits
@property
def checkpoint_items(self):
return dict(encoder=self._network)
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
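# Illustrative usage sketch: wiring a small encoder into the token classifier.
# `networks.BertEncoder` and the placeholder sizes below are assumptions made
# only for this example, not requirements of the class.
def _bert_token_classifier_usage_sketch():
  from official.nlp.modeling import networks  # assumed importable here
  encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
  model = BertTokenClassifier(network=encoder, num_classes=7,
                              output='predictions')
  outputs = model({
      'input_word_ids': tf.zeros([1, 8], dtype=tf.int32),
      'input_mask': tf.ones([1, 8], dtype=tf.int32),
      'input_type_ids': tf.zeros([1, 8], dtype=tf.int32),
  })
  return outputs['predictions']  # (1, 8, 7) per-token log-probabilities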
| 5,216 | 37.932836 | 80 | py |
models | models-master/official/nlp/modeling/models/xlnet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for XLNet classifier network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import xlnet
def _get_xlnet_base() -> tf.keras.layers.Layer:
"""Returns a trivial base XLNet model."""
return networks.XLNetBase(
vocab_size=100,
num_layers=2,
hidden_size=4,
num_attention_heads=2,
head_size=2,
inner_size=2,
dropout_rate=0.,
attention_dropout_rate=0.,
attention_type='bi',
bi_data=True,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
two_stream=False,
tie_attention_biases=True,
reuse_length=0,
inner_activation='relu')
class XLNetMaskedLMTest(tf.test.TestCase):
def test_xlnet_masked_lm_head(self):
hidden_size = 10
seq_length = 8
batch_size = 2
masked_lm = xlnet.XLNetMaskedLM(vocab_size=10,
hidden_size=hidden_size,
initializer='glorot_uniform')
sequence_data = np.random.uniform(size=(batch_size, seq_length))
embedding_table = np.random.uniform(size=(hidden_size, hidden_size))
mlm_output = masked_lm(sequence_data, embedding_table)
self.assertAllClose(mlm_output.shape, (batch_size, hidden_size))
class XLNetPretrainerTest(tf.test.TestCase):
def test_xlnet_trainer(self):
"""Validates that the Keras object can be created."""
seq_length = 4
num_predictions = 2
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetPretrainer(network=xlnet_base)
inputs = dict(
input_word_ids=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_word_ids'),
input_type_ids=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_type_ids'),
input_mask=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_mask'),
permutation_mask=tf.keras.layers.Input(
shape=(seq_length, seq_length,), dtype=tf.int32,
name='permutation_mask'),
target_mapping=tf.keras.layers.Input(
shape=(num_predictions, seq_length), dtype=tf.int32,
name='target_mapping'),
masked_tokens=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='masked_tokens'))
logits, _ = xlnet_trainer_model(inputs)
# [None, hidden_size, vocab_size]
expected_output_shape = [None, 4, 100]
self.assertAllEqual(expected_output_shape, logits.shape.as_list())
def test_xlnet_tensor_call(self):
"""Validates that the Keras object can be invoked."""
seq_length = 4
batch_size = 2
num_predictions = 2
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetPretrainer(network=xlnet_base)
sequence_shape = (batch_size, seq_length)
inputs = dict(
input_word_ids=np.random.randint(
10, size=sequence_shape, dtype='int32'),
input_type_ids=np.random.randint(2, size=sequence_shape, dtype='int32'),
input_mask=np.random.randint(2, size=sequence_shape).astype('int32'),
permutation_mask=np.random.randint(
2, size=(batch_size, seq_length, seq_length)).astype('int32'),
target_mapping=np.random.randint(
10, size=(num_predictions, seq_length), dtype='int32'),
masked_tokens=np.random.randint(
10, size=sequence_shape, dtype='int32'))
xlnet_trainer_model(inputs)
def test_serialize_deserialize(self):
"""Validates that the XLNet trainer can be serialized and deserialized."""
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetPretrainer(
network=xlnet_base,
mlm_activation='gelu',
mlm_initializer='random_normal')
# Create another XLNet trainer via serialization and deserialization.
config = xlnet_trainer_model.get_config()
new_xlnet_trainer_model = xlnet.XLNetPretrainer.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_xlnet_trainer_model.to_json()
# If serialization was successful, then the new config should match the old.
self.assertAllEqual(xlnet_trainer_model.get_config(),
new_xlnet_trainer_model.get_config())
class XLNetClassifierTest(tf.test.TestCase, parameterized.TestCase):
def test_xlnet_trainer(self):
"""Validate that the Keras object can be created."""
num_classes = 2
seq_length = 4
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetClassifier(
network=xlnet_base,
num_classes=num_classes,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
summary_type='last',
dropout_rate=0.1)
inputs = dict(
input_word_ids=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_word_ids'),
input_type_ids=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_type_ids'),
input_mask=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_mask'),
permutation_mask=tf.keras.layers.Input(
shape=(seq_length, seq_length,), dtype=tf.int32,
name='permutation_mask'),
masked_tokens=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='masked_tokens'))
logits = xlnet_trainer_model(inputs)
expected_classification_shape = [None, num_classes]
self.assertAllEqual(expected_classification_shape, logits.shape.as_list())
@parameterized.parameters(1, 2)
def test_xlnet_tensor_call(self, num_classes):
"""Validates that the Keras object can be invoked."""
seq_length = 4
batch_size = 2
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetClassifier(
network=xlnet_base,
num_classes=num_classes,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
summary_type='last',
dropout_rate=0.1)
sequence_shape = (batch_size, seq_length)
inputs = dict(
input_word_ids=np.random.randint(
10, size=sequence_shape, dtype='int32'),
input_type_ids=np.random.randint(2, size=sequence_shape, dtype='int32'),
input_mask=np.random.randint(2, size=sequence_shape).astype('int32'),
permutation_mask=np.random.randint(
2, size=(batch_size, seq_length, seq_length)).astype('int32'),
masked_tokens=np.random.randint(
10, size=sequence_shape, dtype='int32'))
xlnet_trainer_model(inputs)
def test_serialize_deserialize(self):
"""Validates that the XLNet trainer can be serialized and deserialized."""
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetClassifier(
network=xlnet_base,
num_classes=2,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
summary_type='last',
dropout_rate=0.1)
# Create another XLNet trainer via serialization and deserialization.
config = xlnet_trainer_model.get_config()
new_xlnet_trainer_model = xlnet.XLNetClassifier.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_xlnet_trainer_model.to_json()
# If serialization was successful, then the new config should match the old.
self.assertAllEqual(xlnet_trainer_model.get_config(),
new_xlnet_trainer_model.get_config())
class XLNetSpanLabelerTest(tf.test.TestCase):
def test_xlnet_trainer(self):
"""Validate that the Keras object can be created."""
top_n = 2
seq_length = 4
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetSpanLabeler(
network=xlnet_base,
start_n_top=top_n,
end_n_top=top_n,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
span_labeling_activation='tanh',
dropout_rate=0.1)
inputs = dict(
input_word_ids=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_word_ids'),
input_type_ids=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_type_ids'),
input_mask=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='input_mask'),
paragraph_mask=tf.keras.layers.Input(
shape=(seq_length,), dtype=tf.int32, name='paragraph_mask'),
class_index=tf.keras.layers.Input(
shape=(), dtype=tf.int32, name='class_index'),
start_positions=tf.keras.layers.Input(
shape=(), dtype=tf.int32, name='start_positions'))
outputs = xlnet_trainer_model(inputs)
self.assertIsInstance(outputs, dict)
# Test tensor value calls for the created model.
batch_size = 2
sequence_shape = (batch_size, seq_length)
inputs = dict(
input_word_ids=np.random.randint(
10, size=sequence_shape, dtype='int32'),
input_type_ids=np.random.randint(2, size=sequence_shape, dtype='int32'),
input_mask=np.random.randint(2, size=sequence_shape).astype('int32'),
paragraph_mask=np.random.randint(
1, size=(sequence_shape)).astype('int32'),
class_index=np.random.randint(1, size=(batch_size)).astype('uint8'),
start_positions=tf.random.uniform(
shape=(batch_size,), maxval=5, dtype=tf.int32))
common_keys = {
'start_logits', 'end_logits', 'start_predictions', 'end_predictions',
'class_logits',
}
inference_keys = {
'start_top_predictions', 'end_top_predictions', 'start_top_index',
'end_top_index',
}
outputs = xlnet_trainer_model(inputs)
self.assertSetEqual(common_keys | inference_keys, set(outputs.keys()))
outputs = xlnet_trainer_model(inputs, training=True)
self.assertIsInstance(outputs, dict)
self.assertSetEqual(common_keys, set(outputs.keys()))
self.assertIsInstance(outputs, dict)
def test_serialize_deserialize(self):
"""Validates that the XLNet trainer can be serialized and deserialized."""
# Build a simple XLNet based network to use with the XLNet trainer.
xlnet_base = _get_xlnet_base()
# Create an XLNet trainer with the created network.
xlnet_trainer_model = xlnet.XLNetSpanLabeler(
network=xlnet_base,
start_n_top=2,
end_n_top=2,
initializer=tf.keras.initializers.RandomNormal(stddev=0.1),
span_labeling_activation='tanh',
dropout_rate=0.1)
# Create another XLNet trainer via serialization and deserialization.
config = xlnet_trainer_model.get_config()
new_xlnet_trainer_model = xlnet.XLNetSpanLabeler.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_xlnet_trainer_model.to_json()
# If serialization was successful, then the new config should match the old.
self.assertAllEqual(xlnet_trainer_model.get_config(),
new_xlnet_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 12,608 | 38.158385 | 80 | py |
models | models-master/official/nlp/modeling/models/t5.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement T5 Transformer model by TF official NLP library.
Model paper: https://arxiv.org/pdf/1910.10683.pdf
T5TransformerParams and T5Transformer are public interfaces.
Other modules are implementation details, so users should never build libraries
depending on them.
To use with Keras, users can wrap them within Keras customized layers.
"""
import dataclasses
import functools
import math
from typing import Callable, Dict, Optional, Sequence, Text, Union
import numpy as np
import tensorflow as tf
from official.modeling import tf_utils
ShapeLike = Union[int, Sequence[int], tf.TensorShape]
Initializer = Callable[..., tf.Tensor]
class Module(tf.Module):
"""The nn Module extends from the tf.Module."""
def __init__(self, dtype: tf.DType = tf.float32, name: Optional[Text] = None):
"""Initializes the nn Module.
Args:
dtype: the variable allocation dtype.
name: a string for the module name.
"""
super().__init__(name=name)
self.dtype = dtype
def create_variable(self,
name: Text,
shape: ShapeLike,
initializer: Initializer,
dtype: tf.DType = tf.float32,
**kwargs):
initializer = tf_utils.clone_initializer(initializer)
return tf.Variable(initializer(shape, dtype=dtype, **kwargs), name=name)
def read_variable(self,
variable: tf.Variable,
as_dtype: Optional[tf.DType] = None):
if as_dtype is not None:
variable = tf.cast(variable, dtype=as_dtype)
return variable
@tf.custom_gradient
def dense_gradient(x: tf.Tensor):
"""Identity operation whose gradient is converted to a ``tf.Tensor``.
>>> embedding = tf.Variable(tf.random.normal([3, 3]))
>>> with tf.GradientTape() as tape:
... y = tf.nn.embedding_lookup(dense_gradient(embedding), [1])
>>> tape.gradient(y, embedding).numpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
Args:
x: A ``tf.Tensor``.
Returns:
The input ``tf.Tensor`` and a dense identity gradient function.
"""
def grad(dy):
if isinstance(dy, tf.IndexedSlices):
return tf.convert_to_tensor(dy)
else:
return dy
return x, grad
def make_attention_mask(query_input,
key_input,
pairwise_fn=tf.multiply,
dtype=tf.float32):
"""Mask-making helper for attention weights.
In case of 1d inputs (i.e., `[batch..., len_q]`, `[batch..., len_kv]`), the
attention weights will be `[batch..., heads, len_q, len_kv]` and this
function will produce `[batch..., 1, len_q, len_kv]`.
Args:
query_input: a batched, flat input of query_length size
key_input: a batched, flat input of key_length size
pairwise_fn: broadcasting elementwise comparison function
dtype: mask return dtype
Returns:
A `[batch..., 1, len_q, len_kv]` shaped mask for 1d attention.
"""
mask = pairwise_fn(
tf.expand_dims(query_input, axis=-1), tf.expand_dims(key_input, axis=-2))
mask = tf.expand_dims(mask, axis=-3)
return tf.cast(mask, dtype=dtype)
def make_causal_mask(x, dtype=tf.float32):
"""Make a causal mask for self-attention.
In case of 1d inputs (i.e., `[batch..., len]`), the self-attention weights
will be `[batch..., heads, len, len]` and this function will produce a
causal mask of shape `[batch..., 1, len, len]`.
Args:
x: input array of shape `[batch..., len]`
dtype: mask return dtype
Returns:
A `[batch..., 1, len, len]` shaped causal mask for 1d attention.
"""
x_shape = tf.shape(x)
idxs = tf.broadcast_to(tf.range(x_shape[-1], dtype=tf.int32), x_shape)
return make_attention_mask(idxs, idxs, tf.greater_equal, dtype=dtype)
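# Illustrative sketch of the shapes produced by the two mask helpers above;
# the length-3 inputs are placeholders.
def _mask_helpers_sketch():
  padding_mask = make_attention_mask(
      tf.ones([1, 3]), tf.ones([1, 3]))        # shape (1, 1, 3, 3)
  causal_mask = make_causal_mask(tf.ones([1, 3]))  # shape (1, 1, 3, 3)
  # causal_mask[0, 0] is lower triangular: query position q may attend to
  # key positions k <= q.
  return padding_mask, causal_mask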
class Embed(Module):
"""Embedding Module.
A parameterized function from integers [0, n) to d-dimensional vectors.
"""
def __init__(self,
vocab_size: int,
features: int,
embeddings_initializer: Optional[Initializer] = None,
compute_dtype: tf.DType = tf.float32,
**kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.features = features
self.compute_dtype = compute_dtype
if embeddings_initializer:
self.embed_init = embeddings_initializer
else:
self.embed_init = tf.keras.initializers.TruncatedNormal(stddev=1.0)
with self.name_scope:
self.embeddings = self.create_variable(
"embedding", [self.vocab_size, self.features],
self.embed_init,
dtype=self.dtype)
@tf.Module.with_name_scope
def __call__(self, inputs: tf.Tensor, one_hot: bool = True):
"""Embeds the inputs along the last dimension.
Args:
inputs: input data, the last dimension is to embed.
one_hot: whether to use one-hot matmul to gather embeddings.
Returns:
The output shape follows the input, with an additional `features`
dimension appended.
"""
if one_hot:
flat_inputs = tf.reshape(inputs, [-1])
one_hot_data = tf.one_hot(
flat_inputs, depth=self.vocab_size, dtype=self.compute_dtype)
embeddings = tf.matmul(
one_hot_data,
self.read_variable(self.embeddings, as_dtype=self.compute_dtype))
input_shape = tf_utils.get_shape_list(inputs)
embeddings = tf.reshape(embeddings, input_shape + [self.features])
return embeddings
else:
return tf.nn.embedding_lookup(
dense_gradient(
self.read_variable(self.embeddings, as_dtype=self.compute_dtype)),
inputs)
def attend(self, query):
"""Attends over the embedding using a query tensor.
Args:
query: array whose last dimension equals the feature depth `features` of
the embedding.
Returns:
A tensor with final dim `vocab_size` corresponding to the batched
inner-product of the array of query vectors against each embedding.
Commonly used for weight-sharing between embeddings and logit transform
in NLP models.
"""
return tf.matmul(
query,
self.read_variable(self.embeddings, as_dtype=query.dtype),
transpose_b=True)
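# Illustrative sketch of weight tying with `Embed`: the same table is used to
# embed input ids and, via `attend`, to produce output logits. Sizes are
# placeholders.
def _embed_weight_tying_sketch():
  embed = Embed(vocab_size=11, features=4, name="toy_embed")
  token_ids = tf.constant([[1, 2, 3]])
  hidden = embed(token_ids)      # (1, 3, 4) input embeddings
  logits = embed.attend(hidden)  # (1, 3, 11) logits over the vocabulary
  return logits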
class RMSNorm(Module):
"""A layernorm module in the T5 style.
No bias and no subtraction of mean.
"""
def __init__(self, hidden_size: int, epsilon: float = 1e-6, **kwargs):
super().__init__(**kwargs)
self.variance_epsilon = epsilon
with self.name_scope:
self.weight = self.create_variable(
"scale", [hidden_size],
dtype=self.dtype,
initializer=tf.keras.initializers.Ones())
@tf.Module.with_name_scope
def __call__(self, x):
# Keeps the computation inside the layer norm to be float32.
compute_dtype = x.dtype
x = tf.cast(x, dtype=tf.float32)
variance = tf.math.reduce_mean(tf.math.square(x), axis=-1, keepdims=True)
x = x * tf.math.rsqrt(variance + self.variance_epsilon)
x = tf.cast(x, dtype=compute_dtype)
return self.read_variable(self.weight, as_dtype=compute_dtype) * x
class Linear(Module):
"""Linear module, optionally including bias."""
def __init__(self,
in_features: int,
out_features: int,
use_bias: bool = True,
w_init: Optional[Initializer] = None,
b_init: Optional[Initializer] = None,
**kwargs):
"""Constructs a `Linear` module."""
super().__init__(**kwargs)
self.in_features = in_features
self.out_features = out_features
self.use_bias = use_bias
self.w_init = w_init
if self.use_bias:
self.b_init = b_init if b_init else tf.keras.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
with self.name_scope:
if self.w_init is None:
stddev = 1 / math.sqrt(self.in_features)
self.w_init = tf.keras.initializers.HeNormal()
self.w = self.create_variable(
"kernel", [self.in_features, self.out_features],
initializer=self.w_init,
dtype=self.dtype)
if self.use_bias:
self.b = self.create_variable(
"bias", [self.out_features],
initializer=self.b_init,
dtype=self.dtype)
@tf.Module.with_name_scope
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
outputs = tf.matmul(inputs,
self.read_variable(self.w, as_dtype=inputs.dtype))
if self.use_bias:
outputs = tf.add(outputs,
self.read_variable(self.b, as_dtype=inputs.dtype))
return outputs
class Linear3D(Module):
"""Linear3D module, optionally including bias.
Kernel stored as 2d parameter for compatibility with Adafactor optimizer.
"""
def __init__(self,
in_features: int,
out_features: int,
num_heads: int,
use_bias: bool = True,
to_3d: bool = True,
w_init: Optional[Initializer] = None,
b_init: Optional[Initializer] = None,
**kwargs):
"""Constructs a `Linear3D` module."""
super().__init__(**kwargs)
self.in_features = in_features
self.out_features = out_features
self.num_heads = num_heads
self.use_bias = use_bias
self.to_3d = to_3d
self.w_init = w_init
if self.to_3d:
self.kernel_2d_shape = (self.in_features,
self.num_heads * self.out_features)
self.kernel_3d_shape = (self.in_features, self.num_heads,
self.out_features)
self.bias_shape = (self.num_heads, self.out_features)
bias_rank = 2
else:
self.kernel_2d_shape = (self.in_features * self.num_heads,
self.out_features)
self.kernel_3d_shape = (self.num_heads, self.in_features,
self.out_features)
self.bias_shape = (self.out_features,)
bias_rank = 1
if self.use_bias:
self.b_init = b_init or tf.keras.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
with self.name_scope:
if self.w_init is None:
self.w_init = tf.keras.initializers.HeNormal()
self.w = self.create_variable(
"kernel",
self.kernel_2d_shape,
initializer=self.w_init,
dtype=self.dtype)
if self.use_bias:
self.b = self.create_variable(
"bias", self.bias_shape, initializer=self.b_init, dtype=self.dtype)
@tf.Module.with_name_scope
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
# B: batch size
# S: From Sequence length
# D: dimension
# N: Number of heads
# H: head size
compute_dtype = inputs.dtype
w = self.read_variable(self.w, as_dtype=compute_dtype)
w = tf.reshape(w, self.kernel_3d_shape)
if self.to_3d:
outputs = tf.einsum("BSD,DNH->BSNH", inputs, w)
else:
outputs = tf.einsum("BSNH,NHD->BSD", inputs, w)
if self.use_bias:
outputs = tf.add(outputs,
self.read_variable(self.b, as_dtype=compute_dtype))
return outputs
class Dropout(Module):
"""Randomly drop units in the input at a given rate."""
def __init__(self, rate: float, **kwargs):
"""Constructs a Dropout module.
Args:
rate: Probability that each element of x is discarded. Must be a scalar in
the range `[0, 1)`.
**kwargs: other keyword args.
"""
super().__init__(**kwargs)
self._rate = rate
@tf.Module.with_name_scope
def __call__(self,
x: tf.Tensor,
training: bool,
noise_shape: Optional[ShapeLike] = None) -> tf.Tensor:
"""call method for the Dropout module.
Args:
x: the input tensor.
training: whether this is a training pass.
noise_shape: (Optional) Shape vector controlling the shape of the random
noise used to apply dropout. If not set this will be the shape of the
input. If set it should be broadcastable to the input shape.
Returns:
A tensor after applying dropout.
"""
if not training:
return x
return tf.nn.dropout(x, rate=self._rate, noise_shape=noise_shape)
class FFN(Module):
"""Feed-forward Network. No layer norm, output dropout, or skip connection."""
activation_map = {
"relu": tf.nn.relu,
"gelu": functools.partial(tf.nn.gelu, approximate=True),
"swish": tf.nn.silu,
"silu": tf.nn.silu,
}
def __init__(self,
d_model: int,
d_ff: int,
activations: Sequence[str],
use_bias: bool = False,
dropout_rate: Optional[float] = 0.0,
layer_norm_epsilon: Optional[float] = 1e-6,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
**kwargs):
super().__init__(**kwargs)
self.use_bias = use_bias
with self.name_scope:
self.wi = []
self.activations = activations
for idx, act_fn in enumerate(activations):
if (act_fn is not None and act_fn != "linear" and
act_fn not in self.activation_map):
raise ValueError("Invalid activation function string is passed: %s" %
act_fn)
dense_name = "wi" if len(activations) == 1 else f"wi_{idx}"
self.wi.append(
Linear(
d_model,
d_ff,
use_bias=self.use_bias,
w_init=weight_initializer,
b_init=bias_initializer,
dtype=self.dtype,
name=dense_name))
self.wo = Linear(
d_ff,
d_model,
use_bias=self.use_bias,
w_init=weight_initializer,
b_init=bias_initializer,
dtype=self.dtype,
name="wo")
self.dropout = Dropout(rate=dropout_rate)
@tf.Module.with_name_scope
def __call__(self,
hidden_states: tf.Tensor,
training: bool = False) -> tf.Tensor:
h = hidden_states
factors = []
for wi, act_fn in zip(self.wi, self.activations):
if act_fn is None or act_fn == "linear":
factors.append(wi(h))
else:
factors.append(self.activation_map[act_fn](wi(h)))
h = functools.reduce(tf.math.multiply, factors)
h_shape = tf_utils.get_shape_list(h)
h_shape[-2] = 1
h = self.dropout(h, noise_shape=h_shape, training=training)
h = self.wo(h)
return h
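# Illustrative sketch: with a single activation this is the classic
# dense -> activation -> dense block; with two entries, the element-wise
# product of the branches yields a gated feed-forward (e.g. ("gelu", "linear")
# gives a GEGLU-style block as used in T5.1.1-like configurations).
def _gated_ffn_sketch():
  ffn = FFN(d_model=8, d_ff=16, activations=("gelu", "linear"),
            dropout_rate=0.0, name="toy_ffn")
  x = tf.random.uniform([2, 5, 8])
  return ffn(x)  # (2, 5, 8)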
class RelativePositionEmbedding(Module):
"""Relative position embeddings of T5 style."""
def __init__(self,
num_heads: int,
relative_attention_num_buckets: int = 32,
relative_attention_max_distance: int = 128,
bidirectional: bool = True,
embeddings_initializer: Optional[Initializer] = None,
compute_dtype: tf.DType = tf.float32,
**kwargs):
super().__init__(**kwargs)
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.bidirectional = bidirectional
self.relative_attention_max_distance = relative_attention_max_distance
with self.name_scope:
self.relative_attention_bias = Embed(
vocab_size=self.relative_attention_num_buckets,
features=self.num_heads,
embeddings_initializer=embeddings_initializer,
dtype=self.dtype,
compute_dtype=compute_dtype,
name="rel_embedding")
@staticmethod
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position.
If bidirectional=False, then positive relative positions are invalid.
We use smaller buckets for small absolute relative_position and larger
buckets for larger absolute relative_positions.
All relative positions >=max_distance map to the same bucket.
All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences
than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += tf.cast(tf.math.less(n, 0), tf.int32) * num_buckets
n = tf.math.abs(n)
else:
n = tf.math.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = tf.math.less(n, max_exact)
val_if_large = max_exact + tf.dtypes.cast(
tf.math.log(
tf.cast(n, tf.float32) / max_exact + np.finfo(np.float32).eps) /
math.log(max_distance / max_exact) * (num_buckets - max_exact),
tf.int32,
)
val_if_large = tf.math.minimum(val_if_large, num_buckets - 1)
ret += tf.where(is_small, n, val_if_large)
return ret
@tf.Module.with_name_scope
def __call__(self, qlen, klen):
context_position = tf.range(qlen)[:, None]
memory_position = tf.range(klen)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position,
bidirectional=self.bidirectional,
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance)
values = self.relative_attention_bias(rp_bucket)
values = tf.expand_dims(
tf.transpose(values, [2, 0, 1]),
axis=0) # shape (1, num_heads, qlen, klen)
return values
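# Illustrative sketch of the bucketing used above: offsets smaller than
# num_buckets // 4 (here 8, after halving for bidirectional attention) keep
# exact buckets, larger offsets are merged logarithmically, and offsets at or
# beyond max_distance all fall into the final bucket.
def _relative_position_bucket_sketch():
  distances = tf.constant([0, 1, 7, 8, 16, 64, 127, 128, 500])
  # relative_position = memory_position - query_position, so negate the
  # distances to feed non-negative "n" values into the helper.
  return RelativePositionEmbedding._relative_position_bucket(
      -distances, bidirectional=True, num_buckets=32, max_distance=128)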
class MultiHeadAttention(Module):
"""T5 Attention from Mesh TensorFlow."""
def __init__(self,
d_model: int,
d_kv: int,
num_heads: int,
use_bias: bool = False,
dropout_rate: Optional[float] = 0.0,
rescale_query: bool = False,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
**kwargs):
super().__init__(**kwargs)
with self.name_scope:
self.d_model = d_model
self.d_kv = d_kv
self.num_heads = num_heads
self.rescale_query = rescale_query
self.use_bias = use_bias
if rescale_query or weight_initializer is None:
query_w_init = weight_initializer
else:
init_std_rescaling = tf.math.sqrt(tf.cast(self.d_kv, dtype=self.dtype))
query_w_init = (
lambda *args, **kwargs: ( # pylint: disable=g-long-lambda
tf_utils.clone_initializer(weight_initializer)
(*args, **kwargs) / init_std_rescaling))
self.q = Linear3D(
self.d_model,
self.d_kv,
num_heads=self.num_heads,
use_bias=self.use_bias,
w_init=query_w_init,
b_init=bias_initializer,
dtype=self.dtype,
name="q")
self.k = Linear3D(
self.d_model,
self.d_kv,
num_heads=self.num_heads,
use_bias=self.use_bias,
w_init=weight_initializer,
b_init=bias_initializer,
dtype=self.dtype,
name="k")
self.v = Linear3D(
self.d_model,
self.d_kv,
num_heads=self.num_heads,
use_bias=self.use_bias,
w_init=weight_initializer,
b_init=bias_initializer,
dtype=self.dtype,
name="v")
self.o = Linear3D(
self.d_kv,
self.d_model,
num_heads=self.num_heads,
use_bias=self.use_bias,
to_3d=False,
w_init=weight_initializer,
b_init=bias_initializer,
dtype=self.dtype,
name="o")
self.dropout = Dropout(dropout_rate)
def _update_cache(self, key, value, cache, decode_position):
"""Updates cache states and gets full-length key/value tensors."""
# Combines cached keys and values with new keys and values.
# TPU one-hot handling.
key_seq_dim = cache["key"].shape.as_list()[1]
indices = tf.reshape(
tf.one_hot(decode_position, key_seq_dim, dtype=key.dtype),
[1, key_seq_dim, 1, 1])
key = cache["key"] + key * indices
value_seq_dim = cache["value"].shape.as_list()[1]
indices = tf.reshape(
tf.one_hot(decode_position, value_seq_dim, dtype=value.dtype),
[1, value_seq_dim, 1, 1])
value = cache["value"] + value * indices
# Update cache
cache["key"] = key
cache["value"] = value
return key, value
@tf.Module.with_name_scope
def __call__(self,
query,
mask=None,
kv=None,
position_bias=None,
cache: Optional[Dict[str, tf.Tensor]] = None,
decode_position=None,
training=False):
"""MultiHeadAttention at work.
Args:
query: Tensor of shape (bs, qlen, d_model).
mask: None or Tensor of shape (bs, n_heads, qlen, klen).
kv: None or Tensor of shape (bs, klen, d_model).
position_bias: None or Tensor of shape (bs, n_heads, qlen, klen).
cache: If not None, cache["key"] and cache["value"] are Tensors of shape
(bs, klen, n_heads, d_kv).
decode_position: If not None, which position of the sequence we are
decoding for. Ranges from 0 to klen - 1.
training: Affects the behavior of dropout.
Returns:
A dictionary, output["context"] is the output after attention,
output["cache"] contains updated cache for the next round of
autoregressive decoding.
"""
# Input is (bs, qlen, d_model)
use_cache = cache is not None
if kv is None:
kv = query
q = self.q(query)
if self.rescale_query:
q /= tf.math.sqrt(tf.cast(self.d_kv, dtype=q.dtype))
k = self.k(kv)
v = self.v(kv)
if use_cache:
k, v = self._update_cache(k, v, cache, decode_position)
# NOTE: T5 does not explicitly rescale the attention logits by
# 1/sqrt(q_dim)! This is folded into the initializers of the
# linear transformations, which is equivalent under Adafactor.
scores = tf.einsum("bqnd,bknd->bnqk", q, k) # (bs, n_heads, qlen, klen)
if position_bias is not None:
# If position_bias is None, the input embeddings should already include
# position embeddings.
if use_cache:
bias_shape = position_bias.shape.as_list()
position_bias = tf.slice(
position_bias, [0, 0, decode_position, 0],
[bias_shape[0], bias_shape[1], 1, bias_shape[3]])
scores += position_bias
if mask is not None:
scores += mask # (bs, n_heads, qlen, klen)
weights = tf.nn.softmax(tf.cast(scores, tf.float32), axis=-1)
# weights shape = (bs, n_heads, qlen, klen)
weights = tf.cast(weights, scores.dtype)
weight_shape = tf_utils.get_shape_list(weights)
# NOTE: T5 broadcasts along the "length" dim, but unclear which one that
# corresponds to. We assume it is the query dimension.
# (bs, n_heads, qlen, klen)
weight_shape[-2] = 1
weights = self.dropout(weights, training=training, noise_shape=weight_shape)
c = tf.einsum("bnqk,bknd->bqnd", weights, v)
c = self.o(c)
outputs = dict(context=c)
if cache:
outputs["cache"] = cache
return outputs
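# Illustrative sketch (inferred from `_update_cache` above): for step-by-step
# decoding, each attention layer needs a zero-initialized full-length
# key/value cache that is then filled in place at `decode_position`.
def _attention_cache_sketch(batch_size, max_decode_length, num_heads, d_kv,
                            dtype=tf.float32):
  """Builds an empty key/value cache for one MultiHeadAttention layer."""
  return {
      "key": tf.zeros([batch_size, max_decode_length, num_heads, d_kv],
                      dtype=dtype),
      "value": tf.zeros([batch_size, max_decode_length, num_heads, d_kv],
                        dtype=dtype),
  }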
class SelfAttention(Module):
"""Self attention block including residual connection."""
def __init__(self,
d_model: int,
d_kv: int,
num_heads: int,
dropout_rate: Optional[float] = 0.0,
layer_norm_epsilon: Optional[float] = 1e-6,
rescale_query: bool = False,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
**kwargs):
super().__init__(**kwargs)
with self.name_scope:
self.self_attention = MultiHeadAttention(
d_model=d_model,
d_kv=d_kv,
num_heads=num_heads,
dropout_rate=dropout_rate,
rescale_query=rescale_query,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.dtype,
name="attention")
self.layer_norm = RMSNorm(
hidden_size=d_model,
epsilon=layer_norm_epsilon,
dtype=self.dtype,
name="layer_norm")
self.dropout = Dropout(dropout_rate)
@tf.Module.with_name_scope
def __call__(self,
hidden_states,
attention_mask=None,
position_bias=None,
cache=None,
decode_position=None,
training=False):
norm_x = self.layer_norm(hidden_states)
attention_outputs = self.self_attention(
query=norm_x,
mask=attention_mask,
position_bias=position_bias,
cache=cache,
decode_position=decode_position,
training=training)
y = attention_outputs.pop("context")
tensor_shape = tf_utils.get_shape_list(y)
tensor_shape[-2] = 1
y = self.dropout(y, noise_shape=tensor_shape, training=training)
layer_output = hidden_states + y
attention_outputs["layer_output"] = layer_output
return attention_outputs
class CrossAttention(Module):
"""Cross attention block including residual connection."""
def __init__(self,
d_model: int,
d_kv: int,
num_heads: int,
dropout_rate: Optional[float] = 0.0,
layer_norm_epsilon: Optional[float] = 1e-6,
rescale_query: bool = False,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
**kwargs):
super().__init__(**kwargs)
with self.name_scope:
self.cross_attention = MultiHeadAttention(
d_model=d_model,
d_kv=d_kv,
num_heads=num_heads,
dropout_rate=dropout_rate,
rescale_query=rescale_query,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.dtype,
name="attention")
self.layer_norm = RMSNorm(
hidden_size=d_model,
epsilon=layer_norm_epsilon,
dtype=self.dtype,
name="layer_norm")
self.dropout = Dropout(dropout_rate)
@tf.Module.with_name_scope
def __call__(self,
hidden_states,
kv,
attention_mask=None,
position_bias=None,
cache=None,
training=False):
norm_x = self.layer_norm(hidden_states)
attention_outputs = self.cross_attention(
query=norm_x,
kv=kv,
mask=attention_mask,
position_bias=position_bias,
cache=cache,
training=training)
y = attention_outputs.pop("context")
tensor_shape = tf_utils.get_shape_list(y)
tensor_shape[-2] = 1
y = self.dropout(y, noise_shape=tensor_shape, training=training)
layer_output = hidden_states + y
attention_outputs["layer_output"] = layer_output
return attention_outputs
class EncoderBlock(Module):
"""Transformer Encoder Block with only self attention."""
def __init__(self,
d_model: int,
d_kv: int,
num_heads: int,
d_ff: int,
ffn_activations: Sequence[str] = ("relu",),
dropout_rate: Optional[float] = 0.0,
layer_norm_epsilon: Optional[float] = 1e-6,
rescale_query: bool = False,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
**kwargs):
super().__init__(**kwargs)
with self.name_scope:
self.self_attention = SelfAttention(
d_model=d_model,
d_kv=d_kv,
num_heads=num_heads,
dropout_rate=dropout_rate,
rescale_query=rescale_query,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.dtype,
name="self_attention")
self.ffn_layer_norm = RMSNorm(
hidden_size=d_model,
epsilon=layer_norm_epsilon,
dtype=self.dtype,
name="ffn_layer_norm")
self.ffn = FFN(
d_model=d_model,
d_ff=d_ff,
dropout_rate=dropout_rate,
activations=ffn_activations,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.dtype,
name="ffn")
self.ffn_output_dropout = Dropout(dropout_rate)
@tf.Module.with_name_scope
def __call__(self,
hidden_states,
attention_mask=None,
position_bias=None,
training=False):
attention_outputs = self.self_attention(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
training=training)
attn_output = attention_outputs["layer_output"]
ffn_output = self.ffn_layer_norm(attn_output)
ffn_output = self.ffn(ffn_output, training=training)
tensor_shape = tf_utils.get_shape_list(ffn_output)
tensor_shape[-2] = 1
ffn_output = self.ffn_output_dropout(
ffn_output, noise_shape=tensor_shape, training=training)
ffn_output = attn_output + ffn_output
return ffn_output
class EncDecoderBlock(Module):
"""Transformer Decoder Block with enc-decoder cross attention."""
def __init__(self,
d_model: int,
d_kv: int,
num_heads: int,
d_ff: int,
ffn_activations: Sequence[str] = ("relu",),
dropout_rate: Optional[float] = 0.0,
layer_norm_epsilon: Optional[float] = 1e-6,
rescale_query: bool = False,
weight_initializer: Optional[Initializer] = None,
bias_initializer: Optional[Initializer] = None,
**kwargs):
super().__init__(**kwargs)
with self.name_scope:
self.self_attention = SelfAttention(
d_model=d_model,
d_kv=d_kv,
num_heads=num_heads,
dropout_rate=dropout_rate,
rescale_query=rescale_query,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.dtype,
name="self_attention")
self.cross_attention = CrossAttention(
d_model=d_model,
d_kv=d_kv,
num_heads=num_heads,
dropout_rate=dropout_rate,
rescale_query=rescale_query,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.dtype,
name="cross_attention")
self.ffn_layer_norm = RMSNorm(
hidden_size=d_model,
epsilon=layer_norm_epsilon,
dtype=self.dtype,
name="ffn_layer_norm")
self.ffn = FFN(
d_model=d_model,
d_ff=d_ff,
dropout_rate=dropout_rate,
activations=ffn_activations,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
dtype=self.dtype,
name="ffn")
self.ffn_output_dropout = Dropout(dropout_rate,)
@tf.Module.with_name_scope
def __call__(self,
hidden_states,
encoder_hidden_states,
attention_mask=None,
encoder_decoder_mask=None,
position_bias=None,
cache=None,
decode_position=None,
training=False):
self_attention_outputs = self.self_attention(
hidden_states,
attention_mask=attention_mask,
decode_position=decode_position,
position_bias=position_bias,
cache=cache,
training=training)
if "cache" in self_attention_outputs:
cache = self_attention_outputs["cache"]
# No relative position bias is used for encoder-decoder cross attention.
cross_attention_outputs = self.cross_attention(
self_attention_outputs["layer_output"],
kv=encoder_hidden_states,
attention_mask=encoder_decoder_mask,
training=training)
attn_output = cross_attention_outputs["layer_output"]
ffn_output = self.ffn_layer_norm(attn_output)
ffn_output = self.ffn(ffn_output, training=training)
tensor_shape = tf_utils.get_shape_list(ffn_output)
tensor_shape[-2] = 1
ffn_output = self.ffn_output_dropout(
ffn_output, noise_shape=tensor_shape, training=training)
ffn_output = attn_output + ffn_output
return ffn_output, cache
@dataclasses.dataclass
class T5TransformerParams:
"""Transformer parameters."""
num_layers: int
d_model: int
d_kv: int
num_heads: int
d_ff: int
vocab_size: int
target_vocab_size: Optional[int] = None
dropout_rate: float = 0.0
layer_norm_epsilon: float = 1e-6
shared_embedding: bool = False
vocab_embeddings_initializer: Optional[Initializer] = None
relative_attention_num_buckets: int = 32
relative_attention_max_distance: int = 128
relative_embeddings_initializer: Optional[Initializer] = None
weight_initializer: Optional[Initializer] = (tf.keras.initializers.HeNormal())
bias_initializer: Optional[Initializer] = None
rescale_query: bool = False
bidirectional: bool = True
ffn_activations: Sequence[str] = ("relu",)
logits_via_embedding: bool = True
num_decoder_layers: Optional[int] = None
one_hot_embedding: bool = True
layer_sharing: bool = False
# If True, uses one relative embedding for all encoder layers and one for all
# decoder layers. Otherwise, each layer has its own relative embedding.
use_shared_relative_position_bias: bool = True
class Encoder(Module):
"""Transformer Model Encoder for sequence to sequence."""
def __init__(self,
config: T5TransformerParams,
shared_embedding: Optional[tf.Variable] = None,
compute_dtype: tf.DType = tf.float32,
**kwargs):
super().__init__(**kwargs)
self.config = config
self.compute_dtype = compute_dtype
self.embed_dim = config.d_model
with self.name_scope:
# Input Embedding.
if shared_embedding is None:
self.input_embed = Embed(
vocab_size=self.config.vocab_size,
features=self.config.d_model,
embeddings_initializer=self.config.vocab_embeddings_initializer,
dtype=self.dtype,
compute_dtype=self.compute_dtype,
name="input_embedding")
else:
self.input_embed = shared_embedding
# Creates an alias to the input embed for encoder-only models.
self.word_embed = self.input_embed
if config.use_shared_relative_position_bias:
self.relative_embedding = RelativePositionEmbedding(
num_heads=self.config.num_heads,
relative_attention_num_buckets=self.config
.relative_attention_num_buckets,
relative_attention_max_distance=self.config
.relative_attention_max_distance,
bidirectional=self.config.bidirectional,
embeddings_initializer=self.config.relative_embeddings_initializer,
dtype=self.dtype,
compute_dtype=self.compute_dtype,
name="relative_posemb")
else:
self.relative_embeddings = []
for layer_idx in range(self.config.num_layers):
relative_embedding = RelativePositionEmbedding(
num_heads=self.config.num_heads,
relative_attention_num_buckets=self.config
.relative_attention_num_buckets,
relative_attention_max_distance=self.config
.relative_attention_max_distance,
bidirectional=self.config.bidirectional,
embeddings_initializer=self.config
.relative_embeddings_initializer,
dtype=self.dtype,
compute_dtype=self.compute_dtype,
name=f"relative_posemb_{layer_idx}")
self.relative_embeddings.append(relative_embedding)
self.input_dropout = Dropout(self.config.dropout_rate,)
self.encoder_layers = []
for layer_idx in range(self.config.num_layers):
if self.config.layer_sharing and layer_idx > 0:
self.encoder_layers.append(self.encoder_layers[0])
else:
self.encoder_layers.append(
EncoderBlock(
d_model=self.config.d_model,
d_kv=self.config.d_kv,
num_heads=self.config.num_heads,
d_ff=self.config.d_ff,
dropout_rate=self.config.dropout_rate,
ffn_activations=self.config.ffn_activations,
rescale_query=self.config.rescale_query,
weight_initializer=self.config.weight_initializer,
bias_initializer=self.config.bias_initializer,
dtype=self.dtype,
name="encoder_block_%d" % layer_idx))
self.output_norm = RMSNorm(
hidden_size=self.config.d_model,
epsilon=self.config.layer_norm_epsilon,
dtype=self.dtype,
name="final_layer_norm")
self.output_dropout = Dropout(self.config.dropout_rate,)
@tf.Module.with_name_scope
def get_relpos_bias(self,
input_length: int,
dense_inputs: tf.Tensor,
layer_idx: Optional[int] = None) -> tf.Tensor:
if self.config.use_shared_relative_position_bias:
position_bias = self.relative_embedding(input_length, input_length)
else:
position_bias = self.relative_embeddings[layer_idx](input_length,
input_length)
if dense_inputs is not None:
# Here we ignore relative position bias for dense embeddings.
# TODO(yejiayu): If we proceed to video use cases, rework this part.
dense_input_length = tf_utils.get_shape_list(dense_inputs)[1]
# Position bias shape: [batch, 1, len, len]
paddings = tf.constant([[0, 0], [0, 0], [0, dense_input_length],
[0, dense_input_length]])
position_bias = tf.pad(position_bias, paddings, "CONSTANT")
return position_bias
@tf.Module.with_name_scope
def __call__(self,
inputs=None,
encoder_mask=None,
dense_inputs=None,
training=False):
"""Applies Transformer model on the inputs.
Args:
inputs: input word ids. Optional if dense data are provided.
encoder_mask: the encoder self-attention mask.
dense_inputs: dense input data, concatenated after the word embeddings if
word ids are provided.
training: whether this is a training pass, which affects dropout.
Returns:
output of a transformer encoder.
"""
# Casts inputs to the dtype.
if encoder_mask is not None:
encoder_mask = tf.cast(encoder_mask, self.compute_dtype)
cfg = self.config
inputs_array = []
if inputs is not None:
inputs_array.append(
self.input_embed(inputs, one_hot=cfg.one_hot_embedding))
if dense_inputs is not None:
inputs_array.append(dense_inputs)
if not inputs_array:
raise ValueError("At least one of inputs and dense_inputs must not be "
"None.")
x = tf.concat(inputs_array, axis=1)
tensor_shape = tf_utils.get_shape_list(x)
tensor_shape[-2] = 1
x = self.input_dropout(x, noise_shape=tensor_shape, training=training)
if inputs is not None:
input_length = tf_utils.get_shape_list(inputs)[1]
else:
input_length = 0
for i in range(cfg.num_layers):
position_bias = self.get_relpos_bias(input_length, dense_inputs, i)
x = self.encoder_layers[i](
x,
attention_mask=encoder_mask,
position_bias=position_bias,
training=training)
encoded = self.output_norm(x)
encoded = self.output_dropout(encoded, training=training)
return encoded
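# Illustrative usage sketch (added for exposition; not part of the original
# library code). It shows how a standalone Encoder can be built and run on a
# batch of token ids. The tiny hyperparameters and Ones() initializers below
# are arbitrary assumptions mirroring the unit-test settings, not a released
# T5 configuration.
def _encoder_usage_sketch():
  config = T5TransformerParams(
      num_layers=2,
      d_model=4,
      d_kv=3,
      num_heads=4,
      d_ff=16,
      vocab_size=10,
      vocab_embeddings_initializer=tf.keras.initializers.Ones(),
      relative_embeddings_initializer=tf.keras.initializers.Ones())
  encoder = Encoder(config)
  # Encodes a batch of 4 sequences of length 8; output shape is [4, 8, d_model].
  encoded = encoder(tf.zeros((4, 8), dtype=tf.int32))
  return encoded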
class Decoder(Module):
"""Transformer Model Decoder for sequence to sequence."""
def __init__(self,
config: T5TransformerParams,
shared_embedding: Optional[tf.Variable] = None,
compute_dtype: tf.DType = tf.float32,
**kwargs):
super().__init__(**kwargs)
self.config = config
self.compute_dtype = compute_dtype
if self.config.num_decoder_layers is None:
self.config.num_decoder_layers = self.config.num_layers
if not hasattr(
self.config,
"target_vocab_size") or self.config.target_vocab_size is None:
self.config.target_vocab_size = self.config.vocab_size
with self.name_scope:
# Target Embedding.
if shared_embedding is None:
self.target_embed = Embed(
vocab_size=self.config.target_vocab_size,
features=self.config.d_model,
embeddings_initializer=self.config.vocab_embeddings_initializer,
dtype=self.dtype,
compute_dtype=self.compute_dtype,
name="target_embedding")
else:
self.target_embed = shared_embedding
self.target_dropout = Dropout(self.config.dropout_rate,)
# Position bias for the target self attention.
if config.use_shared_relative_position_bias:
self.relative_embedding = RelativePositionEmbedding(
num_heads=self.config.num_heads,
relative_attention_num_buckets=self.config
.relative_attention_num_buckets,
relative_attention_max_distance=self.config
.relative_attention_max_distance,
bidirectional=self.config.bidirectional,
embeddings_initializer=self.config.relative_embeddings_initializer,
dtype=self.dtype,
compute_dtype=self.compute_dtype,
name="relative_posemb")
else:
self.relative_embeddings = []
for layer_idx in range(self.config.num_decoder_layers):
relative_embedding = RelativePositionEmbedding(
num_heads=self.config.num_heads,
relative_attention_num_buckets=self.config
.relative_attention_num_buckets,
relative_attention_max_distance=self.config
.relative_attention_max_distance,
bidirectional=self.config.bidirectional,
embeddings_initializer=self.config
.relative_embeddings_initializer,
dtype=self.dtype,
compute_dtype=self.compute_dtype,
name=f"relative_posemb_{layer_idx}")
self.relative_embeddings.append(relative_embedding)
self.decoder_layers = []
for layer_idx in range(self.config.num_decoder_layers):
if self.config.layer_sharing and layer_idx > 0:
self.decoder_layers.append(self.decoder_layers[0])
else:
self.decoder_layers.append(
EncDecoderBlock(
d_model=self.config.d_model,
d_kv=self.config.d_kv,
num_heads=self.config.num_heads,
d_ff=self.config.d_ff,
dropout_rate=self.config.dropout_rate,
ffn_activations=self.config.ffn_activations,
rescale_query=self.config.rescale_query,
weight_initializer=self.config.weight_initializer,
bias_initializer=self.config.bias_initializer,
dtype=self.dtype,
name="decoder_block_%d" % layer_idx))
self.output_norm = RMSNorm(
hidden_size=self.config.d_model,
epsilon=self.config.layer_norm_epsilon,
dtype=self.dtype,
name="final_layer_norm")
self.output_dropout = Dropout(self.config.dropout_rate,)
if not self.config.logits_via_embedding:
self.logits_dense = Linear(
in_features=self.config.d_model,
out_features=self.config.target_vocab_size,
use_bias=False,
dtype=self.dtype,
name="logits")
@tf.Module.with_name_scope
def get_relpos_bias(self, input_length: int, layer_idx: int) -> tf.Tensor:
if self.config.use_shared_relative_position_bias:
return self.relative_embedding(input_length, input_length)
else:
return self.relative_embeddings[layer_idx](input_length, input_length)
@tf.Module.with_name_scope
def __call__(self,
decoder_input_tokens,
encoded,
decoder_mask=None,
encoder_decoder_mask=None,
decode=False,
decode_position=None,
cache=None,
max_decode_len=None,
training=False):
"""Applies Transformer model on the inputs.
Args:
decoder_input_tokens: the decoder input tokens.
encoded: the encoder outputs.
decoder_mask: the decoder self-attention mask.
encoder_decoder_mask: the cross-attention mask.
decode: Whether to perform autoregressive decoding.
decode_position: integer, the position to decode.
cache: The cache dictionary of key, value tensors.
max_decode_len: An optional integer specifying the maximum decoding
length. Note that this is only used for defining the relative position
embedding parameters.
      training: Whether it is a training pass, affecting dropouts.
Returns:
      output of a transformer decoder, including
        1. logits: Logits for each word in the vocab.
        2. raw_logits: Logits along the model dimension.
3. cache: Used for decoding in inference mode.
"""
cfg = self.config
# Casts inputs to the dtype.
encoded = tf.cast(encoded, self.compute_dtype)
if decoder_mask is not None:
decoder_mask = tf.cast(decoder_mask, self.compute_dtype)
if encoder_decoder_mask is not None:
encoder_decoder_mask = tf.cast(encoder_decoder_mask, self.compute_dtype)
x = self.target_embed(decoder_input_tokens, one_hot=cfg.one_hot_embedding)
tensor_shape = tf_utils.get_shape_list(x)
tensor_shape[-2] = 1
x = self.target_dropout(x, noise_shape=tensor_shape, training=training)
for i in range(cfg.num_decoder_layers):
if cache is not None:
position_bias = self.get_relpos_bias(max_decode_len, i)
else:
input_length = tf_utils.get_shape_list(decoder_input_tokens)[1]
position_bias = self.get_relpos_bias(input_length, i)
if cache is None:
x, _ = self.decoder_layers[i](
x,
encoder_hidden_states=encoded,
attention_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask,
position_bias=position_bias,
training=training)
else:
x, cache[i] = self.decoder_layers[i](
x,
encoder_hidden_states=encoded,
attention_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask,
position_bias=position_bias,
decode_position=decode_position,
cache=cache[i],
training=training)
output = self.output_norm(x)
tensor_shape = tf_utils.get_shape_list(output)
tensor_shape[-2] = 1
output = self.target_dropout(
output, noise_shape=tensor_shape, training=training)
if self.config.logits_via_embedding:
logits = self.target_embed.attend(output)
logits = logits / math.sqrt(cfg.d_model)
else:
logits = self.logits_dense(output)
return dict(logits=logits, cache=cache, raw_logits=output)
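# Illustrative usage sketch (added for exposition; not part of the original
# library code): runs the Decoder in teacher-forcing mode against a dummy
# encoder output. Shapes and hyperparameters are assumptions chosen only to
# keep the snippet small and runnable.
def _decoder_usage_sketch():
  config = T5TransformerParams(
      num_layers=2,
      d_model=4,
      d_kv=3,
      num_heads=4,
      d_ff=16,
      vocab_size=10,
      vocab_embeddings_initializer=tf.keras.initializers.Ones(),
      relative_embeddings_initializer=tf.keras.initializers.Ones())
  decoder = Decoder(config)
  targets = tf.zeros((4, 8), dtype=tf.int32)
  encoded = tf.zeros((4, 8, config.d_model), dtype=tf.float32)
  outputs = decoder(targets, encoded)
  # outputs["logits"] has shape [4, 8, vocab_size]; outputs["cache"] is None
  # here because no autoregressive decoding cache was passed in.
  return outputs["logits"]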
class T5Transformer(Module):
"""Transformer Encoder+Decoder for sequence to sequence."""
def __init__(self,
config: T5TransformerParams,
compute_dtype: tf.DType = tf.float32,
**kwargs):
super().__init__(**kwargs)
# Builds the model components.
shared_embedding = config.shared_embedding
self.compute_dtype = compute_dtype
self.decoder_cfg = dataclasses.replace(config, bidirectional=False)
if self.decoder_cfg.num_decoder_layers is None:
self.decoder_cfg.num_decoder_layers = self.decoder_cfg.num_layers
self.encoder_cfg = dataclasses.replace(config, bidirectional=True)
with self.name_scope:
if shared_embedding:
self.shared_embedding = Embed(
vocab_size=config.vocab_size,
features=config.d_model,
embeddings_initializer=config.vocab_embeddings_initializer,
dtype=self.dtype,
compute_dtype=self.compute_dtype,
name="shared")
else:
self.shared_embedding = None
self.encoder = Encoder(
self.encoder_cfg,
self.shared_embedding,
dtype=self.dtype,
compute_dtype=self.compute_dtype)
self.decoder = Decoder(
self.decoder_cfg,
self.shared_embedding,
dtype=self.dtype,
compute_dtype=self.compute_dtype)
def encode(self,
encoder_input_tokens=None,
encoder_segment_ids=None,
encoder_dense_inputs=None,
encoder_dense_segment_ids=None,
training=False):
eligible_position_array = []
if encoder_input_tokens is not None:
eligible_position_array.append(
tf.cast(tf.not_equal(encoder_input_tokens, 0), self.compute_dtype))
if encoder_dense_inputs is not None:
eligible_dense_positions = tf.cast(
tf.reduce_any(tf.not_equal(encoder_dense_inputs, 0), axis=-1),
self.compute_dtype)
eligible_position_array.append(eligible_dense_positions)
if not eligible_position_array:
raise ValueError("At least one of encoder_input_tokens and"
" encoder_dense_inputs must be provided.")
eligible_positions = tf.concat(eligible_position_array, axis=1)
encoder_mask = make_attention_mask(
eligible_positions, eligible_positions, dtype=tf.bool)
encoder_segment_id_array = []
if encoder_segment_ids is not None:
encoder_segment_id_array.append(encoder_segment_ids)
if encoder_dense_segment_ids is not None:
encoder_segment_id_array.append(encoder_dense_segment_ids)
if encoder_segment_id_array:
encoder_segment_ids = tf.concat(encoder_segment_id_array, axis=1)
segment_mask = make_attention_mask(
encoder_segment_ids, encoder_segment_ids, tf.equal, dtype=tf.bool)
encoder_mask = tf.math.logical_and(encoder_mask, segment_mask)
encoder_mask = (1.0 - tf.cast(encoder_mask, self.compute_dtype)) * -1e9
return self.encoder(
encoder_input_tokens,
encoder_mask,
encoder_dense_inputs,
training=training)
def decode(
self,
encoded,
decoder_target_tokens,
encoder_input_tokens=None, # only used for masks
encoder_dense_inputs=None,
decoder_input_tokens=None,
encoder_segment_ids=None,
encoder_dense_segment_ids=None,
decoder_segment_ids=None,
decode_position=None,
cache=None,
max_decode_len=None,
decode=False,
training=False) -> Dict[str, tf.Tensor]:
eligible_inputs_array = []
if encoder_input_tokens is not None:
eligible_inputs = tf.cast(
tf.not_equal(encoder_input_tokens, 0), self.compute_dtype)
eligible_inputs_array.append(eligible_inputs)
if encoder_dense_inputs is not None:
eligible_dense_inputs = tf.cast(
tf.reduce_any(tf.not_equal(encoder_dense_inputs, 0), axis=-1),
self.compute_dtype)
eligible_inputs_array.append(eligible_dense_inputs)
eligible_inputs = tf.concat(eligible_inputs_array, axis=1)
if decode:
# For decoding, the decoder_input_tokens is the decoder_target_tokens.
decoder_input_tokens = decoder_target_tokens
# fast autoregressive decoding uses only a special encoder-decoder mask
decoder_mask = None
encoder_decoder_mask = make_attention_mask(
tf.cast(
tf.not_equal(tf.ones_like(decoder_target_tokens), 0),
self.compute_dtype),
eligible_inputs,
dtype=tf.bool)
else:
      # Note that masks should be created using decoder_target_tokens.
eligible_targets = tf.cast(
tf.not_equal(decoder_target_tokens, 0), self.compute_dtype)
decoder_mask = tf.math.logical_and(
make_attention_mask(
eligible_targets, eligible_targets, dtype=tf.bool),
make_causal_mask(decoder_target_tokens, dtype=tf.bool))
encoder_decoder_mask = make_attention_mask(
eligible_targets, eligible_inputs, dtype=tf.bool)
if encoder_segment_ids is not None:
if decoder_mask is not None:
decoder_mask = tf.math.logical_and(
decoder_mask,
make_attention_mask(
decoder_segment_ids,
decoder_segment_ids,
tf.equal,
dtype=tf.bool))
if encoder_dense_segment_ids is not None:
encoder_segment_ids = tf.concat(
[encoder_segment_ids, encoder_dense_segment_ids], axis=1)
encoder_decoder_mask = tf.math.logical_and(
encoder_decoder_mask,
make_attention_mask(
decoder_segment_ids,
encoder_segment_ids,
tf.equal,
dtype=tf.bool))
if decoder_mask is not None:
decoder_mask = (1.0 - tf.cast(decoder_mask, self.compute_dtype)) * -1e9
encoder_decoder_mask = (
1.0 - tf.cast(encoder_decoder_mask, self.compute_dtype)) * -1e9
outputs = self.decoder(
decoder_input_tokens,
encoded,
decode_position=decode_position,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask,
cache=cache,
max_decode_len=max_decode_len,
decode=decode,
training=training)
outputs["encoded"] = encoded
return outputs
@tf.Module.with_name_scope
def __call__(self,
encoder_input_tokens=None,
decoder_target_tokens=None,
encoder_dense_inputs=None,
encoder_dense_segment_ids=None,
decoder_input_tokens=None,
encoder_segment_ids=None,
decoder_segment_ids=None,
training=False):
"""Applies Transformer model on the inputs.
Args:
encoder_input_tokens: input tokens to the encoder.
decoder_target_tokens: target tokens to the decoder.
encoder_dense_inputs: input dense vectors to the encoder.
      encoder_dense_segment_ids: dense input segmentation info for packed
        examples.
      decoder_input_tokens: input tokens to the decoder, only required for
        training.
      encoder_segment_ids: input segmentation info for packed examples.
      decoder_segment_ids: target segmentation info for packed examples.
      training: whether it is a training pass, affecting dropouts.
Returns:
a dictionary of logits/cache.
"""
encoded = self.encode(
encoder_input_tokens=encoder_input_tokens,
encoder_segment_ids=encoder_segment_ids,
encoder_dense_inputs=encoder_dense_inputs,
encoder_dense_segment_ids=encoder_dense_segment_ids,
training=training)
outputs = self.decode(
encoded=encoded,
decoder_target_tokens=decoder_target_tokens,
encoder_input_tokens=encoder_input_tokens, # only used for masks.
encoder_dense_inputs=encoder_dense_inputs, # only used for masks.
decoder_input_tokens=decoder_input_tokens,
encoder_segment_ids=encoder_segment_ids,
encoder_dense_segment_ids=encoder_dense_segment_ids,
decoder_segment_ids=decoder_segment_ids,
training=training)
outputs["encoded"] = encoded
return outputs
@property
def checkpoint_items(self):
return dict(encoder=self.encoder, decoder=self.decoder)
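# Illustrative usage sketch (added for exposition; not part of the original
# library code): wires a tiny T5Transformer through a full encode plus
# teacher-forced decode pass. The configuration mirrors the small settings used
# in the unit tests and is an assumption, not a released checkpoint
# configuration.
def _t5_transformer_usage_sketch():
  config = T5TransformerParams(
      num_layers=1,
      d_model=8,
      d_kv=4,
      num_heads=4,
      d_ff=32,
      vocab_size=10,
      shared_embedding=True,
      layer_sharing=False,
      ffn_activations=("relu",),
      logits_via_embedding=True)
  transformer = T5Transformer(config)
  tokens = tf.constant([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]], dtype=tf.int32)
  outputs = transformer(
      encoder_input_tokens=tokens,
      decoder_input_tokens=tokens,
      decoder_target_tokens=tokens)
  # outputs["logits"]: [batch, target_len, vocab_size];
  # outputs["encoded"]: [batch, input_len, d_model].
  return outputs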
| 57,312 | 35.389206 | 80 | py |
models | models-master/official/nlp/modeling/models/xlnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XLNet models."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Mapping, Optional, Union
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
class XLNetMaskedLM(tf.keras.layers.Layer):
"""XLNet pretraining head."""
def __init__(self,
vocab_size: int,
hidden_size: int,
initializer: str = 'glorot_uniform',
activation: str = 'gelu',
name=None,
**kwargs):
super().__init__(name=name, **kwargs)
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._initializer = initializer
self._activation = activation
def build(self, input_shape):
self.dense = tf.keras.layers.Dense(
units=self._hidden_size,
activation=self._activation,
kernel_initializer=self._initializer,
name='transform/dense')
self.layer_norm = tf.keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12, name='transform/LayerNorm')
self.bias = self.add_weight(
'output_bias/bias',
shape=(self._vocab_size,),
initializer='zeros',
trainable=True)
super().build(input_shape)
def call(self,
sequence_data: tf.Tensor,
embedding_table: tf.Tensor):
lm_data = self.dense(sequence_data)
lm_data = self.layer_norm(lm_data)
lm_data = tf.matmul(lm_data, embedding_table, transpose_b=True)
logits = tf.nn.bias_add(lm_data, self.bias)
return logits
def get_config(self) -> Mapping[str, Any]:
config = {
'vocab_size':
self._vocab_size,
'hidden_size':
self._hidden_size,
'initializer':
self._initializer
}
base_config = super(XLNetMaskedLM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf.keras.utils.register_keras_serializable(package='Text')
class XLNetPretrainer(tf.keras.Model):
"""XLNet-based pretrainer.
This is an implementation of the network structure surrounding a
Transformer-XL encoder as described in "XLNet: Generalized Autoregressive
Pretraining for Language Understanding" (https://arxiv.org/abs/1906.08237).
Args:
    network: An XLNet/Transformer-XL based network. This network should output
      a sequence output and a list of `state` tensors.
mlm_activation: The activation (if any) to use in the Masked LM network. If
None, then no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM. Defaults
to a Glorot uniform initializer.
"""
def __init__(
self,
network: Union[tf.keras.layers.Layer, tf.keras.Model],
mlm_activation=None,
mlm_initializer='glorot_uniform',
name: Optional[str] = None,
**kwargs):
super().__init__(name=name, **kwargs)
self._config = {
'network': network,
'mlm_activation': mlm_activation,
'mlm_initializer': mlm_initializer,
}
self._network = network
self._hidden_size = network.get_config()['hidden_size']
self._vocab_size = network.get_config()['vocab_size']
self._activation = mlm_activation
self._initializer = mlm_initializer
self._masked_lm = XLNetMaskedLM(
vocab_size=self._vocab_size,
hidden_size=self._hidden_size,
initializer=self._initializer)
def call(self, inputs: Mapping[str, Any]): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
input_word_ids = inputs['input_word_ids']
input_type_ids = inputs['input_type_ids']
masked_tokens = inputs['masked_tokens']
permutation_mask = inputs['permutation_mask']
target_mapping = inputs['target_mapping']
state = inputs.get('state', None)
attention_output, state = self._network(
input_ids=input_word_ids,
segment_ids=input_type_ids,
input_mask=None,
state=state,
permutation_mask=permutation_mask,
target_mapping=target_mapping,
masked_tokens=masked_tokens)
embedding_table = self._network.get_embedding_lookup_table()
mlm_outputs = self._masked_lm(
sequence_data=attention_output,
embedding_table=embedding_table)
return mlm_outputs, state
def get_config(self) -> Mapping[str, Any]:
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def checkpoint_items(self):
return dict(encoder=self._network)
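# Illustrative usage sketch (added for exposition; not part of the original
# library code). `xlnet_network` is assumed to be an already-built
# Transformer-XL encoder (for example a `networks.XLNetBase` instance); its
# constructor arguments are intentionally omitted. The tensor shapes and dtypes
# below are assumptions and must match whatever the underlying network expects.
def _xlnet_pretrainer_usage_sketch(xlnet_network,
                                   batch_size=2,
                                   seq_length=8,
                                   num_predictions=2):
  pretrainer = XLNetPretrainer(network=xlnet_network)
  inputs = {
      'input_word_ids': tf.zeros((batch_size, seq_length), dtype=tf.int32),
      'input_type_ids': tf.zeros((batch_size, seq_length), dtype=tf.int32),
      'masked_tokens': tf.zeros((batch_size, seq_length), dtype=tf.float32),
      'permutation_mask': tf.zeros(
          (batch_size, seq_length, seq_length), dtype=tf.float32),
      'target_mapping': tf.zeros(
          (batch_size, num_predictions, seq_length), dtype=tf.float32),
  }
  # Returns masked-LM logits and the updated Transformer-XL memory state.
  mlm_logits, state = pretrainer(inputs)
  return mlm_logits, state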
@tf.keras.utils.register_keras_serializable(package='Text')
class XLNetClassifier(tf.keras.Model):
"""Classifier model based on XLNet.
This is an implementation of the network structure surrounding a
Transformer-XL encoder as described in "XLNet: Generalized Autoregressive
Pretraining for Language Understanding" (https://arxiv.org/abs/1906.08237).
  Note: This model does not utilize the memory mechanism used in the original
  XLNet Classifier.
Args:
    network: An XLNet/Transformer-XL based network. This network should output
      a sequence output and a list of `state` tensors.
num_classes: Number of classes to predict from the classification network.
initializer: The initializer (if any) to use in the classification networks.
Defaults to a RandomNormal initializer.
summary_type: Method used to summarize a sequence into a compact vector.
dropout_rate: The dropout probability of the cls head.
head_name: Name of the classification head.
"""
def __init__(
self,
network: Union[tf.keras.layers.Layer, tf.keras.Model],
num_classes: int,
initializer: tf.keras.initializers.Initializer = 'random_normal',
summary_type: str = 'last',
dropout_rate: float = 0.1,
head_name: str = 'sentence_prediction', # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
super().__init__(**kwargs)
self._network = network
self._initializer = initializer
self._summary_type = summary_type
self._num_classes = num_classes
self._config = {
'network': network,
'initializer': initializer,
'num_classes': num_classes,
'summary_type': summary_type,
'dropout_rate': dropout_rate,
'head_name': head_name,
}
if summary_type == 'last':
cls_token_idx = -1
elif summary_type == 'first':
cls_token_idx = 0
else:
raise ValueError('Invalid summary type provided: %s.' % summary_type)
self.classifier = layers.ClassificationHead(
inner_dim=network.get_config()['hidden_size'],
num_classes=num_classes,
initializer=initializer,
dropout_rate=dropout_rate,
cls_token_idx=cls_token_idx,
name=head_name)
def call(self, inputs: Mapping[str, Any]): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
input_ids = inputs['input_word_ids']
segment_ids = inputs['input_type_ids']
input_mask = tf.cast(inputs['input_mask'], tf.float32)
state = inputs.get('mems', None)
attention_output, _ = self._network(
input_ids=input_ids,
segment_ids=segment_ids,
input_mask=input_mask,
state=state)
logits = self.classifier(attention_output)
return logits
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def checkpoint_items(self):
items = dict(encoder=self._network)
if hasattr(self.classifier, 'checkpoint_items'):
for key, item in self.classifier.checkpoint_items.items():
items['.'.join([self.classifier.name, key])] = item
return items
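# Illustrative usage sketch (added for exposition; not part of the original
# library code). `xlnet_network` is assumed to be an already-built
# Transformer-XL encoder (for example a `networks.XLNetBase` instance) that
# exposes `hidden_size` in its config; the shapes below are assumptions.
def _xlnet_classifier_usage_sketch(xlnet_network,
                                   num_classes=3,
                                   batch_size=2,
                                   seq_length=8):
  classifier = XLNetClassifier(network=xlnet_network, num_classes=num_classes)
  inputs = {
      'input_word_ids': tf.ones((batch_size, seq_length), dtype=tf.int32),
      'input_type_ids': tf.zeros((batch_size, seq_length), dtype=tf.int32),
      'input_mask': tf.ones((batch_size, seq_length), dtype=tf.int32),
  }
  # Returns classification logits of shape [batch_size, num_classes].
  return classifier(inputs)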
@tf.keras.utils.register_keras_serializable(package='Text')
class XLNetSpanLabeler(tf.keras.Model):
"""Span labeler model based on XLNet.
This is an implementation of the network structure surrounding a
Transformer-XL encoder as described in "XLNet: Generalized Autoregressive
Pretraining for Language Understanding" (https://arxiv.org/abs/1906.08237).
Args:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a "get_embedding_table" method.
start_n_top: Beam size for span start.
end_n_top: Beam size for span end.
dropout_rate: The dropout rate for the span labeling layer.
span_labeling_activation: The activation for the span labeling head.
initializer: The initializer (if any) to use in the span labeling network.
Defaults to a Glorot uniform initializer.
"""
def __init__(
self,
network: Union[tf.keras.layers.Layer, tf.keras.Model],
start_n_top: int = 5,
end_n_top: int = 5,
dropout_rate: float = 0.1,
span_labeling_activation: tf.keras.initializers.Initializer = 'tanh',
initializer: tf.keras.initializers.Initializer = 'glorot_uniform', # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
super().__init__(**kwargs)
self._config = {
'network': network,
'start_n_top': start_n_top,
'end_n_top': end_n_top,
'dropout_rate': dropout_rate,
'span_labeling_activation': span_labeling_activation,
'initializer': initializer,
}
network_config = network.get_config()
try:
input_width = network_config['inner_size']
self._xlnet_base = True
except KeyError:
# BertEncoder uses 'intermediate_size' due to legacy naming.
input_width = network_config['intermediate_size']
self._xlnet_base = False
self._network = network
self._initializer = initializer
self._start_n_top = start_n_top
self._end_n_top = end_n_top
self._dropout_rate = dropout_rate
self._activation = span_labeling_activation
self.span_labeling = networks.XLNetSpanLabeling(
input_width=input_width,
start_n_top=self._start_n_top,
end_n_top=self._end_n_top,
activation=self._activation,
dropout_rate=self._dropout_rate,
initializer=self._initializer)
def call(self, inputs: Mapping[str, Any]): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
input_word_ids = inputs['input_word_ids']
input_type_ids = inputs['input_type_ids']
input_mask = inputs['input_mask']
class_index = inputs['class_index']
paragraph_mask = inputs['paragraph_mask']
start_positions = inputs.get('start_positions', None)
if self._xlnet_base:
attention_output, _ = self._network(
input_ids=input_word_ids,
segment_ids=input_type_ids,
input_mask=input_mask)
else:
network_output_dict = self._network(dict(
input_word_ids=input_word_ids,
input_type_ids=input_type_ids,
input_mask=input_mask))
attention_output = network_output_dict['sequence_output']
outputs = self.span_labeling(
sequence_data=attention_output,
class_index=class_index,
paragraph_mask=paragraph_mask,
start_positions=start_positions)
return outputs
@property
def checkpoint_items(self):
return dict(encoder=self._network)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
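# Illustrative usage sketch (added for exposition; not part of the original
# library code). `xlnet_network` is assumed to be an already-built
# Transformer-XL encoder (for example a `networks.XLNetBase` instance); the
# shapes and dtypes below are assumptions.
def _xlnet_span_labeler_usage_sketch(xlnet_network, batch_size=2, seq_length=8):
  span_labeler = XLNetSpanLabeler(network=xlnet_network)
  inputs = {
      'input_word_ids': tf.ones((batch_size, seq_length), dtype=tf.int32),
      'input_type_ids': tf.zeros((batch_size, seq_length), dtype=tf.int32),
      'input_mask': tf.ones((batch_size, seq_length), dtype=tf.int32),
      # Index of the [CLS]-style token used for answerability prediction.
      'class_index': tf.zeros((batch_size,), dtype=tf.int32),
      # 1.0 for paragraph tokens that are eligible as answer spans.
      'paragraph_mask': tf.ones((batch_size, seq_length), dtype=tf.float32),
  }
  # Returns a dict of span-labeling outputs (start/end logits and, at
  # inference time, top-k span indices).
  return span_labeler(inputs)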
| 12,004 | 33.696532 | 131 | py |
models | models-master/official/nlp/modeling/models/t5_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.nlp.modeling.models import t5
def _create_cache(batch_size,
init_decode_length,
num_heads,
head_size,
dtype=tf.float32):
if num_heads is None:
kv_shape = [batch_size, init_decode_length, head_size]
else:
kv_shape = [batch_size, init_decode_length, num_heads, head_size]
return {
"key": tf.zeros(kv_shape, dtype=dtype),
"value": tf.zeros(kv_shape, dtype=dtype)
}
class ModulesTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(("bfloat16", tf.bfloat16),
("float32", tf.float32))
def test_embed(self, dtype):
l = t5.Embed(vocab_size=5, features=4, compute_dtype=dtype, name="foo")
inputs = np.array([[2, 3], [1, 2]], dtype=np.int32)
inputs = tf.convert_to_tensor(inputs)
one_hot_outputs = l(inputs, one_hot=True)
gather_outputs = l(inputs, one_hot=False)
self.assertEqual(one_hot_outputs.shape, (2, 2, 4))
self.assertLen(l.trainable_variables, 1)
self.assertAllClose(one_hot_outputs, gather_outputs)
outputs = l.attend(query=tf.zeros((2, 2, 4), dtype))
self.assertEqual(outputs.shape, (2, 2, 5))
# Test initializers.
l = t5.Embed(
vocab_size=5,
features=4,
compute_dtype=dtype,
name="foo",
embeddings_initializer=tf.keras.initializers.Zeros())
self.assertAllClose(l(inputs), tf.zeros((2, 2, 4), dtype))
@parameterized.named_parameters(("bfloat16", tf.bfloat16),
("float32", tf.float32))
def test_rms_norm(self, dtype):
l = t5.RMSNorm(hidden_size=4, epsilon=0.0, name="foo")
inputs = tf.ones((2, 4), dtype=dtype)
outputs = l(inputs)
self.assertAllEqual(l(inputs), inputs)
self.assertEqual(outputs.dtype, dtype)
self.assertLen(l.trainable_variables, 1)
self.assertIn("foo/scale", l.trainable_variables[0].name)
@parameterized.named_parameters(("bfloat16", tf.bfloat16),
("float32", tf.float32))
def test_linear(self, dtype):
l = t5.Linear(
in_features=4,
out_features=4,
w_init=tf.keras.initializers.Ones(),
name="foo")
inputs = tf.ones((2, 4), dtype=dtype)
outputs = l(inputs)
self.assertEqual(outputs.shape, inputs.shape)
self.assertEqual(outputs.dtype, dtype)
self.assertLen(l.trainable_variables, 2)
def test_linear3d(self):
batch_size = 2
l = t5.Linear3D(
in_features=4,
out_features=4,
num_heads=2,
to_3d=True,
w_init=tf.keras.initializers.Ones(),
name="foo")
inputs = np.ones((batch_size, 2, 4), dtype=np.float32)
self.assertEqual(l(inputs).shape, (batch_size, 2, 2, 4))
l = t5.Linear3D(
in_features=2,
out_features=4,
num_heads=2,
to_3d=False,
w_init=tf.keras.initializers.Ones(),
name="foo")
inputs = np.ones((batch_size, 2, 2, 2), dtype=np.float32)
self.assertEqual(l(inputs).shape, (batch_size, 2, 4))
def test_ffn(self):
inputs = np.ones((2, 4), dtype=np.float32)
for activation in ["relu", "linear", "gelu", "swish"]:
l = t5.FFN(
d_model=4,
d_ff=8,
use_bias=True,
dropout_rate=0.1,
activations=[activation],
name="foo")
self.assertEqual(l(inputs).shape, inputs.shape)
self.assertLen(l.trainable_variables, 4)
l = t5.FFN(
d_model=4,
d_ff=8,
dropout_rate=0.1,
activations=["linear", "gelu"],
name="bar")
self.assertLen(l.trainable_variables, 3)
self.assertEqual(l(inputs).shape, inputs.shape)
@parameterized.named_parameters(("bfloat16", tf.bfloat16),
("float32", tf.float32))
def test_relative_position(self, dtype):
l = t5.RelativePositionEmbedding(
num_heads=4,
bidirectional=False,
embeddings_initializer=tf.keras.initializers.Ones(),
compute_dtype=dtype,
name="foo")
self.assertEqual(l(4, 2).shape, (1, 4, 4, 2))
l = t5.RelativePositionEmbedding(
num_heads=4,
bidirectional=True,
embeddings_initializer=tf.keras.initializers.Ones(),
compute_dtype=dtype,
name="bar")
outputs = l(4, 2)
self.assertEqual(outputs.shape, (1, 4, 4, 2))
self.assertEqual(outputs.dtype, dtype)
def test_masks(self):
causal_mask = t5.make_causal_mask(np.zeros((2, 5)))
self.assertEqual(causal_mask.shape, (2, 1, 5, 5))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
],
mode="eager"))
def test_attention(self, distribution):
num_heads, head_size = 2, 4
from_seq_length, to_seq_length = 4, 6
batch_size = 2
pos_embed = t5.RelativePositionEmbedding(
num_heads=4,
bidirectional=False,
embeddings_initializer=tf.keras.initializers.Ones(),
name="pos_embed")
position_bias = pos_embed(from_seq_length, from_seq_length)
l = t5.MultiHeadAttention(d_model=4, d_kv=2, num_heads=4, dropout_rate=0.1)
query = tf.convert_to_tensor(
np.ones((batch_size, from_seq_length, 4), dtype=np.float32))
self.assertEqual(
l(query, position_bias=position_bias)["context"].shape, query.shape)
kv = tf.convert_to_tensor(
np.ones((batch_size, to_seq_length, 4), dtype=np.float32))
position_bias = pos_embed(from_seq_length, to_seq_length)
outputs = l(query, kv=kv, position_bias=position_bias)
self.assertEqual(outputs["context"].shape, query.shape)
with distribution.scope():
l = t5.MultiHeadAttention(
d_model=4, d_kv=head_size, num_heads=num_heads, dropout_rate=0.1)
@tf.function
def step(inputs):
def _step_fn(inputs):
cache = _create_cache(batch_size, from_seq_length, num_heads,
head_size)
mask = t5.make_causal_mask(tf.ones((batch_size, 1)))
return l(
query=inputs,
mask=mask,
cache=cache,
decode_position=decode_position)
outputs = distribution.run(_step_fn, args=(inputs,))
return tf.nest.map_structure(distribution.experimental_local_results,
outputs)
decode_position = 2
query = tf.convert_to_tensor(np.ones((2, 1, 4), dtype=np.float32))
local_outputs = step(query)
self.assertEqual(local_outputs["context"][0].shape, (2, 1, 4))
self.assertNotEqual(
np.sum(local_outputs["cache"]["key"][0][:, decode_position,
...].numpy()), 0.0)
class T5Test(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
],
mode="eager"))
def test_attention_layers(self, distribution):
num_heads, head_size = 2, 2
from_seq_length = 4
# TPU decoding should pre-allocate the entire sequence.
batch_size = 2
with distribution.scope():
pos_embed = t5.RelativePositionEmbedding(
num_heads=head_size,
bidirectional=False,
embeddings_initializer=tf.keras.initializers.Ones(),
name="pos_embed")
l = t5.SelfAttention(
d_model=4, d_kv=head_size, num_heads=num_heads, dropout_rate=0.1)
decode_position = 2
@tf.function
def step(inputs):
def _step_fn(inputs):
cache = _create_cache(batch_size, from_seq_length, num_heads,
head_size)
mask = t5.make_causal_mask(tf.ones((batch_size, 1)))
position_bias = pos_embed(from_seq_length, from_seq_length)
return l(
hidden_states=inputs,
cache=cache,
attention_mask=mask,
decode_position=decode_position,
position_bias=position_bias)
outputs = distribution.run(_step_fn, args=(inputs,))
return tf.nest.map_structure(distribution.experimental_local_results,
outputs)
query = tf.convert_to_tensor(np.ones((2, 1, 4), dtype=np.float32))
local_outputs = step(query)
self.assertEqual(local_outputs["layer_output"][0].shape, (2, 1, 4))
self.assertNotEqual(
np.sum(
local_outputs["cache"]["key"][0][:,
decode_position, :, :].numpy()),
0.0)
l = t5.CrossAttention(
d_model=4, d_kv=head_size, num_heads=num_heads, dropout_rate=0.1)
to_seq_length = 6
query = tf.convert_to_tensor(
np.ones((2, from_seq_length, 4), dtype=np.float32))
kv = tf.convert_to_tensor(
np.ones((2, to_seq_length, 4), dtype=np.float32))
@tf.function
def step_cross_attn(inputs):
def _step_fn(inputs):
query, kv = inputs
mask = t5.make_attention_mask(
tf.ones((batch_size, from_seq_length)),
tf.ones((batch_size, to_seq_length)))
return l(hidden_states=query, kv=kv, attention_mask=mask)
outputs = distribution.run(_step_fn, args=(inputs,))
return tf.nest.map_structure(distribution.experimental_local_results,
outputs)
local_outputs = step_cross_attn((query, kv))
self.assertEqual(local_outputs["layer_output"][0].shape,
(2, from_seq_length, 4))
def test_encoder_block(self):
batch_size = 2
from_seq_length = 5
d_model = 4
l = t5.EncoderBlock(d_model=4, d_kv=3, num_heads=2, d_ff=8, name="foo")
pos_embed = t5.RelativePositionEmbedding(
num_heads=2,
bidirectional=True,
embeddings_initializer=tf.keras.initializers.Ones(),
name="bar")
attention_mask = t5.make_attention_mask(
tf.ones((batch_size, from_seq_length)),
tf.ones((batch_size, from_seq_length)))
position_bias = pos_embed(from_seq_length, from_seq_length)
inputs = tf.ones((batch_size, from_seq_length, d_model), dtype=tf.float32)
outputs = l(
inputs, attention_mask=attention_mask, position_bias=position_bias)
self.assertEqual(outputs.shape, (batch_size, from_seq_length, d_model))
def test_encdec_block(self):
batch_size = 2
from_seq_length = 5
to_seq_length = 3
d_model = 4
l = t5.EncDecoderBlock(d_model=4, d_kv=3, num_heads=2, d_ff=8, name="foo")
pos_embed = t5.RelativePositionEmbedding(
num_heads=2,
bidirectional=True,
embeddings_initializer=tf.keras.initializers.Ones(),
name="bar")
encoder_decoder_mask = t5.make_attention_mask(
tf.ones((batch_size, from_seq_length)),
tf.ones((batch_size, to_seq_length)))
position_bias = pos_embed(from_seq_length, from_seq_length)
inputs = tf.ones((batch_size, from_seq_length, d_model), dtype=tf.float32)
encoder_hidden_states = tf.ones((batch_size, to_seq_length, d_model),
dtype=tf.float32)
outputs = l(
inputs,
encoder_hidden_states,
encoder_decoder_mask=encoder_decoder_mask,
position_bias=position_bias)
self.assertEqual(outputs[0].shape, (batch_size, from_seq_length, d_model))
@parameterized.named_parameters(("bfloat16", tf.bfloat16),
("float32", tf.float32))
def test_encoder(self, dtype):
config = t5.T5TransformerParams(
num_layers=2,
d_model=4,
d_kv=3,
num_heads=4,
d_ff=16,
vocab_size=10,
vocab_embeddings_initializer=tf.keras.initializers.Ones(),
relative_embeddings_initializer=tf.keras.initializers.Ones())
encoder = t5.Encoder(config, compute_dtype=dtype)
encoded = encoder(tf.zeros((4, 8), dtype=tf.int32))
self.assertEqual(encoded.shape, (4, 8, config.d_model))
@parameterized.named_parameters(("bfloat16", tf.bfloat16),
("float32", tf.float32))
def test_encoder_with_dense(self, dtype):
config = t5.T5TransformerParams(
num_layers=2,
d_model=4,
d_kv=3,
num_heads=4,
d_ff=16,
vocab_size=10,
vocab_embeddings_initializer=tf.keras.initializers.Ones(),
relative_embeddings_initializer=tf.keras.initializers.Ones())
encoder = t5.Encoder(config, compute_dtype=dtype)
encoded = encoder(
tf.zeros((4, 8), dtype=tf.int32),
dense_inputs=tf.ones((4, 2, 4), dtype=dtype))
self.assertEqual(encoded.shape, (4, 10, config.d_model))
@parameterized.named_parameters(("bfloat16", tf.bfloat16),
("float32", tf.float32))
def test_encoder_only_dense(self, dtype):
config = t5.T5TransformerParams(
num_layers=2,
d_model=4,
d_kv=3,
num_heads=4,
d_ff=16,
vocab_size=10,
vocab_embeddings_initializer=tf.keras.initializers.Ones(),
relative_embeddings_initializer=tf.keras.initializers.Ones())
encoder = t5.Encoder(config, compute_dtype=dtype)
encoded = encoder(dense_inputs=tf.ones((4, 2, 4), dtype=dtype))
self.assertEqual(encoded.shape, (4, 2, config.d_model))
def test_decoder(self):
max_decode_len = 10
config = t5.T5TransformerParams(
num_layers=2,
d_model=4,
d_kv=3,
num_heads=4,
d_ff=16,
vocab_size=10,
vocab_embeddings_initializer=tf.keras.initializers.Ones(),
relative_embeddings_initializer=tf.keras.initializers.Ones())
decoder = t5.Decoder(config)
batch_size = 4
targets = tf.zeros((4, 8), dtype=tf.int32)
encoded = tf.zeros((4, 8, config.d_model), dtype=tf.float32)
outputs = decoder(targets, encoded)
logits = outputs["logits"]
cache = outputs["cache"]
self.assertEqual(logits.shape, (4, 8, config.vocab_size))
cache = {}
cache[0] = _create_cache(batch_size, max_decode_len, config.num_heads,
config.d_kv)
cache[1] = _create_cache(batch_size, max_decode_len, config.num_heads,
config.d_kv)
targets = tf.zeros((4, 1), dtype=tf.int32)
outputs = decoder(
targets,
encoded,
decode_position=2,
cache=cache,
decode=True,
max_decode_len=max_decode_len)
logits = outputs["logits"]
cache = outputs["cache"]
self.assertEqual(logits.shape, (batch_size, 1, config.vocab_size))
for entry in cache.values():
for tensor in entry.values():
self.assertNotAllEqual(tensor.numpy()[:, 2, :, :], 0.0)
@parameterized.named_parameters(
("t5_10", ("relu",), True, 26, False, tf.float32),
("t5_11", ("gelu", "linear"), False, 29, False, tf.float32),
("t5_10_bfloat16", ("relu",), True, 26, False, tf.bfloat16),
("t5_11_bfloat16", ("gelu", "linear"), False, 29, False, tf.bfloat16),
("t5_10_layer_sharing", ("relu",), True, 26, True, tf.float32),
("t5_11_layer_sharing", ("gelu", "linear"), False, 29, True, tf.float32),
("t5_10_bfloat16_layer_sharing", ("relu",), True, 26, True, tf.bfloat16),
("t5_11_bfloat16_layer_sharing",
("gelu", "linear"), False, 29, True, tf.bfloat16))
def test_transformer(self, ffn_activations, logits_via_embedding,
expect_num_variables, layer_sharing, dtype):
max_decode_len = 10
config = t5.T5TransformerParams(
num_layers=1,
d_model=8,
d_kv=4,
num_heads=4,
d_ff=32,
vocab_size=10,
shared_embedding=True,
layer_sharing=layer_sharing,
ffn_activations=ffn_activations,
logits_via_embedding=logits_via_embedding)
transformer = t5.T5Transformer(config, compute_dtype=dtype)
self.assertLen(transformer.trainable_variables, expect_num_variables)
inputs = tf.convert_to_tensor(
np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]]))
segments = tf.convert_to_tensor(
np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]]))
outputs = transformer(
encoder_input_tokens=inputs,
decoder_input_tokens=inputs,
decoder_target_tokens=inputs,
encoder_segment_ids=segments,
decoder_segment_ids=segments)
cache = {}
batch_size = 2
cache[0] = _create_cache(
batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype)
outputs = transformer.decode(
encoder_input_tokens=inputs,
encoded=outputs["encoded"],
decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32),
decode_position=1,
decode=True,
max_decode_len=max_decode_len,
cache=cache)
self.assertEqual(outputs["logits"].shape,
(batch_size, 1, config.vocab_size))
for v in transformer.trainable_variables:
print(v.name, v.shape)
self.assertEqual(v.dtype, tf.float32)
@parameterized.named_parameters(
("t5_10_dense", ("relu",), True, 26, False, tf.float32),)
def test_transformer_with_dense(self, ffn_activations, logits_via_embedding,
expect_num_variables, layer_sharing, dtype):
max_decode_len = 10
config = t5.T5TransformerParams(
num_layers=1,
d_model=8,
d_kv=4,
num_heads=4,
d_ff=32,
vocab_size=10,
shared_embedding=True,
layer_sharing=layer_sharing,
ffn_activations=ffn_activations,
logits_via_embedding=logits_via_embedding)
transformer = t5.T5Transformer(config, compute_dtype=dtype)
self.assertLen(transformer.trainable_variables, expect_num_variables)
inputs = tf.convert_to_tensor(
np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]]))
segments = tf.convert_to_tensor(
np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]]))
dense_inputs = tf.convert_to_tensor(np.random.randn(2, 2, 8), dtype=dtype)
dense_segments = tf.convert_to_tensor(np.array([[1, 2], [1, 2]]))
outputs = transformer(
encoder_input_tokens=inputs,
encoder_dense_inputs=dense_inputs,
decoder_input_tokens=inputs,
decoder_target_tokens=inputs,
encoder_segment_ids=segments,
encoder_dense_segment_ids=dense_segments,
decoder_segment_ids=segments)
cache = {}
batch_size = 2
cache[0] = _create_cache(
batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype)
outputs = transformer.decode(
encoder_input_tokens=inputs,
encoder_dense_inputs=dense_inputs,
encoded=outputs["encoded"],
decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32),
decode_position=1,
decode=True,
max_decode_len=max_decode_len,
cache=cache)
self.assertEqual(outputs["logits"].shape,
(batch_size, 1, config.vocab_size))
for v in transformer.trainable_variables:
print(v.name, v.shape)
self.assertEqual(v.dtype, tf.float32)
@parameterized.named_parameters(
("t5_10_dense_layerwise_relpos",
("relu",), True, 26, False, tf.float32, False, 1),
("t5_10_dense_shared_relpos_d2",
("relu",), True, 39, False, tf.float32, True, 2),
("t5_10_dense_layerwise_relpos_d2",
("relu",), True, 40, False, tf.float32, False, 2),
)
def test_transformer_with_lw_relpos(self, ffn_activations,
logits_via_embedding,
expect_num_variables, layer_sharing,
dtype, use_shared_relpos,
num_decoder_layers):
max_decode_len = 10
config = t5.T5TransformerParams(
num_layers=1,
num_decoder_layers=num_decoder_layers,
d_model=8,
d_kv=4,
num_heads=4,
d_ff=32,
vocab_size=10,
shared_embedding=True,
layer_sharing=layer_sharing,
ffn_activations=ffn_activations,
logits_via_embedding=logits_via_embedding,
use_shared_relative_position_bias=use_shared_relpos)
transformer = t5.T5Transformer(config, compute_dtype=dtype)
self.assertLen(transformer.trainable_variables, expect_num_variables)
inputs = tf.convert_to_tensor(
np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]]))
segments = tf.convert_to_tensor(
np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]]))
dense_inputs = tf.convert_to_tensor(np.random.randn(2, 2, 8), dtype=dtype)
dense_segments = tf.convert_to_tensor(np.array([[1, 2], [1, 2]]))
outputs = transformer(
encoder_input_tokens=inputs,
encoder_dense_inputs=dense_inputs,
decoder_input_tokens=inputs,
decoder_target_tokens=inputs,
encoder_segment_ids=segments,
encoder_dense_segment_ids=dense_segments,
decoder_segment_ids=segments)
cache = {}
batch_size = 2
for i in range(num_decoder_layers):
cache[i] = _create_cache(
batch_size,
max_decode_len,
config.num_heads,
config.d_kv,
dtype=dtype)
outputs = transformer.decode(
encoder_input_tokens=inputs,
encoder_dense_inputs=dense_inputs,
encoded=outputs["encoded"],
decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32),
decode_position=1,
decode=True,
max_decode_len=max_decode_len,
cache=cache)
self.assertEqual(outputs["logits"].shape,
(batch_size, 1, config.vocab_size))
for v in transformer.trainable_variables:
print(v.name, v.shape)
self.assertEqual(v.dtype, tf.float32)
@parameterized.named_parameters(
("t5_10", ("relu",), True, 26, False, tf.float32),)
def test_transformer_with_dense_only(self, ffn_activations,
logits_via_embedding,
expect_num_variables, layer_sharing,
dtype):
max_decode_len = 10
config = t5.T5TransformerParams(
num_layers=1,
d_model=8,
d_kv=4,
num_heads=4,
d_ff=32,
vocab_size=10,
shared_embedding=True,
layer_sharing=layer_sharing,
ffn_activations=ffn_activations,
logits_via_embedding=logits_via_embedding)
transformer = t5.T5Transformer(config, compute_dtype=dtype)
self.assertLen(transformer.trainable_variables, expect_num_variables)
decoder_inputs = tf.convert_to_tensor(
np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]]))
decoder_segments = tf.convert_to_tensor(
np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]]))
dense_inputs = tf.convert_to_tensor(np.random.randn(2, 2, 8), dtype=dtype)
dense_segments = tf.convert_to_tensor(np.array([[1, 2], [1, 2]]))
outputs = transformer(
encoder_dense_inputs=dense_inputs,
encoder_dense_segment_ids=dense_segments,
decoder_input_tokens=decoder_inputs,
decoder_target_tokens=decoder_inputs,
decoder_segment_ids=decoder_segments)
cache = {}
batch_size = 2
cache[0] = _create_cache(
batch_size, max_decode_len, config.num_heads, config.d_kv, dtype=dtype)
outputs = transformer.decode(
encoder_dense_inputs=dense_inputs,
encoded=outputs["encoded"],
decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32),
decode_position=1,
decode=True,
max_decode_len=max_decode_len,
cache=cache)
self.assertEqual(outputs["logits"].shape,
(batch_size, 1, config.vocab_size))
for v in transformer.trainable_variables:
print(v.name, v.shape)
self.assertEqual(v.dtype, tf.float32)
@parameterized.named_parameters(
("t5_10", ("relu",), True, 39, tf.float32, 2),
("t5_10_bfloat16", ("relu",), True, 39, tf.bfloat16, 2))
def test_transformer_different_num_decoder_layers(self, ffn_activations,
logits_via_embedding,
expect_num_variables, dtype,
num_decoder_layers):
max_decode_len = 10
config = t5.T5TransformerParams(
num_decoder_layers=num_decoder_layers,
num_layers=1,
d_model=8,
d_kv=4,
num_heads=4,
d_ff=32,
vocab_size=10,
shared_embedding=True,
ffn_activations=ffn_activations,
logits_via_embedding=logits_via_embedding)
transformer = t5.T5Transformer(config, compute_dtype=dtype)
self.assertLen(transformer.trainable_variables, expect_num_variables)
inputs = tf.convert_to_tensor(
np.array([[2, 2, 1, 3, 1, 0], [3, 3, 1, 2, 2, 1]]))
segments = tf.convert_to_tensor(
np.array([[1, 1, 1, 2, 2, 0], [1, 1, 1, 2, 2, 2]]))
outputs = transformer(
encoder_input_tokens=inputs,
decoder_input_tokens=inputs,
decoder_target_tokens=inputs,
encoder_segment_ids=segments,
decoder_segment_ids=segments)
cache = {}
batch_size = 2
for i in range(num_decoder_layers):
cache[i] = _create_cache(
batch_size,
max_decode_len,
config.num_heads,
config.d_kv,
dtype=dtype)
outputs = transformer.decode(
encoder_input_tokens=inputs,
encoded=outputs["encoded"],
decoder_target_tokens=tf.ones((batch_size, 1), dtype=tf.int32),
decode_position=1,
decode=True,
max_decode_len=max_decode_len,
cache=cache)
self.assertEqual(outputs["logits"].shape,
(batch_size, 1, config.vocab_size))
for v in transformer.trainable_variables:
print(v.name, v.shape)
self.assertEqual(v.dtype, tf.float32)
if __name__ == "__main__":
tf.test.main()
| 27,024 | 36.639276 | 80 | py |
models | models-master/official/nlp/modeling/models/dual_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dual encoder network."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import dual_encoder
class DualEncoderTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters((192, 'logits'), (768, 'predictions'))
def test_dual_encoder(self, hidden_size, output):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the dual encoder model.
vocab_size = 100
sequence_length = 512
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=2,
hidden_size=hidden_size,
dict_outputs=True)
# Create a dual encoder model with the created network.
dual_encoder_model = dual_encoder.DualEncoder(
test_network, max_seq_length=sequence_length, output=output)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
left_word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
left_mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
left_type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
right_word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
right_mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
right_type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
if output == 'logits':
outputs = dual_encoder_model([
left_word_ids, left_mask, left_type_ids, right_word_ids, right_mask,
right_type_ids
])
_ = outputs['left_logits']
elif output == 'predictions':
outputs = dual_encoder_model([left_word_ids, left_mask, left_type_ids])
# Validate that the outputs are of the expected shape.
expected_sequence_shape = [None, sequence_length, 768]
self.assertAllEqual(expected_sequence_shape,
outputs['sequence_output'].shape.as_list())
left_encoded = outputs['pooled_output']
expected_encoding_shape = [None, 768]
self.assertAllEqual(expected_encoding_shape, left_encoded.shape.as_list())
@parameterized.parameters((192, 'logits'), (768, 'predictions'))
def test_dual_encoder_tensor_call(self, hidden_size, output):
"""Validate that the Keras object can be invoked."""
# Build a transformer network to use within the dual encoder model.
del hidden_size
sequence_length = 2
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a dual encoder model with the created network.
dual_encoder_model = dual_encoder.DualEncoder(
test_network, max_seq_length=sequence_length, output=output)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
    # Invoke the model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
if output == 'logits':
_ = dual_encoder_model(
[word_ids, mask, type_ids, word_ids, mask, type_ids])
elif output == 'predictions':
_ = dual_encoder_model([word_ids, mask, type_ids])
def test_serialize_deserialize(self):
"""Validate that the dual encoder model can be serialized / deserialized."""
# Build a transformer network to use within the dual encoder model.
sequence_length = 32
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a dual encoder model with the created network. (Note that all the
# args are different, so we can catch any serialization mismatches.)
dual_encoder_model = dual_encoder.DualEncoder(
test_network, max_seq_length=sequence_length, output='predictions')
    # Create another dual encoder model via serialization and deserialization.
config = dual_encoder_model.get_config()
new_dual_encoder = dual_encoder.DualEncoder.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_dual_encoder.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(dual_encoder_model.get_config(),
new_dual_encoder.get_config())
if __name__ == '__main__':
tf.test.main()
| 5,120 | 42.398305 | 80 | py |
models | models-master/official/nlp/modeling/models/bert_span_labeler_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BERT trainer network."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_span_labeler
class BertSpanLabelerTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(True, False)
def test_bert_trainer(self, dict_outputs):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
test_network = networks.BertEncoder(
vocab_size=vocab_size, num_layers=2, dict_outputs=dict_outputs)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
cls_outs = bert_trainer_model([word_ids, mask, type_ids])
    # Validate that the 2 outputs are of the expected shape.
self.assertLen(cls_outs, 2)
expected_shape = [None, sequence_length]
for out in cls_outs:
self.assertAllEqual(expected_shape, out.shape.as_list())
def test_bert_trainer_named_compilation(self):
"""Validate compilation using explicit output names."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
test_network = networks.BertEncoder(vocab_size=vocab_size, num_layers=2)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network)
# Attempt to compile the model using a string-keyed dict of output names to
# loss functions. This will validate that the outputs are named as we
# expect.
bert_trainer_model.compile(
optimizer='sgd',
loss={
'start_positions': 'mse',
'end_positions': 'mse'
})
def test_bert_trainer_tensor_call(self):
"""Validate that the Keras object can be invoked."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = bert_trainer_model([word_ids, mask, type_ids])
def test_serialize_deserialize(self):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer.
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network)
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = bert_span_labeler.BertSpanLabeler.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,769 | 40.842105 | 80 | py |
models | models-master/official/nlp/modeling/models/electra_pretrainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer network for ELECTRA models."""
# pylint: disable=g-classes-have-attributes
import copy
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class ElectraPretrainer(tf.keras.Model):
"""ELECTRA network training model.
This is an implementation of the network structure described in "ELECTRA:
Pre-training Text Encoders as Discriminators Rather Than Generators" (
https://arxiv.org/abs/2003.10555).
  The ElectraPretrainer allows a user to pass in two transformer models, one
  for the generator and the other for the discriminator, and instantiates the
  masked language model (at the generator side) and classification networks
  (at the discriminator side) that are used to create the training objectives.
*Note* that the model is constructed by Keras Subclass API, where layers are
defined inside `__init__` and `call()` implements the computation.
Args:
    generator_network: A transformer network for the generator; this network
      should output a sequence output and an optional classification output.
    discriminator_network: A transformer network for the discriminator; this
      network should output a sequence output.
    vocab_size: Size of the generator output vocabulary.
    num_classes: Number of classes to predict from the classification network
      for the generator network (not used now).
num_token_predictions: Number of tokens to predict from the masked LM.
mlm_activation: The activation (if any) to use in the masked LM and
classification networks. If None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM and
classification networks. Defaults to a Glorot uniform initializer.
output_type: The output style for this network. Can be either `logits` or
`predictions`.
    disallow_correct: Whether to disallow the generator from generating the
      exact same token as in the original sentence.
"""
def __init__(self,
generator_network,
discriminator_network,
vocab_size,
num_classes,
num_token_predictions,
mlm_activation=None,
mlm_initializer='glorot_uniform',
output_type='logits',
disallow_correct=False,
**kwargs):
super(ElectraPretrainer, self).__init__()
self._config = {
'generator_network': generator_network,
'discriminator_network': discriminator_network,
'vocab_size': vocab_size,
'num_classes': num_classes,
'num_token_predictions': num_token_predictions,
'mlm_activation': mlm_activation,
'mlm_initializer': mlm_initializer,
'output_type': output_type,
'disallow_correct': disallow_correct,
}
for k, v in kwargs.items():
self._config[k] = v
self.generator_network = generator_network
self.discriminator_network = discriminator_network
self.vocab_size = vocab_size
self.num_classes = num_classes
self.num_token_predictions = num_token_predictions
self.mlm_activation = mlm_activation
self.mlm_initializer = mlm_initializer
self.output_type = output_type
self.disallow_correct = disallow_correct
self.masked_lm = layers.MaskedLM(
embedding_table=generator_network.get_embedding_table(),
activation=mlm_activation,
initializer=tf_utils.clone_initializer(mlm_initializer),
output=output_type,
name='generator_masked_lm')
self.classification = layers.ClassificationHead(
inner_dim=generator_network.get_config()['hidden_size'],
num_classes=num_classes,
initializer=tf_utils.clone_initializer(mlm_initializer),
name='generator_classification_head')
self.discriminator_projection = tf.keras.layers.Dense(
units=discriminator_network.get_config()['hidden_size'],
activation=mlm_activation,
kernel_initializer=tf_utils.clone_initializer(mlm_initializer),
name='discriminator_projection_head')
self.discriminator_head = tf.keras.layers.Dense(
units=1,
kernel_initializer=tf_utils.clone_initializer(mlm_initializer))
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""ELECTRA forward pass.
Args:
inputs: A dict of all inputs, same as the standard BERT model.
Returns:
outputs: A dict of pretrainer model outputs, including
(1) lm_outputs: A `[batch_size, num_token_predictions, vocab_size]`
tensor indicating logits on masked positions.
(2) sentence_outputs: A `[batch_size, num_classes]` tensor indicating
logits for nsp task.
(3) disc_logits: A `[batch_size, sequence_length]` tensor indicating
logits for discriminator replaced token detection task.
(4) disc_label: A `[batch_size, sequence_length]` tensor indicating
target labels for discriminator replaced token detection task.
"""
input_word_ids = inputs['input_word_ids']
input_mask = inputs['input_mask']
input_type_ids = inputs['input_type_ids']
masked_lm_positions = inputs['masked_lm_positions']
### Generator ###
sequence_output = self.generator_network(
[input_word_ids, input_mask, input_type_ids])['sequence_output']
# The generator encoder network may get outputs from all layers.
if isinstance(sequence_output, list):
sequence_output = sequence_output[-1]
lm_outputs = self.masked_lm(sequence_output, masked_lm_positions)
sentence_outputs = self.classification(sequence_output)
### Sampling from generator ###
fake_data = self._get_fake_data(inputs, lm_outputs, duplicate=True)
### Discriminator ###
disc_input = fake_data['inputs']
disc_label = fake_data['is_fake_tokens']
disc_sequence_output = self.discriminator_network([
disc_input['input_word_ids'], disc_input['input_mask'],
disc_input['input_type_ids']
])['sequence_output']
# The discriminator encoder network may get outputs from all layers.
if isinstance(disc_sequence_output, list):
disc_sequence_output = disc_sequence_output[-1]
disc_logits = self.discriminator_head(
self.discriminator_projection(disc_sequence_output))
disc_logits = tf.squeeze(disc_logits, axis=-1)
outputs = {
'lm_outputs': lm_outputs,
'sentence_outputs': sentence_outputs,
'disc_logits': disc_logits,
'disc_label': disc_label,
}
return outputs
def _get_fake_data(self, inputs, mlm_logits, duplicate=True):
"""Generate corrupted data for discriminator.
Args:
inputs: A dict of all inputs, same as the input of `call()` function
mlm_logits: The generator's output logits
duplicate: Whether to copy the original inputs dict during modifications
Returns:
A dict of generated fake data
"""
inputs = unmask(inputs, duplicate)
if self.disallow_correct:
disallow = tf.one_hot(
inputs['masked_lm_ids'], depth=self.vocab_size, dtype=tf.float32)
else:
disallow = None
sampled_tokens = tf.stop_gradient(
sample_from_softmax(mlm_logits, disallow=disallow))
sampled_tokids = tf.argmax(sampled_tokens, -1, output_type=tf.int32)
updated_input_ids, masked = scatter_update(inputs['input_word_ids'],
sampled_tokids,
inputs['masked_lm_positions'])
labels = masked * (1 - tf.cast(
tf.equal(updated_input_ids, inputs['input_word_ids']), tf.int32))
updated_inputs = get_updated_inputs(
inputs, duplicate, input_word_ids=updated_input_ids)
return {
'inputs': updated_inputs,
'is_fake_tokens': labels,
'sampled_tokens': sampled_tokens
}
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.discriminator_network)
return items
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def scatter_update(sequence, updates, positions):
"""Scatter-update a sequence.
Args:
sequence: A `[batch_size, seq_len]` or `[batch_size, seq_len, depth]`
tensor.
    updates: A tensor of values to write, of shape `[batch_size, n_positions]`
      or `[batch_size, n_positions, depth]`.
positions: A `[batch_size, n_positions]` tensor.
Returns:
updated_sequence: A `[batch_size, seq_len]` or
`[batch_size, seq_len, depth]` tensor of "sequence" with elements at
"positions" replaced by the values at "updates". Updates to index 0 are
ignored. If there are duplicated positions the update is only
applied once.
updates_mask: A `[batch_size, seq_len]` mask tensor of which inputs were
updated.
"""
shape = tf_utils.get_shape_list(sequence, expected_rank=[2, 3])
depth_dimension = (len(shape) == 3)
if depth_dimension:
batch_size, seq_len, depth = shape
else:
batch_size, seq_len = shape
depth = 1
sequence = tf.expand_dims(sequence, -1)
n_positions = tf_utils.get_shape_list(positions)[1]
shift = tf.expand_dims(seq_len * tf.range(batch_size), -1)
flat_positions = tf.reshape(positions + shift, [-1, 1])
flat_updates = tf.reshape(updates, [-1, depth])
updates = tf.scatter_nd(flat_positions, flat_updates,
[batch_size * seq_len, depth])
updates = tf.reshape(updates, [batch_size, seq_len, depth])
flat_updates_mask = tf.ones([batch_size * n_positions], tf.int32)
updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask,
[batch_size * seq_len])
updates_mask = tf.reshape(updates_mask, [batch_size, seq_len])
not_first_token = tf.concat([
tf.zeros((batch_size, 1), tf.int32),
tf.ones((batch_size, seq_len - 1), tf.int32)
], -1)
updates_mask *= not_first_token
updates_mask_3d = tf.expand_dims(updates_mask, -1)
# account for duplicate positions
if sequence.dtype == tf.float32:
updates_mask_3d = tf.cast(updates_mask_3d, tf.float32)
updates /= tf.maximum(1.0, updates_mask_3d)
else:
assert sequence.dtype == tf.int32
updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d))
updates_mask = tf.minimum(updates_mask, 1)
updates_mask_3d = tf.minimum(updates_mask_3d, 1)
updated_sequence = (((1 - updates_mask_3d) * sequence) +
(updates_mask_3d * updates))
if not depth_dimension:
updated_sequence = tf.squeeze(updated_sequence, -1)
return updated_sequence, updates_mask
def sample_from_softmax(logits, disallow=None):
"""Implement softmax sampling using gumbel softmax trick.
Args:
logits: A `[batch_size, num_token_predictions, vocab_size]` tensor
indicating the generator output logits for each masked position.
disallow: If `None`, we directly sample tokens from the logits. Otherwise,
this is a tensor of size `[batch_size, num_token_predictions, vocab_size]`
indicating the true word id in each masked position.
Returns:
sampled_tokens: A `[batch_size, num_token_predictions, vocab_size]` one hot
tensor indicating the sampled word id in each masked position.
"""
if disallow is not None:
logits -= 1000.0 * disallow
uniform_noise = tf.random.uniform(
tf_utils.get_shape_list(logits), minval=0, maxval=1)
gumbel_noise = -tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9)
# Here we essentially follow the original paper and use temperature 1.0 for
# generator output logits.
sampled_tokens = tf.one_hot(
tf.argmax(tf.nn.softmax(logits + gumbel_noise), -1, output_type=tf.int32),
logits.shape[-1])
return sampled_tokens
def unmask(inputs, duplicate):
unmasked_input_word_ids, _ = scatter_update(inputs['input_word_ids'],
inputs['masked_lm_ids'],
inputs['masked_lm_positions'])
return get_updated_inputs(
inputs, duplicate, input_word_ids=unmasked_input_word_ids)
def get_updated_inputs(inputs, duplicate, **kwargs):
if duplicate:
new_inputs = copy.copy(inputs)
else:
new_inputs = inputs
for k, v in kwargs.items():
new_inputs[k] = v
return new_inputs
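# A minimal usage sketch (not part of the original library code): it exercises
# the `scatter_update` and `sample_from_softmax` helpers above on tiny tensors.
# All shapes and values here are illustrative assumptions, not fixed by the API.
if __name__ == '__main__':
  # Replace the tokens at position 2 (first example) and 1 (second example)
  # with the value 9; updates to position 0 would be ignored by design.
  sequence = tf.constant([[5, 6, 7, 8], [1, 2, 3, 4]], dtype=tf.int32)
  updates = tf.constant([[9], [9]], dtype=tf.int32)
  positions = tf.constant([[2], [1]], dtype=tf.int32)
  updated, mask = scatter_update(sequence, updates, positions)
  print(updated.numpy())  # -> [[5, 6, 9, 8], [1, 9, 3, 4]]
  print(mask.numpy())  # -> [[0, 0, 1, 0], [0, 1, 0, 0]]
  # Draw one-hot token ids (Gumbel-softmax trick, temperature 1.0) from random
  # generator logits for 3 masked positions over a 7-token vocabulary.
  logits = tf.random.normal([2, 3, 7])
  sampled = sample_from_softmax(logits)
  print(sampled.shape)  # (2, 3, 7), one-hot over the vocabulary axis.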
| 13,036 | 37.916418 | 100 | py |
models | models-master/official/nlp/modeling/models/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models are combinations of `tf.keras` layers and models that can be trained.
Several pre-built canned models are provided to train encoder networks.
These models are intended as both convenience functions and canonical examples.
"""
from official.nlp.modeling.models.bert_classifier import BertClassifier
from official.nlp.modeling.models.bert_pretrainer import *
from official.nlp.modeling.models.bert_span_labeler import BertSpanLabeler
from official.nlp.modeling.models.bert_token_classifier import BertTokenClassifier
from official.nlp.modeling.models.dual_encoder import DualEncoder
from official.nlp.modeling.models.electra_pretrainer import ElectraPretrainer
from official.nlp.modeling.models.seq2seq_transformer import *
from official.nlp.modeling.models.t5 import T5Transformer
from official.nlp.modeling.models.t5 import T5TransformerParams
from official.nlp.modeling.models.xlnet import XLNetClassifier
from official.nlp.modeling.models.xlnet import XLNetPretrainer
from official.nlp.modeling.models.xlnet import XLNetSpanLabeler
| 1,654 | 50.71875 | 82 | py |
models | models-master/official/nlp/modeling/models/bert_classifier.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT cls-token classifier."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class BertClassifier(tf.keras.Model):
"""Classifier model based on a BERT-style transformer-based encoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertClassifier allows a user to pass in a transformer stack, and
instantiates a classification network based on the passed `num_classes`
argument. If `num_classes` is set to 1, a regression network is instantiated.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a "get_embedding_table" method.
num_classes: Number of classes to predict from the classification network.
initializer: The initializer (if any) to use in the classification networks.
Defaults to a Glorot uniform initializer.
dropout_rate: The dropout probability of the cls head.
use_encoder_pooler: Whether to use the pooler layer pre-defined inside the
encoder.
head_name: Name of the classification head.
cls_head: (Optional) The layer instance to use for the classifier head.
It should take in the output from network and produce the final logits.
If set, the arguments ('num_classes', 'initializer', 'dropout_rate',
'use_encoder_pooler', 'head_name') will be ignored.
"""
def __init__(self,
network,
num_classes,
initializer='glorot_uniform',
dropout_rate=0.1,
use_encoder_pooler=True,
head_name='sentence_prediction',
cls_head=None,
**kwargs):
self.num_classes = num_classes
self.head_name = head_name
self.initializer = initializer
self.use_encoder_pooler = use_encoder_pooler
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a handle to the network inputs for use
# when we construct the Model object at the end of init.
inputs = network.inputs
if use_encoder_pooler:
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
outputs = network(inputs)
if isinstance(outputs, list):
cls_inputs = outputs[1]
else:
cls_inputs = outputs['pooled_output']
cls_inputs = tf.keras.layers.Dropout(rate=dropout_rate)(cls_inputs)
else:
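      # Without the encoder pooler, the classification head receives the full
      # sequence output and performs its own pooling and projection (note that
      # `inner_dim` is set to the hidden size in this branch).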
outputs = network(inputs)
if isinstance(outputs, list):
cls_inputs = outputs[0]
else:
cls_inputs = outputs['sequence_output']
if cls_head:
classifier = cls_head
else:
classifier = layers.ClassificationHead(
inner_dim=0 if use_encoder_pooler else cls_inputs.shape[-1],
num_classes=num_classes,
initializer=initializer,
dropout_rate=dropout_rate,
name=head_name)
predictions = classifier(cls_inputs)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super(BertClassifier, self).__init__(
inputs=inputs, outputs=predictions, **kwargs)
self._network = network
self._cls_head = cls_head
config_dict = self._make_config_dict()
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.classifier = classifier
@property
def checkpoint_items(self):
items = dict(encoder=self._network)
if hasattr(self.classifier, 'checkpoint_items'):
for key, item in self.classifier.checkpoint_items.items():
items['.'.join([self.classifier.name, key])] = item
return items
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def _make_config_dict(self):
return {
'network': self._network,
'num_classes': self.num_classes,
'head_name': self.head_name,
'initializer': self.initializer,
'use_encoder_pooler': self.use_encoder_pooler,
'cls_head': self._cls_head,
}
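# A minimal construction sketch (not part of the original library code). It
# assumes `official.nlp.modeling.networks.BertEncoder` is available, mirroring
# how sibling models in this package are exercised in their unit tests.
if __name__ == '__main__':
  from official.nlp.modeling import networks  # pylint: disable=g-import-not-at-top
  encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
  classifier = BertClassifier(network=encoder, num_classes=3)
  batch_size, seq_length = 2, 8
  word_ids = tf.random.uniform(
      (batch_size, seq_length), maxval=100, dtype=tf.int32)
  input_mask = tf.ones((batch_size, seq_length), dtype=tf.int32)
  type_ids = tf.zeros((batch_size, seq_length), dtype=tf.int32)
  logits = classifier([word_ids, input_mask, type_ids])
  print(logits.shape)  # (batch_size, num_classes) == (2, 3)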
| 5,896 | 38.844595 | 80 | py |
models | models-master/official/nlp/modeling/layers/kernel_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.kernel.attention."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling.layers import kernel_attention as attention
_FEATURE_TRANSFORM = ["relu", "elu", "exp", "expplus"]
_REDRAW = [True, False]
_TRAINING = [True, False]
_IS_SHORT_SEQ = [True, False]
_BEGIN_KERNEL = [0, 512]
class KernelAttentionTest(tf.test.TestCase, parameterized.TestCase):
  # expplus is only designed for the bi-directional use case.
  # exp can be numerically unstable.
@parameterized.parameters(itertools.product(
["relu", "elu"], [1, 4], [0.9]))
def test_causal_windowed_attention_projection_streaming(
self, feature_transform, causal_chunk_length, causal_weight_decay):
num_heads = 12
key_dim = 64
seq_length = 16
num_chunks = seq_length // causal_chunk_length
causal_window_length = num_chunks
batch_size = 2
training = False
num_random_features = 0
test_layer = attention.KernelAttention(
num_heads=num_heads,
key_dim=key_dim,
feature_transform=feature_transform,
num_random_features=num_random_features,
redraw=False,
is_short_seq=False,
begin_kernel=False,
use_causal_windowed=True,
causal_chunk_length=causal_chunk_length,
causal_window_length=causal_window_length,
causal_window_decay=causal_weight_decay,
causal_padding=None,
)
query = tf.random.normal(
shape=(batch_size, seq_length, key_dim), seed=2)
value = query
encoder_inputs_mask = tf.ones((batch_size, seq_length), dtype=tf.int32)
masks = tf.cast(encoder_inputs_mask, dtype=tf.float32)
output = test_layer(
query=query,
value=value,
attention_mask=masks,
training=training)
dim = num_random_features if num_random_features > 0 else key_dim
kv_cache = tf.zeros(
(batch_size, num_heads, dim, dim))
k_sum_cache = tf.zeros((batch_size, num_heads, dim))
stream_output = []
cache = {"kv": kv_cache, "k_sum": k_sum_cache}
for i in range(num_chunks):
stream_output.append(
test_layer(
query=query[:, i * causal_chunk_length:(i + 1) *
causal_chunk_length, :],
value=value[:, i * causal_chunk_length:(i + 1) *
causal_chunk_length, :],
attention_mask=masks[:, i * causal_chunk_length:(i + 1) *
causal_chunk_length],
cache=cache,
training=training))
stream_output = tf.concat(stream_output, axis=1)
self.assertAllClose(output, stream_output)
@parameterized.parameters(
itertools.product(_FEATURE_TRANSFORM, [127], _TRAINING, [True, False],
_IS_SHORT_SEQ, _BEGIN_KERNEL))
def test_attention_projection(
self, feature_transform, num_random_features, training, redraw, is_short,
begin_kernel):
num_heads = 12
key_dim = 64
seq_length = 1024
batch_size = 2
test_layer = attention.KernelAttention(
num_heads=num_heads,
key_dim=key_dim,
feature_transform=feature_transform,
num_random_features=num_random_features,
redraw=redraw,
is_short_seq=is_short,
begin_kernel=begin_kernel)
query = tf.random.normal(
shape=(batch_size, seq_length, key_dim))
value = query
encoder_inputs_mask = tf.zeros((batch_size, seq_length), dtype=tf.int32)
masks = tf.cast(encoder_inputs_mask, dtype=tf.float32)
output = test_layer(
query=query,
value=value,
attention_mask=masks,
training=training)
self.assertEqual(output.shape, [batch_size, seq_length, key_dim])
@parameterized.parameters(
itertools.product(["relu", "exp"], [127], _TRAINING, [True, False],
[0], [None, 0.97], [None, "left", "right"]))
def test_causal_windowed_attention_projection(
self, feature_transform, num_random_features, training, redraw,
begin_kernel, causal_window_decay, causal_padding):
num_heads = 12
key_dim = 64
seq_length = 1024
batch_size = 2
test_layer = attention.KernelAttention(
num_heads=num_heads,
key_dim=key_dim,
feature_transform=feature_transform,
num_random_features=num_random_features,
redraw=redraw,
is_short_seq=False,
begin_kernel=begin_kernel,
use_causal_windowed=True,
causal_chunk_length=8,
causal_window_length=3,
causal_window_decay=causal_window_decay,
causal_padding=causal_padding)
query = tf.random.normal(
shape=(batch_size, seq_length, key_dim))
value = query
encoder_inputs_mask = tf.zeros((batch_size, seq_length), dtype=tf.int32)
masks = tf.cast(encoder_inputs_mask, dtype=tf.float32)
output = test_layer(
query=query,
value=value,
attention_mask=masks,
training=training)
self.assertEqual(output.shape, [batch_size, seq_length, key_dim])
@parameterized.parameters(itertools.product(
_FEATURE_TRANSFORM, [0], _TRAINING, [False],
_IS_SHORT_SEQ, _BEGIN_KERNEL))
def test_attention_no_projection(
self, feature_transform, num_random_features, training, redraw, is_short,
begin_kernel):
num_heads = 12
key_dim = 64
seq_length = 1024
batch_size = 2
test_layer = attention.KernelAttention(
num_heads=num_heads,
key_dim=key_dim,
feature_transform=feature_transform,
num_random_features=num_random_features,
redraw=redraw,
is_short_seq=is_short,
begin_kernel=begin_kernel)
query = tf.random.normal(
shape=(batch_size, seq_length, key_dim))
value = query
encoder_inputs_mask = tf.zeros((batch_size, seq_length), dtype=tf.int32)
masks = tf.cast(encoder_inputs_mask, dtype=tf.float32)
output = test_layer(
query=query,
value=value,
attention_mask=masks,
training=training)
self.assertEqual(output.shape, [batch_size, seq_length, key_dim])
@parameterized.parameters([128, 512])
def test_attention_scale_by_length(self, seq_length):
num_heads = 12
key_dim = 64
batch_size = 2
test_layer = attention.KernelAttention(
num_heads=num_heads,
key_dim=key_dim,
num_random_features=0,
scale_by_length=True)
query = tf.random.normal(
shape=(batch_size, seq_length, key_dim))
value = query
encoder_inputs_mask = tf.ones((batch_size, seq_length), dtype=tf.int32)
masks = tf.cast(encoder_inputs_mask, dtype=tf.float32)
output_scale_by_length = test_layer(
query=query, value=value, attention_mask=masks)
test_layer._scale_by_length = False
output_no_scale_by_length = test_layer(
query=query, value=value, attention_mask=masks)
if seq_length == 512: # Equals because log(seq_length, base=512) = 1.0
self.assertAllClose(output_scale_by_length, output_no_scale_by_length)
else:
self.assertNotAllClose(output_scale_by_length, output_no_scale_by_length)
def test_unsupported_feature_transform(self):
with self.assertRaisesRegex(ValueError, "Unsupported feature_transform.*"):
_ = attention.KernelAttention(feature_transform="test")
def test_redraw_true_no_projection(self):
with self.assertRaisesRegex(
ValueError, "There is nothing to redraw when num_random_features.*"):
_ = attention.KernelAttention(
num_heads=2, key_dim=64, feature_transform="elu",
num_random_features=0, redraw=True)
def test_config(self):
num_heads = 12
key_dim = 64
test_layer = attention.KernelAttention(
num_heads=num_heads,
key_dim=key_dim,
feature_transform="exp",
num_random_features=128,
is_short_seq=True)
new_layer = attention.KernelAttention.from_config(
test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
def test_rectangular_window_sum(self):
x = tf.ones([2, 5, 2, 2, 2])
winsum = attention.rectangular_window_sum(x, 3)
self.assertEqual(winsum.shape, x.shape)
self.assertAllClose(
tf.tile(
tf.reshape([1., 2., 3., 3., 3.], [1, -1, 1, 1, 1]),
[2, 1, 2, 2, 2]),
winsum)
def test_weighted_window_sum(self):
x = tf.ones([2, 5, 2, 2, 2])
winsum = attention.weighted_window_sum(x, 3, [0.01, 0.1, 1.])
self.assertEqual(winsum.shape, x.shape)
self.assertAllClose(
tf.tile(
tf.reshape([1., 1.1, 1.11, 1.11, 1.11], [1, -1, 1, 1, 1]),
[2, 1, 2, 2, 2]),
winsum)
if __name__ == "__main__":
tf.test.main()
| 9,473 | 35.579151 | 79 | py |
models | models-master/official/nlp/modeling/layers/block_diag_feedforward.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based gated feedforward layer."""
# pylint: disable=g-classes-have-attributes
from typing import Optional
import tensorflow as tf
from official.modeling import tf_utils
class BlockDiagFeedforward(tf.keras.layers.Layer):
"""Block diagonal feedforward layer.
This layer replaces the weight matrix of the output_dense layer with a block
  diagonal matrix to save layer parameters and FLOPs. A linear mixing layer can
  optionally be added to improve the layer's expressiveness.
Args:
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout: Dropout probability for the output dropout.
num_blocks: The number of blocks for the block diagonal matrix of the
output_dense layer.
apply_mixing: Apply linear mixing if True.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
"""
def __init__(
self,
intermediate_size: int,
intermediate_activation: str,
dropout: float,
num_blocks: int = 1,
apply_mixing: bool = True,
kernel_initializer: str = "glorot_uniform",
bias_initializer: str = "zeros",
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
activity_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
kernel_constraint: Optional[tf.keras.constraints.Constraint] = None,
bias_constraint: Optional[tf.keras.constraints.Constraint] = None,
**kwargs): # pylint: disable=g-doc-args
super().__init__(**kwargs)
self._intermediate_size = intermediate_size
self._intermediate_activation = intermediate_activation
self._dropout = dropout
self._num_blocks = num_blocks
self._apply_mixing = apply_mixing
if intermediate_size % num_blocks != 0:
raise ValueError("Intermediate_size (%d) isn't a multiple of num_blocks "
"(%d)." % (intermediate_size, num_blocks))
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
def build(self, input_shape):
hidden_size = input_shape.as_list()[-1]
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
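    # In the "abc,cde->abde" projection below, a=batch, b=sequence, c=input
    # hidden size, d=block index and e=per-block intermediate width, so the
    # intermediate activations are produced as `num_blocks` independent chunks.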
self._intermediate_dense = tf.keras.layers.EinsumDense(
"abc,cde->abde",
output_shape=(None, self._num_blocks,
self._intermediate_size // self._num_blocks),
bias_axes="de",
name="intermediate",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._intermediate_activation, dtype=policy)
self._output_dense = tf.keras.layers.EinsumDense(
"abde,deo->abdo",
output_shape=(None, self._num_blocks, hidden_size // self._num_blocks),
bias_axes="do",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
if self._apply_mixing:
self._output_mixing = tf.keras.layers.EinsumDense(
"abdo,de->abeo",
output_shape=(None, self._num_blocks,
hidden_size // self._num_blocks),
name="output_mixing",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._output_reshape = tf.keras.layers.Reshape((-1, hidden_size))
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout)
def get_config(self):
config = {
"intermediate_size":
self._intermediate_size,
"intermediate_activation":
self._intermediate_activation,
"dropout":
self._dropout,
"num_blocks":
self._num_blocks,
"apply_mixing":
self._apply_mixing,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint)
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
intermediate_output = self._intermediate_dense(inputs)
intermediate_output = self._intermediate_activation_layer(
intermediate_output)
layer_output = self._output_dense(intermediate_output)
if self._apply_mixing:
layer_output = self._output_mixing(layer_output)
layer_output = self._output_reshape(layer_output)
layer_output = self._output_dropout(layer_output)
return layer_output
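# A minimal usage sketch (not part of the original library code); the shapes
# below are illustrative assumptions. With `num_blocks=4`, the output
# projection is built from 4 independent 32x16 blocks instead of a dense
# 128x64 kernel, which is where the parameter and FLOP savings come from.
if __name__ == "__main__":
  layer = BlockDiagFeedforward(
      intermediate_size=128,
      intermediate_activation="relu",
      dropout=0.1,
      num_blocks=4,
      apply_mixing=True)
  outputs = layer(tf.ones((2, 8, 64)))  # [batch, sequence, hidden]
  print(outputs.shape)  # (2, 8, 64): the hidden size is preserved.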
| 7,233 | 40.815029 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
import tensorflow as tf
from official.nlp.modeling.layers import transformer
def _create_cache(batch_size, init_decode_length, num_heads, head_size):
return {
'key':
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32),
'value':
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32)
}
class TransformerDecoderBlockTest(tf.test.TestCase):
def test_decoder_block_with_cache(self):
num_attention_heads = 2
hidden_size = 16
decoder_block = transformer.TransformerDecoderBlock(
num_attention_heads=num_attention_heads,
intermediate_size=32,
intermediate_activation='relu',
dropout_rate=0.1,
attention_dropout_rate=0.1)
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask]
cache = _create_cache(2, 0, num_attention_heads,
hidden_size // num_attention_heads)
output, cache = decoder_block(inputs, cache)
self.assertEqual(output.shape, (2, 4, hidden_size))
self.assertEqual(cache['value'].shape, (2, 4, 2, 8))
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
decoder_block = transformer.TransformerDecoderBlock(
num_attention_heads=num_attention_heads,
intermediate_size=32,
intermediate_activation='relu',
dropout_rate=0.1,
attention_dropout_rate=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask]
output, _ = decoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_get_config(self):
num_attention_heads = 2
decoder_block = transformer.TransformerDecoderBlock(
num_attention_heads=num_attention_heads,
intermediate_size=32,
intermediate_activation='relu',
dropout_rate=0.1,
attention_dropout_rate=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
decoder_block_config = decoder_block.get_config()
new_decoder_block = transformer.TransformerDecoderBlock.from_config(
decoder_block_config)
self.assertEqual(decoder_block_config, new_decoder_block.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,563 | 35.367347 | 74 | py |
models | models-master/official/nlp/modeling/layers/factorized_embedding_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FactorizedEmbedding layer."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import factorized_embedding
class FactorizedEmbeddingTest(tf.test.TestCase):
def test_layer_creation(self):
vocab_size = 31
embedding_width = 27
output_dim = 45
test_layer = factorized_embedding.FactorizedEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
output_dim=output_dim)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, output_dim]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float32)
def test_layer_invocation(self):
vocab_size = 31
embedding_width = 27
output_dim = 45
test_layer = factorized_embedding.FactorizedEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
output_dim=output_dim)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float32, output.dtype)
if __name__ == "__main__":
tf.test.main()
| 2,579 | 35.338028 | 79 | py |
models | models-master/official/nlp/modeling/layers/spectral_normalization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalization layers.
## References:
[1] Yuichi Yoshida, Takeru Miyato. Spectral Norm Regularization for Improving
the Generalizability of Deep Learning.
_arXiv preprint arXiv:1705.10941_, 2017. https://arxiv.org/abs/1705.10941
[2] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida.
Spectral normalization for generative adversarial networks.
In _International Conference on Learning Representations_, 2018.
[3] Henry Gouk, Eibe Frank, Bernhard Pfahringer, Michael Cree.
Regularisation of neural networks by enforcing lipschitz continuity.
_arXiv preprint arXiv:1804.04368_, 2018. https://arxiv.org/abs/1804.04368
"""
import numpy as np
import tensorflow as tf
class SpectralNormalization(tf.keras.layers.Wrapper):
"""Implements spectral normalization for Dense layer."""
def __init__(self,
layer,
iteration=1,
norm_multiplier=0.95,
training=True,
aggregation=tf.VariableAggregation.MEAN,
inhere_layer_name=False,
**kwargs):
"""Initializer.
Args:
layer: (tf.keras.layers.Layer) A TF Keras layer to apply normalization to.
iteration: (int) The number of power iteration to perform to estimate
weight matrix's singular value.
norm_multiplier: (float) Multiplicative constant to threshold the
normalization. Usually under normalization, the singular value will
converge to this value.
training: (bool) Whether to perform power iteration to update the singular
value estimate.
aggregation: (tf.VariableAggregation) Indicates how a distributed variable
will be aggregated. Accepted values are constants defined in the class
tf.VariableAggregation.
inhere_layer_name: (bool) Whether to inhere the name of the input layer.
**kwargs: (dict) Other keyword arguments for the layers.Wrapper class.
"""
self.iteration = iteration
self.do_power_iteration = training
self.aggregation = aggregation
self.norm_multiplier = norm_multiplier
# Set layer name.
wrapper_name = kwargs.pop('name', None)
if inhere_layer_name:
wrapper_name = layer.name
if not isinstance(layer, tf.keras.layers.Layer):
      raise ValueError('`layer` must be a `tf.keras.layers.Layer`. '
'Observed `{}`'.format(layer))
super().__init__(
layer, name=wrapper_name, **kwargs)
def build(self, input_shape): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
super().build(input_shape)
self.layer.kernel._aggregation = self.aggregation # pylint: disable=protected-access
self._dtype = self.layer.kernel.dtype
self.w = self.layer.kernel
self.w_shape = self.w.shape.as_list()
self.v = self.add_weight(
shape=(1, np.prod(self.w_shape[:-1])),
initializer=tf.initializers.random_normal(),
trainable=False,
name='v',
dtype=self.dtype,
aggregation=self.aggregation)
self.u = self.add_weight(
shape=(1, self.w_shape[-1]),
initializer=tf.initializers.random_normal(),
trainable=False,
name='u',
dtype=self.dtype,
aggregation=self.aggregation)
self.update_weights()
def call(self, inputs, *, training=None):
training = self.do_power_iteration if training is None else training
if training:
u_update_op, v_update_op, w_update_op = self.update_weights(
training=training)
output = self.layer(inputs)
w_restore_op = self.restore_weights()
# Register update ops.
self.add_update(u_update_op)
self.add_update(v_update_op)
self.add_update(w_update_op)
self.add_update(w_restore_op)
else:
output = self.layer(inputs)
return output
def update_weights(self, *, training=True):
w_reshaped = tf.reshape(self.w, [-1, self.w_shape[-1]])
u_hat = self.u
v_hat = self.v
if training:
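      # Power iteration (see [1], [2]): alternately refresh the singular vector
      # estimates, v <- normalize(u W^T) and u <- normalize(v W), so that sigma
      # below approximates the largest singular value of W.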
for _ in range(self.iteration):
v_hat = tf.nn.l2_normalize(tf.matmul(u_hat, tf.transpose(w_reshaped)))
u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w_reshaped))
sigma = tf.matmul(tf.matmul(v_hat, w_reshaped), tf.transpose(u_hat))
# Convert sigma from a 1x1 matrix to a scalar.
sigma = tf.reshape(sigma, [])
u_update_op = self.u.assign(u_hat)
v_update_op = self.v.assign(v_hat)
# Bound spectral norm to be not larger than self.norm_multiplier.
w_norm = tf.cond((self.norm_multiplier / sigma) < 1, lambda: # pylint:disable=g-long-lambda
(self.norm_multiplier / sigma) * self.w, lambda: self.w)
w_update_op = self.layer.kernel.assign(w_norm)
return u_update_op, v_update_op, w_update_op
def restore_weights(self):
"""Restores layer weights to maintain gradient update (See Alg 1 of [1])."""
return self.layer.kernel.assign(self.w)
class SpectralNormalizationConv2D(tf.keras.layers.Wrapper):
"""Implements spectral normalization for Conv2D layer based on [3]."""
def __init__(self,
layer,
iteration=1,
norm_multiplier=0.95,
training=True,
aggregation=tf.VariableAggregation.MEAN,
legacy_mode=False,
**kwargs):
"""Initializer.
Args:
layer: (tf.keras.layers.Layer) A TF Keras layer to apply normalization to.
iteration: (int) The number of power iteration to perform to estimate
weight matrix's singular value.
norm_multiplier: (float) Multiplicative constant to threshold the
normalization. Usually under normalization, the singular value will
converge to this value.
training: (bool) Whether to perform power iteration to update the singular
value estimate.
aggregation: (tf.VariableAggregation) Indicates how a distributed variable
will be aggregated. Accepted values are constants defined in the class
tf.VariableAggregation.
legacy_mode: (bool) Whether to use the legacy implementation where the
        dimensions of the u and v vectors are set to the batch size. It should
        not be enabled except for backward compatibility reasons.
**kwargs: (dict) Other keyword arguments for the layers.Wrapper class.
"""
self.iteration = iteration
self.do_power_iteration = training
self.aggregation = aggregation
self.norm_multiplier = norm_multiplier
self.legacy_mode = legacy_mode
# Set layer attributes.
layer._name += '_spec_norm'
if not isinstance(layer, tf.keras.layers.Conv2D):
raise ValueError(
'layer must be a `tf.keras.layer.Conv2D` instance. You passed: {input}'
.format(input=layer))
super().__init__(layer, **kwargs)
def build(self, input_shape): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if not self.layer.built:
self.layer.build(input_shape)
self.layer.kernel._aggregation = self.aggregation # pylint: disable=protected-access
self._dtype = self.layer.kernel.dtype
# Shape (kernel_size_1, kernel_size_2, in_channel, out_channel).
self.w = self.layer.kernel
self.w_shape = self.w.shape.as_list()
self.strides = self.layer.strides
# Set the dimensions of u and v vectors.
batch_size = input_shape[0]
uv_dim = batch_size if self.legacy_mode else 1
# Resolve shapes.
in_height = input_shape[1]
in_width = input_shape[2]
in_channel = self.w_shape[2]
out_height = in_height // self.strides[0]
out_width = in_width // self.strides[1]
out_channel = self.w_shape[3]
self.in_shape = (uv_dim, in_height, in_width, in_channel)
self.out_shape = (uv_dim, out_height, out_width, out_channel)
self.v = self.add_weight(
shape=self.in_shape,
initializer=tf.initializers.random_normal(),
trainable=False,
name='v',
dtype=self.dtype,
aggregation=self.aggregation)
self.u = self.add_weight(
shape=self.out_shape,
initializer=tf.initializers.random_normal(),
trainable=False,
name='u',
dtype=self.dtype,
aggregation=self.aggregation)
super().build()
def call(self, inputs):
u_update_op, v_update_op, w_update_op = self.update_weights()
output = self.layer(inputs)
w_restore_op = self.restore_weights()
# Register update ops.
self.add_update(u_update_op)
self.add_update(v_update_op)
self.add_update(w_update_op)
self.add_update(w_restore_op)
return output
def update_weights(self):
"""Computes power iteration for convolutional filters based on [3]."""
# Initialize u, v vectors.
u_hat = self.u
v_hat = self.v
if self.do_power_iteration:
for _ in range(self.iteration):
# Updates v.
v_ = tf.nn.conv2d_transpose(
u_hat,
self.w,
output_shape=self.in_shape,
strides=self.strides,
padding='SAME')
v_hat = tf.nn.l2_normalize(tf.reshape(v_, [1, -1]))
v_hat = tf.reshape(v_hat, v_.shape)
# Updates u.
u_ = tf.nn.conv2d(v_hat, self.w, strides=self.strides, padding='SAME')
u_hat = tf.nn.l2_normalize(tf.reshape(u_, [1, -1]))
u_hat = tf.reshape(u_hat, u_.shape)
v_w_hat = tf.nn.conv2d(v_hat, self.w, strides=self.strides, padding='SAME')
sigma = tf.matmul(tf.reshape(v_w_hat, [1, -1]), tf.reshape(u_hat, [-1, 1]))
# Convert sigma from a 1x1 matrix to a scalar.
sigma = tf.reshape(sigma, [])
u_update_op = self.u.assign(u_hat)
v_update_op = self.v.assign(v_hat)
w_norm = tf.cond((self.norm_multiplier / sigma) < 1, lambda: # pylint:disable=g-long-lambda
(self.norm_multiplier / sigma) * self.w, lambda: self.w)
w_update_op = self.layer.kernel.assign(w_norm)
return u_update_op, v_update_op, w_update_op
def restore_weights(self):
"""Restores layer weights to maintain gradient update (See Alg 1 of [1])."""
return self.layer.kernel.assign(self.w)
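# A minimal usage sketch (not part of the original library code): wrap Keras
# layers so that their kernels' spectral norms are kept at or below
# `norm_multiplier` while training. Shapes below are illustrative assumptions.
if __name__ == '__main__':
  sn_dense = SpectralNormalization(tf.keras.layers.Dense(units=4))
  y = sn_dense(tf.random.normal((2, 8)))
  print(y.shape)  # (2, 4)
  sn_conv = SpectralNormalizationConv2D(
      tf.keras.layers.Conv2D(
          filters=4, kernel_size=3, strides=2, padding='same'))
  z = sn_conv(tf.random.normal((2, 8, 8, 3)))
  print(z.shape)  # (2, 4, 4, 4)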
| 10,728 | 35.003356 | 106 | py |
models | models-master/official/nlp/modeling/layers/transformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based TransformerEncoder block layer."""
from typing import Any, Optional
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import util
@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerEncoderBlock(tf.keras.layers.Layer):
"""TransformerEncoderBlock layer.
This layer implements the Transformer Encoder from
"Attention Is All You Need". (https://arxiv.org/abs/1706.03762),
which combines a `tf.keras.layers.MultiHeadAttention` layer with a
two-layer feedforward network.
References:
[Attention Is All You Need](https://arxiv.org/abs/1706.03762)
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805)
"""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
use_query_residual=True,
key_dim=None,
value_dim=None,
output_last_dim=None,
diff_q_kv_att_layer_norm=False,
return_attention_scores=False,
**kwargs):
"""Initializes `TransformerEncoderBlock`.
Note: If `output_last_dim` is used and `use_query_residual` is `True`, the
`output_last_dim`'s value must equal the first input's last dimension for
the query residual connection to work. This is because the residual
connection after the multi-head-attention requires their dimensions to
match. If `use_query_residual` is `False`, the `output_last_dim` dictactes
the last dimension of the output of this module and the
multi-head-attention.
E.g. let's say input dims are `[batch_size, seq_dim, input_last_dim]`.
Scenario 1: If `output_last_dim` is not `None`, then the output dims of this
module would be `[batch_size, seq_dim, output_last_dim]`. Note `key_dim` is
    overridden by `output_last_dim`.
Scenario 2: If `output_last_dim` is `None` and `key_dim` is not `None`, then
the output dims of this module would be `[batch_size, seq_dim, key_dim]`.
Scenario 3: If the `output_last_dim` and `key_dim` are both `None`, the
output dims would be `[batch_size, seq_dim, input_last_dim]`.
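    For example, a minimal sketch of Scenario 3 (the shapes and arguments here
    are illustrative assumptions, not requirements of the API):
      block = TransformerEncoderBlock(
          num_attention_heads=2, inner_dim=32, inner_activation="relu")
      output = block(tf.ones((2, 8, 16)))  # output.shape == (2, 8, 16)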
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
      bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
use_query_residual: Toggle to execute residual connection after attention.
key_dim: `key_dim` for the `tf.keras.layers.MultiHeadAttention`. If
`None`, we use the first `input_shape`'s last dim.
value_dim: `value_dim` for the `tf.keras.layers.MultiHeadAttention`.
output_last_dim: Final dimension of the output of this module. This also
dictates the value for the final dimension of the multi-head-attention.
When it's `None`, we use, in order of decreasing precedence, `key_dim` *
`num_heads` or the first `input_shape`'s last dim as the output's last
dim.
diff_q_kv_att_layer_norm: If `True`, create a separate attention layer
norm layer for query and key-value if `norm_first` is `True`. Invalid to
set to `True` if `norm_first` is `False`.
return_attention_scores: If `True`, the output of this layer will be a
tuple and additionally contain the attention scores in the shape of
`[batch_size, num_attention_heads, seq_dim, seq_dim]`.
**kwargs: keyword arguments.
"""
util.filter_kwargs(kwargs)
super().__init__(**kwargs)
# Deprecation warning.
if output_range is not None:
logging.warning("`output_range` is available as an argument for `call()`."
"The `output_range` as __init__ argument is deprecated.")
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout_rate = attention_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
self._use_query_residual = use_query_residual
self._key_dim = key_dim
self._value_dim = value_dim
self._output_last_dim = output_last_dim
self._diff_q_kv_att_layer_norm = diff_q_kv_att_layer_norm
self._return_attention_scores = return_attention_scores
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._attention_axes = attention_axes
if self._diff_q_kv_att_layer_norm and not self._norm_first:
raise ValueError("Setting `diff_q_and_kv_attention_layer_norm` to True"
"when `norm_first` is False is invalid.")
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
logging.warning(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)", hidden_size, self._num_heads)
if self._key_dim is None:
self._key_dim = int(hidden_size // self._num_heads)
if self._output_last_dim is None:
last_output_shape = hidden_size
else:
last_output_shape = self._output_last_dim
common_kwargs = dict(
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._key_dim,
value_dim=self._value_dim,
dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
attention_axes=self._attention_axes,
output_shape=self._output_last_dim,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(
rate=self._attention_dropout_rate)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._attention_layer_norm_kv = self._attention_layer_norm
if self._diff_q_kv_att_layer_norm:
self._attention_layer_norm_kv = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm_kv",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, last_output_shape),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(
rate=self._output_dropout_rate)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self._num_heads,
"inner_dim": self._inner_dim,
"inner_activation": self._inner_activation,
"output_dropout": self._output_dropout_rate,
"attention_dropout": self._attention_dropout_rate,
"output_range": self._output_range,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"inner_dropout": self._inner_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
"attention_axes": self._attention_axes,
"use_query_residual": self._use_query_residual,
"key_dim": self._key_dim,
"value_dim": self._value_dim,
"output_last_dim": self._output_last_dim,
"diff_q_kv_att_layer_norm": self._diff_q_kv_att_layer_norm,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: Any, output_range: Optional[tf.Tensor] = None) -> Any:
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
output_range: the sequence output range, [0, output_range) for slicing the
        target sequence. `None` means the target sequence is not sliced. To keep
        model training unchanged, it is better to only set `output_range` for
        serving.
Returns:
An output tensor with the same dimensions as input/query tensor.
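    Example (an illustrative sketch; `encoder_block` stands for an instance of
    this layer, and the shapes below are arbitrary):
    ```python
    embeddings = tf.ones((2, 16, 64))
    mask = tf.ones((2, 16, 16))
    output = encoder_block(embeddings)                      # self-attention
    output = encoder_block([embeddings, mask])              # with attention mask
    output = encoder_block([embeddings, embeddings, mask])  # query and key/value
    ```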
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if output_range is None:
output_range = self._output_range
if output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor[:, 0:output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
if self._return_attention_scores:
attention_output, attention_scores = self._attention_layer(
query=target_tensor,
value=key_value,
attention_mask=attention_mask,
return_attention_scores=True)
else:
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
# Important to not combine `self._norm_first` and
# `self._use_query_residual` into one if clause because else is only for
# `_norm_first == False`.
if self._use_query_residual:
attention_output = source_tensor + attention_output
else:
if self._use_query_residual:
attention_output = target_tensor + attention_output
attention_output = self._attention_layer_norm(attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
# During mixed precision training, layer norm output is always fp32 for
# now. Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(layer_output + attention_output)
if self._return_attention_scores:
return layer_output, attention_scores
else:
return layer_output
| 18,367 | 43.474576 | 80 | py |
models | models-master/official/nlp/modeling/layers/kernel_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based kernel attention layer."""
import functools
import math
import tensorflow as tf
from official.modeling import tf_utils
_NUMERIC_STABLER = 1e-6
class KernelMask(tf.keras.layers.Layer):
"""Creates kernel attention mask.
inputs: from_tensor: 2D or 3D Tensor of shape
[batch_size, from_seq_length, ...].
mask: a Tensor of shape [batch_size, from_seq_length] which indicates
      which part of the inputs we should not attend to.
Returns:
float Tensor of shape [batch_size, from_seq_length] that KernelAttention
takes as mask.
"""
def call(self, inputs, mask):
mask = tf.cast(mask, inputs.dtype)
return mask
def pad_to_chunk_length(tensor, axis, chunk_length, padding=None):
"""Pads a tensor so that shape[axis] is divisible by chunk_length.
Args:
tensor: Input tensor to pad.
axis: Axis to pad along.
chunk_length: The output tensor will have shape[axis] divisible by
chunk_length.
padding: Pad the input tensor across the axis from either left or right if
padding is set to "left" or "right"; applies no padding if padding is set
to None. In the latter case, the axis dimension of the input tensor must
be divisible by the chunk_length.
Returns:
Padded tensor with shape[axis] divisible by chunk_length.
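  For example (an illustrative sketch with arbitrary shapes):
  ```python
  x = tf.ones([2, 7, 4, 8])
  y = pad_to_chunk_length(x, axis=-3, chunk_length=3, padding="right")
  # y.shape == [2, 9, 4, 8]; the two appended rows along axis -3 are zeros.
  ```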
"""
if padding is None:
return tensor
shape = tf.shape(tensor)
rank = tf.rank(tensor)
if axis < 0:
axis += rank
axis_length = shape[axis]
pad_length = -axis_length % chunk_length
if padding == "right":
axis_paddings = [[0, pad_length]]
elif padding == "left":
axis_paddings = [[pad_length, 0]]
else:
raise ValueError(
"Illegal padding value; must be one of \"left\", \"right\" or None.")
paddings = tf.concat([
tf.zeros([axis, 2], dtype=tf.int32), axis_paddings,
tf.zeros([rank - axis - 1, 2], dtype=tf.int32)
],
axis=0)
return tf.pad(tensor, paddings)
def split_tensor_into_chunks(tensor, axis, chunk_length):
"""Reshape tensor along given axis using chunk_length.
Args:
tensor: Input tensor.
axis: Reshape tensor along this axis.
chunk_length: Split the axis into [axis/chunk_length, chunk_length]
Returns:
Reshaped tensor.
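  For example, a tensor of shape [2, 6, 4, 8] split along axis=-3 with
  chunk_length=3 is reshaped to [2, 2, 3, 4, 8].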
"""
shape = tf.shape(tensor)
num_chunks = shape[axis] // chunk_length
new_shape = tf.concat(
[shape[:axis], [num_chunks, chunk_length], shape[(axis + 1):]], axis=0)
return tf.reshape(tensor, new_shape)
def rectangular_window_sum(tensor, window_length):
"""Summarizes tensor elements over a sliding rectangular window.
Sums elements of the input tensor of shape [B, T', C', H, dim]
across a rectangular window sliding along the dimension T'.
Args:
tensor: Tensor of shape `[B, T', C', H, dim]`.
window_length: The length of the rectangular window.
Returns:
A tensor of shape [B, T', C', H, dim] containing sums over the
window.
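  For example, with window_length=2 the sum at chunk index t is
  tensor[:, t] + tensor[:, t - 1] (and just tensor[:, 0] at t = 0).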
"""
tensor_cumsum = tf.cumsum(tensor, axis=-4)
tensor_winsum = tensor_cumsum - tf.pad(
tensor_cumsum,
[[0, 0], [window_length, 0], [0, 0], [0, 0], [0, 0]])[:, :-window_length]
return tensor_winsum
def weighted_window_sum(tensor, window_length, window_weights):
"""Summarizes tensor elements over a sliding weighted window.
Computes a weighted sum of elements of the input tensor of shape [B,
T', C', H, dim] across a window sliding along the dimension T'.
Args:
tensor: Tensor of shape `[B, T', C', H, dim]`.
window_length: The length of the window.
window_weights: Tensor of shape [window_length] containing window weights.
Returns:
A tensor of shape [B, T', C', H, dim] containing sums over the
window.
"""
# Flatten the last three dimensions of the [B, T', C', H, dim] shape
# into a single channels dimension.
tensor_shape = tf.shape(tensor)
tensor_2d = tf.reshape(tensor, [tensor_shape[0], tensor_shape[1], 1, -1])
# Apply the same weights to all channels.
conv_filter = tf.tile(
tf.reshape(window_weights, [-1, 1, 1, 1]),
multiples=[1, 1, tf.shape(tensor_2d)[-1], 1])
tensor_winsum_2d = tf.nn.depthwise_conv2d(
tensor_2d,
conv_filter,
strides=[1, 1, 1, 1],
padding=[[0, 0], [window_length - 1, 0], [0, 0], [0, 0]])
# Unflatten the channels dimension into the original shape.
tensor_winsum = tf.reshape(tensor_winsum_2d, tensor_shape)
return tensor_winsum
def causal_windowed_performer_attention(query_matrix,
key_matrix,
value_matrix,
chunk_length,
window_length,
window_decay=None,
padding=None,
cache=None):
"""Applies windowed causal kernel attention with query, key, value tensors.
We partition the T-length input sequence into N chunks, each of
chunk_length tokens (thus: T = N * chunk_length). Within each chunk,
we apply bidirectional (non-causal) Performers’ implicit attention
and we model relationships between different chunks using
Performers’ causal attention. We consider windowed causal variant of
performer, where the current chunk attends only to the window of
window_length of the most recent chunks.
Below is an example with T=9, chunk_length=3, window_length=2. In
this example 1 indicates attention is computed between the pair
while 0 indicates attention is not computed between the pairs:
111000000
111000000
111000000
111111000
111111000
111111000
000111111
000111111
000111111
  Users can either ensure sequence_length is divisible by chunk_length or use
  padding="left"/"right" to pad the sequence at the left or right,
  respectively, to make it divisible by chunk_length.
Args:
query_matrix: Kernel query `Tensor` of shape `[B, T, H, dim]`.
key_matrix: Kernel key `Tensor` of shape `[B, T, H, dim]`.
value_matrix: Value `Tensor` of shape `[B, T, H, out_dim]`.
chunk_length: Length of each chunk in tokens.
window_length: Length of attention window in chunks.
window_decay: Float window decay factor or `None`. If set, exponentially
decay past attention window values by this factor before summation.
padding: Pad the query, value and key input tensors across the axis from
either left or right if padding is set to "left" or "right"; apply no
padding if padding is set to None. In the latter case, the axis dimension
of the query, value and key input tensors must be divisible by the
chunk_length.
    cache: Cache to accumulate history in memory. Used at inference time
(streaming, decoding) for causal attention.
Returns:
Window causal performer attention of shape `[B, T, H, out_dim]`.
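  Example (an illustrative sketch with arbitrary shapes; q and k would normally
  be the positive kernel feature maps computed inside KernelAttention):
  ```python
  q = tf.random.uniform([2, 9, 4, 8])
  k = tf.random.uniform([2, 9, 4, 8])
  v = tf.random.uniform([2, 9, 4, 8])
  out = causal_windowed_performer_attention(
      q, k, v, chunk_length=3, window_length=2)
  # out.shape == [2, 9, 4, 8]
  ```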
"""
if cache is None: # Training
old_shape = tf.shape(value_matrix)
query_matrix = pad_to_chunk_length(query_matrix, -3, chunk_length, padding)
key_matrix = pad_to_chunk_length(key_matrix, -3, chunk_length, padding)
value_matrix = pad_to_chunk_length(value_matrix, -3, chunk_length, padding)
new_shape = tf.shape(value_matrix)
chunked_query_matrix = split_tensor_into_chunks(
query_matrix, -3,
chunk_length) # [-1, T//chunk_length, chunk_length, N, dim]
chunked_key_matrix = split_tensor_into_chunks(
key_matrix, -3,
chunk_length) # [-1, T//chunk_length, chunk_length, N, dim]
chunked_value_matrix = split_tensor_into_chunks(
value_matrix, -3,
chunk_length) # [-1, T//chunk_length, chunk_length, N, out_dim]
kp_v = tf.einsum("BTCHD,BTCHO->BTHDO", chunked_key_matrix,
chunked_value_matrix)
k_sum = tf.math.reduce_sum(chunked_key_matrix, axis=-3, keepdims=True)
if window_decay is None:
kp_v_winsum = rectangular_window_sum(kp_v, window_length)
k_winsum = rectangular_window_sum(k_sum, window_length)
else:
# Compute exponentially decaying weights.
decaying_weights = tf.math.pow(
tf.convert_to_tensor(window_decay, dtype=value_matrix.dtype),
tf.range(window_length - 1, -1, delta=-1, dtype=value_matrix.dtype))
kp_v_winsum = weighted_window_sum(kp_v, window_length, decaying_weights)
k_winsum = weighted_window_sum(k_sum, window_length, decaying_weights)
numerator = tf.einsum(
"BTCHD,BTHDO->BTCHO", chunked_query_matrix, kp_v_winsum)
k_winsum = tf.squeeze(k_winsum, -3)
denominator = tf.einsum("BTCHD,BTHD->BTCH", chunked_query_matrix, k_winsum)
denominator = tf.expand_dims(denominator, -1) + _NUMERIC_STABLER
attention = numerator / denominator
attention = tf.reshape(attention, new_shape)
start = tf.zeros([old_shape.shape[0]], dtype=old_shape.dtype)
attention = tf.slice(attention, start, old_shape)
# Queued window cache (drop instead of decay) not yet supported.
else: # Streaming
if window_decay is None or window_decay > 1.0 or window_decay < 0.0:
raise ValueError("window_decay should be in (0.0, 1.0) and not None.")
kv = window_decay * cache["kv"] + tf.einsum(
"BTHD,BTHO->BHOD", key_matrix, value_matrix)
cache["kv"] = kv
k_sum = window_decay * cache["k_sum"] + tf.reduce_sum(key_matrix, axis=1)
cache["k_sum"] = k_sum
denominator = tf.einsum("BTHD,BHD->BTH", query_matrix, k_sum)
# The below is equivalent to but converts to TF Lite better than:
# tf.einsum("BTHD,BTH->BTHD",
# query_matrix, 1.0 / (denominator + _NUMERIC_STABLER))
inverse_denominator = 1.0 / (denominator + _NUMERIC_STABLER)
# Add another dimension to align for the broadcast multiplication.
fused_query_denominator = query_matrix * tf.expand_dims(inverse_denominator,
-1)
attention = tf.einsum("BTHD,BHOD->BTHO", fused_query_denominator, kv)
return attention
def create_projection_matrix(m, d, seed=None):
r"""Constructs the matrix of random projections.
Constructs a matrix of random orthogonal projections. Each projection vector
  has direction chosen uniformly at random and length drawn from the
  \chi(d) distribution.
Args:
m: number of random projections.
d: dimensionality of each random projection.
    seed: random seed used to construct projections. If not set, we use the
      stateful random API.
Returns:
The matrix of random projections of the shape [m, d].
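  Example (an illustrative sketch):
  ```python
  projection_matrix = create_projection_matrix(
      128, 64, seed=tf.constant([0, 1]))
  # projection_matrix.shape == [128, 64]
  ```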
"""
nb_full_blocks = math.ceil(m / d)
block_list = tf.TensorArray(
tf.float32, size=tf.cast(nb_full_blocks, dtype=tf.int32))
stateful = False
if seed is None:
stateful = True
# dummy seed to make sure the graph compiles though the path is not taken.
seed = tf.constant([0, 1])
current_seed = seed
for i in range(nb_full_blocks):
if stateful:
unstructured_block = tf.random.normal((d, d))
else:
unstructured_block = tf.random.stateless_normal((d, d), seed=current_seed)
current_seed = tf.random.stateless_uniform([2],
seed=current_seed,
minval=None,
dtype=tf.int32)
q, _ = tf.linalg.qr(unstructured_block)
q = tf.transpose(q)
block_list = block_list.write(i, q)
final_matrix = block_list.concat()[:m]
  if stateful:
multiplier = tf.norm(tf.random.normal((m, d)), axis=1)
else:
multiplier = tf.norm(
tf.random.stateless_normal((m, d), seed=current_seed), axis=1)
return tf.linalg.matmul(tf.linalg.diag(multiplier), final_matrix)
def _generalized_kernel(x, y, is_query, projection_matrix, f, h):
"""Generalized kernel in RETHINKING ATTENTION WITH PERFORMERS.
Args:
x: The feature being transformed with shape [B, T, N ,H].
y: The extra stats-tensor of shape [B, T, N ,H].
is_query: True if x is a query-tensor.
    projection_matrix: The matrix with shape [M, H] that we project x to, where
M is the number of projections.
f: A non-linear function applied on x or projected x.
    h: A multiplier, a function of x, applied after x is projected and
      transformed. Only applied if projection_matrix is not None.
Returns:
Transformed feature.
"""
del y
del is_query
if projection_matrix is None:
return h(x) * f(x)
else:
x_projected = tf.einsum("BTNH,MH->BTNM", x, projection_matrix)
return h(x) * f(x_projected) / tf.math.sqrt(
tf.cast(tf.shape(projection_matrix)[0], tf.float32))
def expplus(data_orig,
other_data,
is_query,
projection_matrix=None,
numerical_stabilizer=0.000001,
normalize_data=True,
numerical_renormalizer=True,
extra_renormalize_exp_fun=False):
"""FAVOR++ mechanism from the CRT paper: https://arxiv.org/abs/2205.15317 .
Args:
    data_orig: data tensor of shape [B,T,H,D] for which random features are to
      be computed
other_data: additional tensor of the shape [B,F,H,D] used to collect stats
to determine the exact instantiation of the random feature mechanism
is_query: boolean indicating whether <data_orig> tensor is a query tensor
projection_matrix: tensor of the shape [M,D] encoding random projections for
random features (M stands for the number of random features)
numerical_stabilizer: numerical stabilizer for the kernel features
normalize_data: whether to sqrt-d-normalize queries/keys as in the regular
attention
numerical_renormalizer: whether to apply additional renormalization for
numerical stability
extra_renormalize_exp_fun: extra renormalizer for the exponential mapping
applied to construct random features
Returns:
Random feature map tensor for the unbiased softmax-kernel estimation.
"""
data = data_orig
if projection_matrix is None:
return data_orig
projection_matrix = tf.cast(projection_matrix, data.dtype)
if normalize_data:
data_normalizer = 1.0 / tf.math.sqrt(
(tf.math.sqrt(tf.dtypes.cast(data.shape[-1], data.dtype))))
else:
data_normalizer = 1.0
lengths = tf.math.square(data)
lengths = tf.reduce_sum(lengths, axis=tf.keras.backend.ndim(data) - 1)
lengths = tf.expand_dims(lengths, axis=tf.keras.backend.ndim(data) - 1)
lengths = tf.math.sqrt(lengths)
data /= lengths
ratio = 1.0 / tf.math.sqrt(
tf.dtypes.cast(projection_matrix.shape[0], data.dtype))
data_dash = tf.einsum("blhd,md->blhm", data_normalizer * data,
projection_matrix)
diag_data = tf.math.square(data)
diag_data = tf.math.reduce_sum(
diag_data, axis=tf.keras.backend.ndim(data) - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = tf.expand_dims(diag_data, axis=tf.keras.backend.ndim(data) - 1)
# Calculating coefficients A, B of the FAVOR++ mechanism:
_, l, _, _ = tf_utils.get_shape_list(data_orig)
l = tf.cast(l, dtype=tf.float32)
first_sum_of_squares = tf.math.square(data)
first_sum_of_squares = tf.math.reduce_sum(
first_sum_of_squares, axis=(1, -1), keepdims=True)
first_sum_of_squares *= (data_normalizer * data_normalizer)
first_sum_of_squares /= l # data.shape[1]
second_sum_of_squares = tf.math.square(other_data)
second_sum_of_squares = tf.math.reduce_sum(
second_sum_of_squares, axis=(1, -1), keepdims=True)
second_sum_of_squares *= (data_normalizer * data_normalizer)
second_sum_of_squares /= l # other_data.shape[1]
data_sum = tf.math.reduce_sum(data, axis=(1,), keepdims=True)
other_data_sum = tf.math.reduce_sum(other_data, axis=(1,), keepdims=True)
d_prod = tf.einsum("blhd,blhd->blh", data_sum, other_data_sum)
d_prod = tf.expand_dims(d_prod, axis=-1)
d_prod *= (data_normalizer * data_normalizer)
d_prod *= (2.0 / (l * l))
ave = first_sum_of_squares + second_sum_of_squares + d_prod
dim = projection_matrix.shape[-1]
a_coeff = (1.0 / (4.0 * ave)) * (
tf.math.sqrt((2.0 * ave + dim) *
(2.0 * ave + dim) + 8.0 * dim * ave) - 2.0 * ave - dim)
a_coeff = (1.0 - 1.0 / a_coeff) / 8.0
b_coeff = tf.math.sqrt(1.0 - 4.0 * a_coeff)
d_coeff = tf.math.pow(1.0 - 4.0 * a_coeff, dim / 4.0)
a_coeff = tf.stop_gradient(a_coeff)
b_coeff = tf.stop_gradient(b_coeff)
d_coeff = tf.stop_gradient(d_coeff)
# Calculating diag_omega for the FAVOR++ mechanism:
diag_omega = tf.math.square(projection_matrix)
diag_omega = tf.math.reduce_sum(
diag_omega, axis=tf.keras.backend.ndim(projection_matrix) - 1)
diag_omega = tf.expand_dims(diag_omega, axis=0)
diag_omega = tf.expand_dims(diag_omega, axis=0)
diag_omega = tf.expand_dims(diag_omega, axis=0)
diag_omega = a_coeff * diag_omega
if numerical_renormalizer:
if is_query:
last_dims_t = (len(data_dash.shape) - 1,)
stab = b_coeff * tf.math.reduce_max(
data_dash, axis=last_dims_t, keepdims=True)
else:
stab = b_coeff * tf.math.reduce_max(data_dash, keepdims=True)
if extra_renormalize_exp_fun:
extra_stab = tf.reduce_max(diag_data, axis=1, keepdims=True)
stab = tf.math.maximum(stab, extra_stab)
data_dash = ratio * d_coeff * (
tf.math.exp(b_coeff * data_dash - stab - diag_data + diag_omega) +
numerical_stabilizer)
else:
data_dash = ratio * d_coeff * (
tf.math.exp(b_coeff * data_dash - diag_data + diag_omega) +
numerical_stabilizer)
return data_dash
# pylint: disable=g-long-lambda
_CAUSAL_SUPPORT_TRANSFORM_MAP = {
"elu":
functools.partial(
_generalized_kernel,
f=lambda x: tf.keras.activations.elu(x) + 1,
h=lambda x: 1),
"relu":
functools.partial(
_generalized_kernel,
# Improve numerical stability and avoid NaNs in some cases by adding
# a tiny epsilon.
f=lambda x: tf.keras.activations.relu(x) + 1e-3,
h=lambda x: 1),
"square":
functools.partial(_generalized_kernel, f=tf.math.square, h=lambda x: 1),
"exp":
functools.partial(
_generalized_kernel,
# Avoid exp explosion by shifting.
f=lambda x: tf.math.exp(x - tf.math.reduce_max(
x, axis=[1, 2, 3], keepdims=True)),
h=lambda x: tf.math.exp(-0.5 * tf.math.reduce_sum(
tf.math.square(x), axis=-1, keepdims=True)),
),
"expmod":
functools.partial(
_generalized_kernel,
# Avoid exp explosion by shifting.
f=lambda x: tf.math.exp(x - tf.math.reduce_max(
x, axis=[1, 2, 3], keepdims=True)),
h=lambda x: tf.math.exp(-0.5 * tf.math.sqrt(
tf.cast(tf.shape(x)[-1], tf.float32))),
),
"identity":
functools.partial(_generalized_kernel, f=lambda x: x, h=lambda x: 1)
}
_NON_CAUSAL_SUPPORT_TRANSFORM_MAP = {
"expplus": expplus,
}
_TRANSFORM_MAP = {
**_CAUSAL_SUPPORT_TRANSFORM_MAP,
**_NON_CAUSAL_SUPPORT_TRANSFORM_MAP
}
# pylint: enable=g-long-lambda
class KernelAttention(tf.keras.layers.MultiHeadAttention):
"""A variant of efficient transformers which replaces softmax with kernels.
This module combines ideas from the two following papers:
Rethinking Attention with Performers
(https://arxiv.org/abs/2009.14794)
- exp (Lemma 1, positive), relu
- random/deterministic projection
Chefs' Random Tables: Non-Trigonometric Random Features
(https://arxiv.org/abs/2205.15317)
- expplus (OPRF mechanism)
Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention
(https://arxiv.org/abs/2006.16236)
- elu
with the theory of approximating angular Performer kernels from go/performer.
The module enables computing efficient attention in both: long sequence and
shorter sequence regimes. In the former setting, the attention matrix is never
explicitly computed and instead its low-rank decomposition obtained with given
kernel feature maps is leveraged to conduct attention module calculations
(see: https://arxiv.org/abs/2006.16236). In the latter setting, attention
matrix is constructed, but kernel features providing dimensionality reduction
are applied, resulting in more efficient computation of the attention matrix.
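  Example (an illustrative usage sketch; shapes and hyperparameters are
  arbitrary):
  ```python
  attention = KernelAttention(
      num_heads=8, key_dim=64, feature_transform="exp",
      num_random_features=256)
  query = tf.keras.Input(shape=(100, 512))
  value = tf.keras.Input(shape=(100, 512))
  output = attention(query=query, value=value)  # shape: (None, 100, 512)
  ```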
"""
def __init__(self,
feature_transform="exp",
num_random_features=256,
seed=0,
redraw=False,
is_short_seq=False,
begin_kernel=0,
scale=None,
scale_by_length=False,
use_causal_windowed=False,
causal_chunk_length=1,
causal_window_length=3,
causal_window_decay=None,
causal_padding=None,
**kwargs):
r"""Constructor of KernelAttention.
Args:
feature_transform: A non-linear transform of the keys and queries.
Possible transforms are "elu", "relu", "square", "exp", "expplus",
"expmod", "identity".
num_random_features: Number of random features to be used for projection.
        If num_random_features <= 0, no projection is used before transform.
      seed: The seed to begin drawing random features. Once the seed is set,
        the pseudo-random number generation is deterministic. Users should
        pass a different seed for different layers. For multi-worker, each
        layer will use the same projection at each step.
redraw: Whether to redraw projection every forward pass during training.
The argument is only effective when num_random_features > 0.
is_short_seq: boolean predicate indicating whether input data consists of
very short sequences or not; in most cases this should be False (default
option).
begin_kernel: Apply kernel_attention after this sequence id and apply
softmax attention before this.
scale: The value to scale the dot product as described in `Attention Is
All You Need`. If None, we use 1/sqrt(dk) as described in the paper.
      scale_by_length: boolean predicate indicating whether to additionally
        scale the dot product based on key length. The scale is set to
        log_512(n) to stabilize attention entropy against length. Refer to
https://kexue.fm/archives/8823 for details.
use_causal_windowed: If true perform windowed causal attention. See
causal_windowed_performer_attention function docstring for more details.
causal_chunk_length: Length of each chunk in tokens.
causal_window_length: Length of attention window in chunks.
causal_window_decay: Float window decay factor or `None`. If set,
exponentially decay past attention window values by this factor before
summation.
causal_padding: Pad the query, value and key input tensors across the axis
from either left or right if padding is set to "left" or "right"; apply
no padding if padding is set to None. In the latter case, the axis
dimension of the query, value and key input tensors must be divisible by
the chunk_length.
**kwargs: The same arguments `MultiHeadAttention` layer.
"""
if feature_transform not in _TRANSFORM_MAP:
raise ValueError("Unsupported feature_transform. The supported "
"feature_transform are %s. "
"Got '%s'." % (_TRANSFORM_MAP.keys(), feature_transform))
if num_random_features <= 0 and redraw:
raise ValueError(
"There is nothing to redraw when num_random_features <= 0.")
self._feature_transform = feature_transform
self._num_random_features = num_random_features
self._redraw = redraw
self._is_short_seq = is_short_seq
self._begin_kernel = begin_kernel
self._scale_by_length = scale_by_length
# We use the seed for two scenarios:
# 1. inference
# 2. no redraw
self._seed = seed
super().__init__(**kwargs)
if scale is None:
self._scale = 1.0 / math.sqrt(float(self._key_dim))
else:
self._scale = scale
self._projection_matrix = None
if num_random_features > 0:
self._projection_matrix = create_projection_matrix(
self._num_random_features, self._key_dim,
tf.constant([self._seed, self._seed + 1]))
self.use_causal_windowed = use_causal_windowed
self.causal_chunk_length = causal_chunk_length
self.causal_window_length = causal_window_length
self.causal_window_decay = causal_window_decay
self.causal_padding = causal_padding
if self.use_causal_windowed and self._is_short_seq:
raise ValueError(
"use_causal_windowed and short_seq methods are mutually exclusive")
def _compute_attention(self,
query,
key,
value,
feature_transform,
is_short_seq,
attention_mask=None,
cache=None,
training=False,
numeric_stabler=_NUMERIC_STABLER):
"""Applies kernel attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.
key: Projected key `Tensor` of shape `[B, S, N, key_dim]`.
value: Projected value `Tensor` of shape `[B, S, N, value_dim]`.
      feature_transform: A non-linear transform of the keys and queries.
is_short_seq: boolean predicate indicating whether input data consists of
short or long sequences; usually short sequence is defined as having
length L <= 1024.
      attention_mask: a boolean mask of shape `[B, S]` that prevents attending
        to masked positions. Note that the mask is only applied to the keys.
        Users may want to mask the output if the query contains pads.
      cache: Cache to accumulate history in memory. Used at inference time
(streaming, decoding) for causal attention.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
numeric_stabler: A scalar value added to avoid divide by 0.
Returns:
attention_output: Multi-headed outputs of attention computation.
"""
projection_matrix = None
if self._num_random_features > 0:
if self._redraw and training:
projection_matrix = create_projection_matrix(self._num_random_features,
self._key_dim)
else:
projection_matrix = self._projection_matrix
if self._scale_by_length:
scale = tf.math.log(tf.reduce_sum(attention_mask,
axis=-1)) * self._scale / math.log(512)
scale = tf.reshape(scale, [-1, 1, 1, 1])
else:
scale = self._scale
if is_short_seq:
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = query * scale
else:
      # Note: we suspect splitting the scale between key and query yields
      # smaller approximation variance when random projection is used.
# For simplicity, we also split when there's no random projection.
key *= tf.math.sqrt(scale)
query *= tf.math.sqrt(scale)
key_prime = _TRANSFORM_MAP[feature_transform](key, query, False,
projection_matrix)
query_prime = _TRANSFORM_MAP[feature_transform](query, key, True,
projection_matrix)
if attention_mask is not None:
key_prime = tf.einsum("BSNH,BS->BSNH", key_prime, attention_mask)
if is_short_seq:
attention_scores = tf.einsum("BTNH,BSNH->BTSN", query_prime, key_prime)
attention_scores = tf.nn.softmax(attention_scores, axis=2)
attention_output = tf.einsum("BTSN,BSNH->BTNH", attention_scores, value)
elif self.use_causal_windowed:
attention_output = causal_windowed_performer_attention(
query_prime,
key_prime,
value,
chunk_length=self.causal_chunk_length,
window_length=self.causal_window_length,
window_decay=self.causal_window_decay,
padding=self.causal_padding,
cache=cache)
else:
kv = tf.einsum("BSNH,BSND->BNDH", key_prime, value)
denominator = 1.0 / (
tf.einsum("BTNH,BNH->BTN", query_prime,
tf.reduce_sum(key_prime, axis=1)) + _NUMERIC_STABLER)
attention_output = tf.einsum("BTNH,BNDH,BTN->BTND", query_prime, kv,
denominator)
return attention_output
def _build_from_signature(self, query, value, key=None):
super()._build_from_signature(query=query, value=value, key=key) # pytype: disable=attribute-error # typed-keras
if self._begin_kernel > 0:
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._output_dense_softmax = self._make_output_dense(
self._query_shape.rank - 1,
common_kwargs,
name="attention_output_softmax")
self._dropout_softmax = tf.keras.layers.Dropout(rate=self._dropout)
def call(self, query, value, key=None, attention_mask=None, cache=None,
training=False):
"""Compute attention with kernel mechanism.
Args:
query: Query `Tensor` of shape `[B, T, dim]`.
value: Value `Tensor` of shape `[B, S, dim]`.
key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
      attention_mask: a boolean mask of shape `[B, S]` that prevents attending
        to masked positions. Note that the mask is only applied to the keys.
        Users may want to mask the output if the query contains pads.
      cache: Cache to accumulate history in memory. Used at inference time
(streaming, decoding) for causal attention.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
Multi-headed outputs of attention computation.
"""
if cache is not None:
if training:
raise ValueError(
"Cache is not supported when training is True.")
if not self.use_causal_windowed:
raise ValueError(
"Cache is not supported for non use_causal_windowed case.")
if self._begin_kernel:
raise ValueError(
"Cache is not supported when begin_kernel is set since the bahvior "
"is too complicated.")
if self._feature_transform in _NON_CAUSAL_SUPPORT_TRANSFORM_MAP:
raise ValueError("Cache is not supported for feature_transform %s" %
(self._feature_transform))
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# N = `num_attention_heads`
# H = `size_per_head`
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S, N, H]
key = self._key_dense(key)
# `value` = [B, S, N, D]
value = self._value_dense(value)
if self._begin_kernel > 0:
attention_output_softmax = self._compute_attention(
query[:, :self._begin_kernel], key, value, "identity", True,
attention_mask, training)
attention_output_softmax = self._dropout_softmax(attention_output_softmax)
attention_output_softmax = self._output_dense_softmax(
attention_output_softmax)
attention_output_kernel = self._compute_attention(
query[:, self._begin_kernel:], key, value, self._feature_transform,
self._is_short_seq, attention_mask, training)
attention_output_kernel = self._dropout_layer(attention_output_kernel)
attention_output_kernel = self._output_dense(attention_output_kernel)
attention_output = tf.concat(
[attention_output_softmax, attention_output_kernel], axis=1)
else:
attention_output = self._compute_attention(query, key, value,
self._feature_transform,
self._is_short_seq,
attention_mask,
cache,
training)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_output = self._dropout_layer(attention_output)
attention_output = self._output_dense(attention_output)
return attention_output
def get_config(self):
config = {
"feature_transform": self._feature_transform,
"num_random_features": self._num_random_features,
"seed": self._seed,
"redraw": self._redraw,
"is_short_seq": self._is_short_seq,
"begin_kernel": self._begin_kernel,
"scale": self._scale,
"scale_by_length": self._scale_by_length,
"use_causal_windowed": self.use_causal_windowed,
"causal_chunk_length": self.causal_chunk_length,
"causal_window_length": self.causal_window_length,
"causal_window_decay": self.causal_window_decay,
"causal_padding": self.causal_padding,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| 34,871 | 40.317536 | 118 | py |
models | models-master/official/nlp/modeling/layers/position_embedding.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based positional embedding layer."""
# pylint: disable=g-classes-have-attributes
import math
from typing import Optional
import tensorflow as tf
from official.modeling import tf_utils
Initializer = tf.keras.initializers.Initializer
@tf.keras.utils.register_keras_serializable(package="Text")
class PositionEmbedding(tf.keras.layers.Layer):
"""Creates a positional embedding.
Example:
```python
position_embedding = PositionEmbedding(max_length=100)
inputs = tf.keras.Input((100, 32), dtype=tf.float32)
outputs = position_embedding(inputs)
```
Args:
max_length: The maximum size of the dynamic sequence.
initializer: The initializer to use for the embedding weights. Defaults to
"glorot_uniform".
seq_axis: The axis of the input tensor where we add the embeddings.
Reference: This layer creates a positional embedding as described in
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805).
"""
def __init__(self,
max_length,
initializer="glorot_uniform",
seq_axis=1,
**kwargs):
super().__init__(**kwargs)
if max_length is None:
raise ValueError(
"`max_length` must be an Integer, not `None`."
)
self._max_length = max_length
self._initializer = tf.keras.initializers.get(initializer)
self._seq_axis = seq_axis
def get_config(self):
config = {
"max_length": self._max_length,
"initializer": tf.keras.initializers.serialize(self._initializer),
"seq_axis": self._seq_axis,
}
base_config = super(PositionEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
dimension_list = input_shape
width = dimension_list[-1]
weight_sequence_length = self._max_length
self._position_embeddings = self.add_weight(
"embeddings",
shape=[weight_sequence_length, width],
initializer=self._initializer)
super().build(input_shape)
def call(self, inputs):
input_shape = tf.shape(inputs)
actual_seq_len = input_shape[self._seq_axis]
position_embeddings = self._position_embeddings[:actual_seq_len, :]
new_shape = [1 for _ in inputs.get_shape().as_list()]
new_shape[self._seq_axis] = actual_seq_len
new_shape[-1] = position_embeddings.get_shape().as_list()[-1]
position_embeddings = tf.reshape(position_embeddings, new_shape)
return tf.broadcast_to(position_embeddings, input_shape)
@tf.keras.utils.register_keras_serializable(package="Text")
class RelativePositionEmbedding(tf.keras.layers.Layer):
"""Creates a positional embedding.
This layer calculates the position encoding as a mix of sine and cosine
  functions with geometrically increasing wavelengths. Defined and formalized in
"Attention is All You Need", section 3.5.
(https://arxiv.org/abs/1706.03762).
Args:
hidden_size: Size of the hidden layer.
    min_timescale: Minimum scale that will be applied at each position.
max_timescale: Maximum scale that will be applied at each position.
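  Example (an illustrative sketch):
  ```python
  position_encoding = RelativePositionEmbedding(hidden_size=256)
  inputs = tf.ones((2, 10, 256))
  outputs = position_encoding(inputs)  # shape: (10, 256)
  ```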
"""
def __init__(self,
hidden_size: int,
min_timescale: float = 1.0,
max_timescale: float = 1.0e4,
**kwargs):
# We need to have a default dtype of float32, since the inputs (which Keras
# usually uses to infer the dtype) will always be int32.
# We compute the positional encoding in float32 even if the model uses
# float16, as many of the ops used, like log and exp, are numerically
# unstable in float16.
if "dtype" not in kwargs:
kwargs["dtype"] = "float32"
super().__init__(**kwargs)
self._hidden_size = hidden_size
self._min_timescale = min_timescale
self._max_timescale = max_timescale
def get_config(self):
config = {
"hidden_size": self._hidden_size,
"min_timescale": self._min_timescale,
"max_timescale": self._max_timescale,
}
base_config = super(RelativePositionEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, length=None):
"""Implements call() for the layer.
Args:
      inputs: A tensor whose second dimension will be used as `length`. If
`None`, the other `length` argument must be specified.
length: An optional integer specifying the number of positions. If both
        `inputs` and `length` are specified, `length` must be equal to the second
dimension of `inputs`.
Returns:
A tensor in shape of `(length, hidden_size)`.
"""
if inputs is None and length is None:
raise ValueError("If inputs is None, `length` must be set in "
"RelativePositionEmbedding().")
if inputs is not None:
input_shape = tf_utils.get_shape_list(inputs)
if length is not None and length != input_shape[1]:
raise ValueError(
"If inputs is not None, `length` must equal to input_shape[1].")
length = input_shape[1]
position = tf.cast(tf.range(length), tf.float32)
num_timescales = self._hidden_size // 2
min_timescale, max_timescale = self._min_timescale, self._max_timescale
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.cast(num_timescales, tf.float32) - 1))
inv_timescales = min_timescale * tf.exp(
tf.cast(tf.range(num_timescales), tf.float32) *
-log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(
inv_timescales, 0)
position_embeddings = tf.concat(
[tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return position_embeddings
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position.
If `bidirectional=False`, then positive relative positions are invalid.
We use smaller buckets for small absolute relative_position and larger
buckets for larger absolute relative_positions.
All relative positions >=max_distance map to the same bucket.
All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences
than the model has been trained on.
Args:
relative_position: An int32 Tensor
bidirectional: A boolean - whether the attention is bidirectional
num_buckets: An integer
max_distance: An integer
Returns:
A Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
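  For example, with bidirectional=True and num_buckets=32, a relative position
  of 0 maps to bucket 0, a relative position of -1 (attending one token back)
  maps to bucket 1, and a relative position of +1 maps to bucket 17, since the
  upper half of the buckets is reserved for positive relative positions.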
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += tf.cast(tf.math.less(n, 0), tf.int32) * num_buckets
n = tf.math.abs(n)
else:
n = tf.math.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = tf.math.less(n, max_exact)
val_if_large = max_exact + tf.dtypes.cast(
tf.math.log(tf.cast(n, tf.float32) / max_exact) /
math.log(max_distance / max_exact) * (num_buckets - max_exact),
tf.int32,
)
val_if_large = tf.math.minimum(val_if_large, num_buckets - 1)
ret += tf.where(is_small, n, val_if_large)
return ret
@tf.keras.utils.register_keras_serializable(package="Text")
class RelativePositionBias(tf.keras.layers.Layer):
"""Relative position embedding via per-head bias in T5 style.
Reference implementation in MeshTF:
https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L1000
This layer implements the relative position bias used in "Exploring the Limits
of Transfer Learning with a Unified Text-to-Text Transformer"
(https://arxiv.org/abs/1910.10683)
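  Example (an illustrative sketch):
  ```python
  relative_bias = RelativePositionBias(num_heads=8)
  query = tf.ones((2, 10, 64))
  key = tf.ones((2, 12, 64))
  bias = relative_bias(query, key)  # shape: (2, 8, 10, 12)
  ```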
"""
def __init__(self,
num_heads: int,
relative_attention_num_buckets: int = 32,
relative_attention_max_distance: int = 128,
bidirectional: bool = True,
embeddings_initializer: Optional[Initializer] = None,
**kwargs):
super().__init__(**kwargs)
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.bidirectional = bidirectional
self.relative_attention_max_distance = relative_attention_max_distance
if embeddings_initializer:
self._embed_init = embeddings_initializer
else:
self._embed_init = tf.keras.initializers.TruncatedNormal(stddev=1.0)
with tf.name_scope(self.name):
self._relative_attention_bias = self.add_weight(
"rel_embedding",
shape=[self.relative_attention_num_buckets, self.num_heads],
initializer=self._embed_init,
dtype=self.dtype,
trainable=True)
def get_config(self):
config = {
"num_heads":
self.num_heads,
"relative_attention_num_buckets":
self.relative_attention_num_buckets,
"relative_attention_max_distance":
self.relative_attention_max_distance,
"bidirectional":
self.bidirectional,
"embeddings_initializer":
tf.keras.initializers.serialize(self._embed_init),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, query: tf.Tensor, key: tf.Tensor):
"""Implements the forward pass.
Args:
query: query input tensor shape [batch, query length, hidden size].
key: key input tensor shape [batch, key length, hidden size].
Returns:
A tensor in shape of [batch, heads, query length, key length].
"""
batch_size, qlen = tf_utils.get_shape_list(query)[:2]
klen = tf_utils.get_shape_list(key)[1]
context_position = tf.range(qlen)[:, None]
memory_position = tf.range(klen)[None, :]
relative_position = memory_position - context_position
rp_bucket = _relative_position_bucket(
relative_position,
bidirectional=self.bidirectional,
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance)
values = tf.nn.embedding_lookup(self._relative_attention_bias, rp_bucket)
values = tf.expand_dims(
tf.transpose(values, [2, 0, 1]),
axis=0) # shape (1, num_heads, qlen, klen)
values = tf.tile(values, [batch_size, 1, 1, 1])
return values
| 11,337 | 35.811688 | 104 | py |
models | models-master/official/nlp/modeling/layers/talking_heads_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attention layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import talking_heads_attention
# This test is revised base on attention.MultiHeadAttentionTest.
class TalkingHeadsAttentionTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("key_value_same_proj", None, None, [40, 80]),
("key_value_different_proj", 32, 60, [40, 60]),
)
def test_non_masked_attention(self, value_dim, output_shape, output_dims):
"""Test that the attention layer can be created without a mask tensor."""
test_layer = talking_heads_attention.TalkingHeadsAttention(
num_heads=12,
key_dim=64,
value_dim=value_dim,
output_shape=output_shape)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
value = tf.keras.Input(shape=(20, 80))
output = test_layer(query=query, value=value)
self.assertEqual(output.shape.as_list(), [None] + output_dims)
def test_non_masked_self_attention(self):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = talking_heads_attention.TalkingHeadsAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output = test_layer(query=query, value=query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_attention_scores(self):
"""Test attention outputs with coefficients."""
test_layer = talking_heads_attention.TalkingHeadsAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output, coef = test_layer(query=query, value=query,
return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])
@parameterized.named_parameters(("with_bias", True), ("no_bias", False))
def test_masked_attention(self, use_bias):
"""Test with a mask tensor."""
test_layer = talking_heads_attention.TalkingHeadsAttention(
num_heads=12, key_dim=2, use_bias=use_bias)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = tf.keras.Input(shape=(4, 8))
value = tf.keras.Input(shape=(2, 8))
mask_tensor = tf.keras.Input(shape=(4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor)
# Create a model containing the test layer.
model = tf.keras.Model([query, value, mask_tensor], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict([from_data, to_data, mask_data])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict([from_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Tests the layer with three inputs: Q, K, V.
key = tf.keras.Input(shape=(2, 8))
output = test_layer(
query=query, value=value, key=key, attention_mask=mask_tensor)
model = tf.keras.Model([query, value, key, mask_tensor], output)
masked_output_data = model.predict([from_data, to_data, to_data, mask_data])
unmasked_output_data = model.predict(
[from_data, to_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
if use_bias:
self.assertLen(test_layer._query_dense.trainable_variables, 2)
self.assertLen(test_layer._output_dense.trainable_variables, 2)
else:
self.assertLen(test_layer._query_dense.trainable_variables, 1)
self.assertLen(test_layer._output_dense.trainable_variables, 1)
def test_initializer(self):
"""Test with a specified initializer."""
test_layer = talking_heads_attention.TalkingHeadsAttention(
num_heads=12,
key_dim=64,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
output = test_layer(query=query, value=query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
@parameterized.named_parameters(
("4d_inputs_one_free_batch", [3, 4], [3, 2], [4, 2], (2,)),
("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)),
("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3)))
def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes):
"""Test with a mask tensor."""
test_layer = talking_heads_attention.TalkingHeadsAttention(
num_heads=12, key_dim=2, attention_axes=attention_axes)
batch_size, hidden_size = 3, 8
# Generate data for the input (non-mask) tensors.
query_shape = [batch_size] + q_dims + [hidden_size]
value_shape = [batch_size] + v_dims + [hidden_size]
mask_shape = [batch_size] + mask_dims
query = 10 * np.random.random_sample(query_shape)
value = 10 * np.random.random_sample(value_shape)
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=mask_shape).astype("bool")
output = test_layer(query=query, value=value, attention_mask=mask_data)
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones(mask_shape)
unmasked_output = test_layer(
query=query, value=value, attention_mask=null_mask_data)
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(output, unmasked_output)
if __name__ == "__main__":
tf.test.main()
| 7,012 | 43.106918 | 80 | py |
models | models-master/official/nlp/modeling/layers/rezero_transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based rezero-transformer block layer (Transformer with ReZero)."""
# pylint: disable=g-classes-have-attributes
from typing import Optional
from absl import logging
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import util
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class ReZeroTransformer(tf.keras.layers.Layer):
"""Transformer layer with ReZero.
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762).
The residual connection implements the ReZero method.
(https://arxiv.org/abs/2003.04887)
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
    output_range: the sequence output range, [0, output_range), for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
    use_layer_norm: Whether to add layer_norm on top of the ReZero residual.
    share_rezero: Whether the attention layer and the FFN layer share the same
      alpha.
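  Example (an illustrative sketch; shapes and hyperparameters are arbitrary):
  ```python
  block = ReZeroTransformer(
      num_attention_heads=8, inner_dim=2048, inner_activation="relu")
  data = tf.ones((2, 16, 512))
  output = block(data)  # shape: (2, 16, 512)
  ```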
"""
def __init__(self,
num_attention_heads,
inner_dim=768,
inner_activation=tf_utils.get_activation("gelu"),
dropout_rate=0.0,
attention_dropout_rate=0.0,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_layer_norm=False,
share_rezero=True,
**kwargs):
# attention_dropout will override attention_dropout_rate.
# This is to unify the input params with TransformerEncoderBlock.
attention_dropout_rate = kwargs.pop("attention_dropout",
attention_dropout_rate)
dropout_rate = kwargs.pop("output_dropout", dropout_rate)
inner_dim = kwargs.pop("intermediate_size", inner_dim)
inner_activation = kwargs.pop("intermediate_activation", inner_activation)
util.filter_kwargs(kwargs)
super().__init__(**kwargs)
# Deprecation warning.
if output_range is not None:
logging.warning("`output_range` is avaliable as an argument for `call()`."
"The `output_range` as __init__ argument is deprecated.")
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout_rate = attention_dropout_rate
self._dropout_rate = dropout_rate
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_layer_norm = use_layer_norm
self._share_rezero = share_rezero
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
if len(input_tensor_shape.as_list()) != 3:
raise ValueError("TransformerLayer expects a three-dimensional input of "
"shape [batch, sequence, width].")
batch_size, sequence_length, hidden_size = input_tensor_shape
if len(input_shape) == 2:
mask_tensor_shape = tf.TensorShape(input_shape[1])
expected_mask_tensor_shape = tf.TensorShape(
[batch_size, sequence_length, sequence_length])
if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
raise ValueError("When passing a mask tensor to TransformerLayer, the "
"mask tensor must be of shape [batch, "
"sequence_length, sequence_length] (here %s). Got a "
"mask tensor of shape %s." %
(expected_mask_tensor_shape, mask_tensor_shape))
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout_rate,
name="self_attention",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
if self._use_layer_norm:
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=1e-12,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self._inner_dim),
bias_axes="d",
name="intermediate",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._inner_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
if self._use_layer_norm:
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
self._rezero_a = self.add_weight(
name="rezero_alpha",
initializer=tf.keras.initializers.Zeros(),
trainable=True,
dtype=tf.float32)
if self._share_rezero:
self._rezero_a_ffn = self._rezero_a
else:
self._rezero_a_ffn = self.add_weight(
name="rezero_alpha_ffn",
initializer=tf.keras.initializers.Zeros(),
trainable=True,
dtype=tf.float32)
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"dropout_rate":
self._dropout_rate,
"attention_dropout_rate":
self._attention_dropout_rate,
"output_range":
self._output_range,
"use_layer_norm":
self._use_layer_norm,
"share_rezero":
self._share_rezero,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def reset_rezero(self):
self._rezero_a.assign(0.)
if not self._share_rezero:
self._rezero_a_ffn.assign(0.)
def call(self, inputs, output_range: Optional[tf.Tensor] = None) -> tf.Tensor:
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
        raise ValueError("Unexpected inputs to %s with length %d" %
                         (self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if output_range is None:
output_range = self._output_range
if output_range:
target_tensor = input_tensor[:, 0:output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:output_range, :]
else:
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
attention_output = target_tensor + self._rezero_a * attention_output
if self._use_layer_norm:
attention_output = self._attention_layer_norm(attention_output)
else:
attention_output = tf.cast(attention_output, tf.float32)
intermediate_output = self._intermediate_dense(attention_output)
intermediate_output = self._inner_activation_layer(intermediate_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
    # During mixed precision training, attention_output is always fp32 at this
    # point (from the float32 layer norm, or the explicit cast above). Cast
    # layer_output to fp32 for the subsequent add.
layer_output = attention_output + tf.cast(self._rezero_a_ffn * layer_output,
tf.float32)
if self._use_layer_norm:
layer_output = self._output_layer_norm(layer_output)
return layer_output
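# Illustrative usage sketch (editor addition, not part of the original module):
# it assumes the layer defined above is exported by this module as
# `ReZeroTransformer`; the shapes and hyperparameters below are arbitrary
# values chosen only for demonstration.
def _rezero_transformer_usage_sketch():
  layer = ReZeroTransformer(  # Name of the class defined above (assumed).
      num_attention_heads=4,
      inner_dim=256,
      inner_activation="relu",
      use_layer_norm=False,
      share_rezero=True)
  data = tf.ones((2, 16, 64))   # [batch, sequence, width]
  mask = tf.ones((2, 16, 16))   # [batch, from_sequence, to_sequence]
  output = layer([data, mask])  # Same shape as `data`: (2, 16, 64).
  return output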
| 12,529 | 41.764505 | 80 | py |
models | models-master/official/nlp/modeling/layers/mixing_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mixing.py."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import mixing
class MixingTest(tf.test.TestCase):
def test_base_mixing_layer(self):
inputs = tf.random.uniform((3, 8, 16),
minval=0,
maxval=10,
dtype=tf.float32)
with self.assertRaisesRegex(NotImplementedError, "Abstract method"):
_ = mixing.MixingLayer()(query=inputs, value=inputs)
def test_fourier_layer(self):
batch_size = 4
max_seq_length = 8
hidden_dim = 16
inputs = tf.random.uniform((batch_size, max_seq_length, hidden_dim),
minval=0,
maxval=10,
dtype=tf.float32)
outputs = mixing.FourierTransformLayer(use_fft=True)(
query=inputs, value=inputs)
self.assertEqual(outputs.shape, (batch_size, max_seq_length, hidden_dim))
def test_hartley_layer(self):
batch_size = 3
max_seq_length = 16
hidden_dim = 4
inputs = tf.random.uniform((batch_size, max_seq_length, hidden_dim),
minval=0,
maxval=12,
dtype=tf.float32)
outputs = mixing.HartleyTransformLayer(use_fft=True)(
query=inputs, value=inputs)
self.assertEqual(outputs.shape, (batch_size, max_seq_length, hidden_dim))
def test_linear_mixing_layer(self):
batch_size = 2
max_seq_length = 4
hidden_dim = 3
inputs = tf.ones((batch_size, max_seq_length, hidden_dim), dtype=tf.float32)
outputs = mixing.LinearTransformLayer(
kernel_initializer=tf.keras.initializers.Ones())(
query=inputs, value=inputs)
# hidden_dim * (max_seq_length * 1) = 12.
expected_outputs = [
[
[12., 12., 12.],
[12., 12., 12.],
[12., 12., 12.],
[12., 12., 12.],
],
[
[12., 12., 12.],
[12., 12., 12.],
[12., 12., 12.],
[12., 12., 12.],
],
]
np.testing.assert_allclose(outputs, expected_outputs, rtol=1e-6, atol=1e-6)
def test_pick_fourier_transform(self):
# Ensure we don't hit an edge case which exceeds the fixed numerical error.
tf.random.set_seed(1)
np.random.seed(1)
batch_size = 3
max_seq_length = 4
hidden_dim = 8
fft = mixing._pick_fourier_transform(
use_fft=True, max_seq_length=max_seq_length, hidden_dim=hidden_dim)
dft_matmul = mixing._pick_fourier_transform(
use_fft=False, max_seq_length=max_seq_length, hidden_dim=hidden_dim)
inputs = tf.random.uniform([batch_size, max_seq_length, hidden_dim])
inputs = tf.cast(inputs, tf.complex64)
np.testing.assert_allclose(
fft(inputs), dft_matmul(inputs), rtol=1e-6, atol=1e-6)
if __name__ == "__main__":
tf.test.main()
| 3,549 | 31.272727 | 80 | py |
models | models-master/official/nlp/modeling/layers/bigbird_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based bigbird attention layer."""
import numpy as np
import tensorflow as tf
MAX_SEQ_LEN = 4096
def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
Returns:
float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4,
from_block_size, 3*to_block_size].
"""
exp_blocked_to_pad = tf.concat([
to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:,
3:-1]
], 2)
band_mask = tf.einsum("BLQ,BLK->BLQK", from_blocked_mask[:, 2:-2],
exp_blocked_to_pad)
band_mask = tf.expand_dims(band_mask, 1)
return band_mask
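# Illustrative sketch (editor addition, not part of the original module): with
# 8 blocks of 64 tokens each, the band mask covers, for each of the 8 - 4 = 4
# "middle" blocks, the block itself plus its left and right neighbours
# (3 * 64 keys per query position). The sizes are arbitrary example values.
def _band_mask_shape_sketch():
  # [batch, seq_length // block_size, block_size]
  blocked_mask = tf.ones((2, 8, 64))
  band_mask = create_band_mask_from_inputs(blocked_mask, blocked_mask)
  return band_mask  # Shape: [2, 1, 4, 64, 192].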
def bigbird_block_rand_mask(from_seq_length,
to_seq_length,
from_block_size,
to_block_size,
num_rand_blocks,
last_idx=-1):
"""Create adjacency list of random attention.
Args:
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
num_rand_blocks: int. Number of random chunks per row.
    last_idx: if -1, then num_rand_blocks blocks are chosen anywhere in the to
      sequence; if positive, then num_rand_blocks blocks are chosen only up to
      last_idx.
Returns:
adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
"""
  assert from_seq_length//from_block_size == to_seq_length//to_block_size, \
      "Error: the number of blocks needs to be the same!"
rand_attn = np.zeros(
(from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
last = to_seq_length // to_block_size - 1
if last_idx > (2 * to_block_size):
last = (last_idx // to_block_size) - 1
r = num_rand_blocks # shorthand
for i in range(1, from_seq_length // from_block_size - 1):
start = i - 2
end = i
if i == 1:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
elif i == 2:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
elif i == from_seq_length // from_block_size - 3:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
# Missing -3: should have been sliced till last-3
elif i == from_seq_length // from_block_size - 2:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
# Missing -4: should have been sliced till last-4
else:
if start > last:
start = last
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
elif (end + 1) == last:
rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
else:
rand_attn[i - 1, :] = np.random.permutation(
np.concatenate((middle_seq[:start], middle_seq[end + 1:last])))[:r]
return rand_attn
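# Illustrative sketch (editor addition, not part of the original module): for a
# 512-token sequence split into 64-token blocks, the adjacency list has one row
# per "middle" block (512 // 64 - 2 = 6 rows) and `num_rand_blocks` randomly
# selected block indices per row. The sizes are arbitrary example values.
def _bigbird_rand_mask_sketch():
  rand_attn = bigbird_block_rand_mask(
      from_seq_length=512,
      to_seq_length=512,
      from_block_size=64,
      to_block_size=64,
      num_rand_blocks=3)
  return rand_attn  # numpy array of shape (6, 3).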
def create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn,
num_attention_heads, num_rand_blocks,
batch_size, from_seq_length, from_block_size):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads,
from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
"""
num_windows = from_seq_length // from_block_size - 2
rand_mask = tf.reshape(
tf.gather(to_blocked_mask, rand_attn, batch_dims=1), [
batch_size, num_attention_heads, num_windows,
num_rand_blocks * from_block_size
])
rand_mask = tf.einsum("BLQ,BHLK->BHLQK", from_blocked_mask[:, 1:-1],
rand_mask)
return rand_mask
def bigbird_block_sparse_attention(
query_layer, key_layer, value_layer, band_mask, from_mask, to_mask,
from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads,
num_rand_blocks, size_per_head, batch_size, from_seq_length, to_seq_length,
from_block_size, to_block_size):
"""BigBird attention sparse calculation using blocks in linear time.
Assumes from_seq_length//from_block_size == to_seq_length//to_block_size.
Args:
query_layer: float Tensor of shape [batch_size, num_attention_heads,
from_seq_length, size_per_head]
key_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
value_layer: float Tensor of shape [batch_size, num_attention_heads,
to_seq_length, size_per_head]
band_mask: (optional) int32 Tensor of shape [batch_size, 1,
from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. The
values should be 1 or 0. The attention scores will effectively be set to
-infinity for any positions in the mask that are 0, and will be unchanged
for positions that are 1.
from_mask: (optional) int32 Tensor of shape [batch_size, 1, from_seq_length,
1]. The values should be 1 or 0. The attention scores will effectively be
set to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
to_mask: (optional) int32 Tensor of shape [batch_size, 1, 1, to_seq_length].
The values should be 1 or 0. The attention scores will effectively be set
to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
from_blocked_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size]. Same as from_mask,
just reshaped.
to_blocked_mask: (optional) int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size]. Same as to_mask, just
reshaped.
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
size_per_head: int. Size of each attention head.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
to_seq_length: int. length of to sequence.
from_block_size: int. size of block in from sequence.
to_block_size: int. size of block in to sequence.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
"""
rand_attn = tf.expand_dims(rand_attn, 0)
rand_attn = tf.repeat(rand_attn, batch_size, 0)
rand_mask = create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
)
# Define shorthands
h = num_attention_heads
r = num_rand_blocks
d = size_per_head
b = batch_size
m = from_seq_length
n = to_seq_length
wm = from_block_size
wn = to_block_size
dtype = query_layer.dtype
query_layer = tf.transpose(query_layer, perm=[0, 2, 1, 3])
key_layer = tf.transpose(key_layer, perm=[0, 2, 1, 3])
value_layer = tf.transpose(value_layer, perm=[0, 2, 1, 3])
blocked_query_matrix = tf.reshape(query_layer, (b, h, m // wm, wm, -1))
blocked_key_matrix = tf.reshape(key_layer, (b, h, n // wn, wn, -1))
blocked_value_matrix = tf.reshape(value_layer, (b, h, n // wn, wn, -1))
gathered_key = tf.reshape(
tf.gather(blocked_key_matrix, rand_attn, batch_dims=2, name="gather_key"),
(b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1]
gathered_value = tf.reshape(
tf.gather(
blocked_value_matrix, rand_attn, batch_dims=2, name="gather_value"),
(b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1]
first_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 0],
key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
first_product = tf.multiply(first_product, 1.0 / np.sqrt(d))
first_product += (1.0 - tf.cast(to_mask, dtype=dtype)) * -10000.0
first_attn_weights = tf.nn.softmax(first_product) # [b, h, wm, n]
first_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", first_attn_weights,
value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
first_context_layer = tf.expand_dims(first_context_layer, 2)
second_key_mat = tf.concat([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1],
blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :,
-1], gathered_key[:, :, 0]
], 2) # [b, h, (4+r)*wn, -1]
second_value_mat = tf.concat([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1],
blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, 0]
], 2) # [b, h, (4+r)*wn, -1]
second_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, 1], second_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_seq_pad = tf.concat([
to_mask[:, :, :, :3 * wn], to_mask[:, :, :, -wn:],
tf.ones([b, 1, 1, r * wn], dtype=dtype)
], 3)
second_rand_pad = tf.concat([
tf.ones([b, h, wm, 4 * wn], dtype=dtype), rand_mask[:, :, 0]
], 3)
second_product = tf.multiply(second_product, 1.0 / np.sqrt(d))
second_product += (1.0 -
tf.minimum(second_seq_pad, second_rand_pad)) * -10000.0
second_attn_weights = tf.nn.softmax(second_product) # [b , h, wm, (4+r)*wn]
second_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", second_attn_weights, second_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_context_layer = tf.expand_dims(second_context_layer, 2)
exp_blocked_key_matrix = tf.concat([
blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2],
blocked_key_matrix[:, :, 3:-1]
], 3) # [b, h, m//wm-4, 3*wn, -1]
exp_blocked_value_matrix = tf.concat([
blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2],
blocked_value_matrix[:, :, 3:-1]
], 3) # [b, h, m//wm-4, 3*wn, -1]
middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
inner_band_product = tf.einsum(
"BHLQD,BHLKD->BHLQK", middle_query_matrix, exp_blocked_key_matrix
) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, 3*wn]
inner_band_product = tf.multiply(inner_band_product, 1.0 / np.sqrt(d))
rand_band_product = tf.einsum(
"BHLQD,BHLKD->BHLQK", middle_query_matrix,
gathered_key[:, :,
1:-1]) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, r*wn]
rand_band_product = tf.multiply(rand_band_product, 1.0 / np.sqrt(d))
first_band_product = tf.einsum(
"BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
first_band_product = tf.multiply(first_band_product, 1.0 / np.sqrt(d))
last_band_product = tf.einsum(
"BHLQD,BHKD->BHLQK", middle_query_matrix, blocked_key_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
last_band_product = tf.multiply(last_band_product, 1.0 / np.sqrt(d))
inner_band_product += (1.0 - band_mask) * -10000.0
first_band_product += (1.0 -
tf.expand_dims(to_mask[:, :, :, :wn], 3)) * -10000.0
last_band_product += (1.0 -
tf.expand_dims(to_mask[:, :, :, -wn:], 3)) * -10000.0
rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0
band_product = tf.concat([
first_band_product, inner_band_product, rand_band_product,
last_band_product
], -1) # [b, h, m//wm-4, wm, (5+r)*wn]
attn_weights = tf.nn.softmax(band_product) # [b, h, m//wm-4, wm, (5+r)*wn]
context_layer = tf.einsum(
"BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :,
wn:4 * wn], exp_blocked_value_matrix
) # [b, h, m//wm-4, wm, 3*wn] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHLKD->BHLQD", attn_weights[:, :, :, :,
4 * wn:-wn], gathered_value[:, :, 1:-1]
) # [b, h, m//wm-4, wm, r*wn] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :, :wn],
blocked_value_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
context_layer += tf.einsum(
"BHLQK,BHKD->BHLQD", attn_weights[:, :, :, :,
-wn:], blocked_value_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
second_last_key_mat = tf.concat([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3],
blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1],
gathered_key[:, :, -1]
], 2) # [b, h, (4+r)*wn, -1]
second_last_value_mat = tf.concat([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3],
blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, -1]
], 2) # [b, h, (4+r)*wn, -1]
second_last_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -2], second_last_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_last_seq_pad = tf.concat([
to_mask[:, :, :, :wn], to_mask[:, :, :, -3 * wn:],
tf.ones([b, 1, 1, r * wn], dtype=dtype)
], 3)
second_last_rand_pad = tf.concat(
[tf.ones([b, h, wm, 4 * wn], dtype=dtype), rand_mask[:, :, -1]], 3)
second_last_product = tf.multiply(second_last_product, 1.0 / np.sqrt(d))
second_last_product += (
1.0 - tf.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
second_last_attn_weights = tf.nn.softmax(
second_last_product) # [b, h, wm, (4+r)*wn]
second_last_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", second_last_attn_weights, second_last_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_last_context_layer = tf.expand_dims(second_last_context_layer, 2)
last_product = tf.einsum(
"BHQD,BHKD->BHQK", blocked_query_matrix[:, :, -1],
key_layer) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
last_product = tf.multiply(last_product, 1.0 / np.sqrt(d))
last_product += (1.0 - to_mask) * -10000.0
last_attn_weights = tf.nn.softmax(last_product) # [b, h, wm, n]
last_context_layer = tf.einsum(
"BHQK,BHKD->BHQD", last_attn_weights,
value_layer) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
last_context_layer = tf.expand_dims(last_context_layer, 2)
context_layer = tf.concat([
first_context_layer, second_context_layer, context_layer,
second_last_context_layer, last_context_layer
], 2)
context_layer = tf.reshape(context_layer, (b, h, m, -1)) * from_mask
context_layer = tf.transpose(context_layer, (0, 2, 1, 3))
return context_layer
class BigBirdMasks(tf.keras.layers.Layer):
"""Creates bigbird attention masks."""
def __init__(self, block_size, **kwargs):
super().__init__(**kwargs)
self._block_size = block_size
def call(self, inputs, mask):
encoder_shape = tf.shape(mask)
mask = tf.cast(mask, inputs.dtype)
batch_size, seq_length = encoder_shape[0], encoder_shape[1]
# reshape for blocking
blocked_encoder_mask = tf.reshape(
mask, (batch_size, seq_length // self._block_size, self._block_size))
encoder_from_mask = tf.reshape(mask, (batch_size, 1, seq_length, 1))
encoder_to_mask = tf.reshape(mask, (batch_size, 1, 1, seq_length))
band_mask = create_band_mask_from_inputs(blocked_encoder_mask,
blocked_encoder_mask)
return [band_mask, encoder_from_mask, encoder_to_mask, blocked_encoder_mask]
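# Illustrative sketch (editor addition, not part of the original module):
# building the four BigBird masks for a batch of two 512-token sequences with
# 64-token blocks. The returned shapes are
#   band_mask:            [2, 1, 512 // 64 - 4, 64, 3 * 64]
#   encoder_from_mask:    [2, 1, 512, 1]
#   encoder_to_mask:      [2, 1, 1, 512]
#   blocked_encoder_mask: [2, 512 // 64, 64]
# All sizes are arbitrary example values.
def _bigbird_masks_sketch():
  inputs = tf.ones((2, 512, 128))  # [batch, seq_length, hidden]
  input_mask = tf.ones((2, 512))   # [batch, seq_length]
  return BigBirdMasks(block_size=64)(inputs, input_mask)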
@tf.keras.utils.register_keras_serializable(package="Text")
class BigBirdAttention(tf.keras.layers.MultiHeadAttention):
"""BigBird, a sparse attention mechanism.
This layer follows the paper "Big Bird: Transformers for Longer Sequences"
(https://arxiv.org/abs/2007.14062).
  It reduces the quadratic dependency of attention
  computation to linear in the sequence length.
Arguments are the same as `MultiHeadAttention` layer.
"""
def __init__(self,
num_rand_blocks=3,
from_block_size=64,
to_block_size=64,
max_rand_mask_length=MAX_SEQ_LEN,
seed=None,
**kwargs):
super().__init__(**kwargs)
self._num_rand_blocks = num_rand_blocks
self._from_block_size = from_block_size
self._to_block_size = to_block_size
self._seed = seed
# Generates random attention.
np.random.seed(self._seed)
# pylint: disable=g-complex-comprehension
rand_attn = [
bigbird_block_rand_mask(
max_rand_mask_length,
max_rand_mask_length,
from_block_size,
to_block_size,
num_rand_blocks,
last_idx=1024) for _ in range(self._num_heads)
]
# pylint: enable=g-complex-comprehension
rand_attn = np.stack(rand_attn, axis=0)
self.rand_attn = tf.constant(rand_attn, dtype=tf.int32)
def _compute_attention(self, query, key, value, attention_mask=None):
(band_mask, encoder_from_mask, encoder_to_mask,
blocked_encoder_mask) = attention_mask
query_shape = tf.shape(query)
from_seq_length = query_shape[1]
to_seq_length = tf.shape(key)[1]
rand_attn = self.rand_attn[:, :(from_seq_length // self._from_block_size -
2)]
return bigbird_block_sparse_attention(
query,
key,
value,
band_mask,
encoder_from_mask,
encoder_to_mask,
blocked_encoder_mask,
blocked_encoder_mask,
num_attention_heads=self._num_heads,
num_rand_blocks=self._num_rand_blocks,
size_per_head=self._key_dim,
batch_size=query_shape[0],
from_seq_length=from_seq_length,
to_seq_length=to_seq_length,
from_block_size=self._from_block_size,
to_block_size=self._to_block_size,
rand_attn=rand_attn)
def call(self, query, value, key=None, attention_mask=None, **kwargs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# N = `num_attention_heads`
# H = `size_per_head`
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S, N, H]
key = self._key_dense(key)
# `value` = [B, S, N, H]
value = self._value_dense(value)
attention_output = self._compute_attention(query, key, value,
attention_mask)
attention_output.set_shape([None, None, self._num_heads, self._value_dim])
attention_output = self._output_dense(attention_output)
return attention_output
def get_config(self):
config = {
"num_rand_blocks": self._num_rand_blocks,
"from_block_size": self._from_block_size,
"to_block_size": self._to_block_size,
"seed": self._seed
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
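# Illustrative end-to-end sketch (editor addition, not part of the original
# module): computing BigBird sparse attention over a batch of encoded
# sequences, reusing the masks produced by `BigBirdMasks` above. The
# hyperparameters are arbitrary example values.
def _bigbird_attention_sketch():
  batch_size, seq_length, hidden_size = 2, 512, 128
  inputs = tf.ones((batch_size, seq_length, hidden_size))
  input_mask = tf.ones((batch_size, seq_length))
  masks = BigBirdMasks(block_size=64)(inputs, input_mask)
  attention_layer = BigBirdAttention(
      num_heads=4,
      key_dim=hidden_size // 4,
      from_block_size=64,
      to_block_size=64,
      num_rand_blocks=3)
  # Output has the same shape as the query: [batch_size, seq_length, hidden].
  return attention_layer(query=inputs, value=inputs, attention_mask=masks)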
| 21,101 | 41.803245 | 147 | py |
models | models-master/official/nlp/modeling/layers/reuse_transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import reuse_transformer
@parameterized.named_parameters(
('base', reuse_transformer.ReuseTransformer))
class ReuseTransformerLayerTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(ReuseTransformerLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor, _ = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_output_range_with_relative_pe(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu',
use_relative_pe=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
use_relative_pe=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor, _ = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer(input_data)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer(input_data)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor, _ = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
output_range=1,
norm_first=True)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor, _ = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor, _ = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
class ReuseTransformerArgumentTest(tf.test.TestCase, parameterized.TestCase):
def test_use_bias_norm_first(self):
num_attention_heads = 2
hidden_size = 16
encoder_block = reuse_transformer.ReuseTransformer(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
# Forward path.
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)
inputs = [dummy_tensor, dummy_mask]
output, _ = encoder_block(inputs)
self.assertEqual(output.shape, (2, 4, hidden_size))
def test_get_config(self):
num_attention_heads = 2
encoder_block = reuse_transformer.ReuseTransformer(
num_attention_heads=num_attention_heads,
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
attention_initializer=tf.keras.initializers.RandomUniform(
minval=0., maxval=1.))
encoder_block_config = encoder_block.get_config()
new_encoder_block = reuse_transformer.ReuseTransformer.from_config(
encoder_block_config)
self.assertEqual(encoder_block_config, new_encoder_block.get_config())
@parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},
{'attention_axes': [2]}, {'attention_axes': [1, 2]})
def test_several_attention_axes(self, attention_axes):
test_layer = reuse_transformer.ReuseTransformer(
inner_dim=32,
inner_activation='relu',
output_dropout=0.1,
attention_dropout=0.1,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
inner_dropout=0.1,
num_attention_heads=10,
attention_axes=attention_axes)
num_rows = 21
num_cols = 13
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(num_rows, num_cols, width))
output_tensor, _ = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
@parameterized.named_parameters(
('plain', False, False, False),
('plain_returnscore', False, True, False),
('plain_with_relative_pe', False, False, True),
('reuse_all', True, False, False),
('reuse_all_returnscore', True, True, False),
('reuse_all_with_relative_pe', True, False, True),
('reuse_5', 5, False, False),
('reuse_5_returnscore', 5, True, False),
('reuse_5_with_relative_pe', 5, False, True),)
def test_layer_invocation_with_mask(self, reuse_attention,
return_attention_scores, use_relative_pe):
test_layer = reuse_transformer.ReuseTransformer(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
reuse_attention=reuse_attention,
use_relative_pe=use_relative_pe)
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
return_scores_tensor = tf.keras.Input(shape=(1,))
reuse_attention_scores = tf.keras.Input(
shape=(10, sequence_length, sequence_length))
output_tensor, _ = test_layer(
[data_tensor, mask_tensor, reuse_attention_scores])
# Create a model from the test layer.
model = tf.keras.Model(
([data_tensor, mask_tensor, reuse_attention_scores],
return_scores_tensor), output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
reuse_scores = np.random.rand(
batch_size, 10, sequence_length, sequence_length)
_ = model.predict([input_data, mask_data, reuse_scores],
return_attention_scores)
@parameterized.named_parameters(
('without_relative_pe_with_pe_max_seq_length_10', False, 10),
('with_relative_pe_with_pe_max_seq_length_10', True, 10),
('without_relative_pe_with_pe_max_seq_length_100', False, 100),
('with_relative_pe_with_pe_max_seq_length_100', True, 100))
def test_layer_invocation_with_float16_with_relative_pe(
self, use_relative_pe, pe_max_seq_length):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = reuse_transformer.ReuseTransformer(
num_attention_heads=10, inner_dim=2048, inner_activation='relu',
use_relative_pe=use_relative_pe, pe_max_seq_length=pe_max_seq_length)
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
if __name__ == '__main__':
tf.test.main()
| 17,478 | 40.916067 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer_xl.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based Transformer XL layer."""
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import relative_attention
def _cache_memory(current_state, previous_state, memory_length, reuse_length=0):
"""Caches hidden states into memory.
Args:
current_state: `Tensor`, the current state.
previous_state: `Tensor`, the previous state.
memory_length: `int`, the number of tokens to cache.
reuse_length: `int`, the number of tokens in the current batch to be cached
and reused in the future.
Returns:
A `Tensor`, representing the cached state with stopped gradients.
"""
if memory_length is None or memory_length == 0:
return None
else:
if reuse_length > 0:
current_state = current_state[:, :reuse_length, :]
if previous_state is None:
new_mem = current_state[:, -memory_length:, :]
else:
new_mem = tf.concat(
[previous_state, current_state], 1)[:, -memory_length:, :]
return tf.stop_gradient(new_mem)
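# Illustrative sketch (editor addition, not part of the original module):
# caching the last `memory_length` hidden states so that the next segment can
# attend over them, as in Transformer XL. The shapes are arbitrary examples.
def _cache_memory_sketch():
  current_state = tf.ones((2, 8, 16))    # [batch, seq_length, hidden]
  previous_state = tf.zeros((2, 4, 16))  # [batch, old_memory_length, hidden]
  new_memory = _cache_memory(current_state, previous_state, memory_length=6)
  return new_memory  # Shape [2, 6, 16]; gradients are stopped.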
@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerXLBlock(tf.keras.layers.Layer):
"""Transformer XL block.
This implements a Transformer XL block from "Transformer-XL: Attentive
Language Models Beyond a Fixed-Length Context"
(https://arxiv.org/abs/1901.02860).
This block is further extended to allow for the Transformer-XL
re-parameterization in "XLNet: Generalized Autoregressive Pretraining for
Language Understanding" (https://arxiv.org/abs/1906.08237).
Given an input stream, this block computes attention, applies dropouts and
layer norms and feeds into the FFN network.
  **Note**: This layer is currently experimental.
Attributes:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_attention_heads: The number of attention heads.
head_size: The dimension size of each attention head.
inner_size: The inner size for the transformer layers.
dropout_rate: Dropout rate for the output of this layer.
attention_dropout_rate: Dropout rate on attention probabilities.
    two_stream: Whether or not to use `TwoStreamRelativeAttention`, as used in
      the XLNet pretrainer. If `False`, then it will use
`MultiHeadRelativeAttention` as in Transformer XL.
norm_epsilon: Epsilon value to initialize normalization layers.
inner_activation: The activation to use for the inner
FFN layers.
kernel_initializer: Initializer for dense layer kernels.
inner_dropout: Dropout probability for the inner dropout
layer.
"""
def __init__(self,
vocab_size,
hidden_size,
num_attention_heads,
head_size,
inner_size,
dropout_rate,
attention_dropout_rate,
two_stream=False,
norm_epsilon=1e-12,
inner_activation="relu",
kernel_initializer="variance_scaling",
inner_dropout=0.0,
**kwargs):
"""Initializes TransformerXLBlock layer."""
super().__init__(**kwargs)
self._vocab_size = vocab_size
self._num_heads = num_attention_heads
self._head_size = head_size
self._hidden_size = hidden_size
self._inner_size = inner_size
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._inner_activation = inner_activation
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._inner_dropout = inner_dropout
self._two_stream = two_stream
if two_stream:
self._attention_layer_type = relative_attention.TwoStreamRelativeAttention
else:
self._attention_layer_type = relative_attention.MultiHeadRelativeAttention
def build(self, input_shape):
input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
input_tensor_shape = tf.TensorShape(input_tensor)
if len(input_tensor_shape.as_list()) != 3:
      raise ValueError("TransformerXLBlock expects a three-dimensional input "
                       "of shape [batch, sequence, width].")
batch_size, sequence_length, hidden_size = input_tensor_shape
if len(input_shape) == 2:
mask_tensor_shape = tf.TensorShape(input_shape[1])
expected_mask_tensor_shape = tf.TensorShape(
[batch_size, sequence_length, sequence_length])
if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
raise ValueError("When passing a mask tensor to TransformerXLBlock, "
"the mask tensor must be of shape [batch, "
"sequence_length, sequence_length] (here %s). Got a "
"mask tensor of shape %s." %
(expected_mask_tensor_shape, mask_tensor_shape))
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_layer = self._attention_layer_type(
num_heads=self._num_heads,
key_dim=self._head_size,
value_dim=self._head_size,
dropout=self._attention_dropout_rate,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="rel_attn")
self._attention_dropout = tf.keras.layers.Dropout(
rate=self._attention_dropout_rate)
self._attention_layer_norm = tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
self._inner_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self._inner_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="inner")
self._inner_activation_layer = tf.keras.layers.Activation(
self._inner_activation)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer))
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon)
super().build(input_shape)
def get_config(self):
config = {
"vocab_size":
self._vocab_size,
"hidden_size":
self._hidden_size,
"num_attention_heads":
self._num_heads,
"head_size":
self._head_size,
"inner_size":
self._inner_size,
"dropout_rate":
self._dropout_rate,
"attention_dropout_rate":
self._attention_dropout_rate,
"two_stream":
self._two_stream,
"norm_epsilon":
self._norm_epsilon,
"inner_activation":
self._inner_activation,
"kernel_initializer":
self._kernel_initializer,
"inner_dropout":
self._inner_dropout,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
content_stream,
content_attention_bias,
positional_attention_bias,
relative_position_encoding=None,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
state=None,
content_attention_mask=None,
query_stream=None,
query_attention_mask=None,
target_mapping=None):
"""Implements `call` for the Layer.
Args:
content_stream: `Tensor`, the input content stream. This is the standard
input to Transformer XL and is commonly referred to as `h` in XLNet.
content_attention_bias: Bias `Tensor` for content based attention of shape
`[num_heads, dim]`.
positional_attention_bias: Bias `Tensor` for position based attention of
shape `[num_heads, dim]`.
relative_position_encoding: Relative positional encoding `Tensor` of shape
`[B, L, dim]`.
segment_matrix: Optional `Tensor` of shape `[B, S, S + M]`. Used in XLNet,
but not in Transformer XL.
segment_encoding: Optional `Tensor` of shape `[2, num_heads, dim]`. Used
in XLNet, but not in Transformer XL.
segment_attention_bias: Optional bias `Tensor` for segment based attention
of shape `[num_heads, dim]`.
state: Optional `Tensor` of shape `[B, M, E]`, where M is the length of
the state or memory. If passed, this is also attended over as in
Transformer XL.
content_attention_mask: Optional `Tensor` representing the mask that is
added to content attention logits. If state is not None, the mask source
sequence dimension should extend M.
query_stream: Optional `Tensor`, the query stream. This is introduced in
`TwoStreamRelativeAttention`/XLNet pretrainer. This is ignored if
`two_stream` is `False`.
query_attention_mask: Optional `Tensor` representing the mask that is
added to query attention logits. If state is not None, the mask source
sequence dimension should extend M.
target_mapping: Optional `Tensor` representing the target mapping when
calculating query attention.
Returns:
A `dict` object, containing the key value pairs for `content_attention`
and (if `two_stream` is `True`) `query_attention`.
"""
if not self._two_stream and query_stream is not None:
logging.warning("`query_stream` was provided but two stream attention is "
"disabled. `query_stream` will be ignored.")
if self._two_stream:
attention_kwargs = dict(
content_stream=content_stream,
query_stream=query_stream,
query_attention_mask=query_attention_mask,
target_mapping=target_mapping,
content_attention_mask=content_attention_mask)
else:
attention_kwargs = dict(
query=content_stream,
value=content_stream,
key=content_stream,
attention_mask=content_attention_mask)
common_attention_kwargs = dict(
content_attention_bias=content_attention_bias,
relative_position_encoding=relative_position_encoding,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
state=state)
attention_kwargs.update(common_attention_kwargs)
attention_output = self._attention_layer(**attention_kwargs)
if self._two_stream:
attention_streams = attention_output
input_streams = [content_stream, query_stream]
else:
attention_streams = [attention_output]
input_streams = [content_stream]
attention_keys = ["content_attention", "query_attention"]
attention_output = {}
for attention_stream, input_stream, attention_key in zip(
attention_streams, input_streams, attention_keys):
attention_stream = self._attention_dropout(attention_stream)
attention_stream = self._attention_layer_norm(
attention_stream + input_stream)
inner_output = self._inner_dense(attention_stream)
inner_output = self._inner_activation_layer(
inner_output)
inner_output = self._inner_dropout_layer(
inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
layer_output = self._output_layer_norm(layer_output + attention_stream)
attention_output[attention_key] = layer_output
return attention_output
class TransformerXL(tf.keras.layers.Layer):
"""Transformer XL.
This layer combines multiple Transformer XL blocks from "Transformer-XL:
Attentive Language Models Beyond a Fixed-Length Context"
(https://arxiv.org/abs/1901.02860).
This layer handles the attention biases as well as memory caching and reuse
as in Transformer XL and XLNet.
Attributes:
vocab_size: The number of tokens in vocabulary.
num_layers: The number of layers.
hidden_size: The hidden size.
num_attention_heads: The number of attention heads.
head_size: The dimension size of each attention head.
inner_size: The hidden size in feed-forward layers.
dropout_rate: Dropout rate used in each Transformer XL block.
attention_dropout_rate: Dropout rate on attention probabilities.
    two_stream: Whether or not to use `TwoStreamRelativeAttention`, as used
      in the XLNet pretrainer. If `False`, then it will use
`MultiHeadRelativeAttention` as in Transformer XL.
initializer: The initializer to use for attention biases.
tie_attention_biases: Whether or not to tie biases together. If `True`, then
each Transformer XL block shares the same trainable attention bias. If
`False`, then each block has its own attention bias. This is usually set
to `True`.
memory_length: The number of tokens to cache.
reuse_length: The number of tokens in the current batch to be cached
and reused in the future.
inner_activation: The activation to use in the inner layers
for Transformer XL blocks. Typically "relu" or "gelu".
"""
def __init__(self,
vocab_size,
num_layers,
hidden_size,
num_attention_heads,
head_size,
inner_size,
dropout_rate,
attention_dropout_rate,
initializer,
two_stream=False,
tie_attention_biases=True,
memory_length=None,
reuse_length=None,
inner_activation="relu",
**kwargs):
"""Initializes TransformerXL."""
super().__init__(**kwargs)
self._vocab_size = vocab_size
self._initializer = initializer
self._num_layers = num_layers
self._hidden_size = hidden_size
self._num_attention_heads = num_attention_heads
self._head_size = head_size
self._inner_size = inner_size
self._inner_activation = inner_activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._tie_attention_biases = tie_attention_biases
self._two_stream = two_stream
self._memory_length = memory_length
self._reuse_length = reuse_length
if self._tie_attention_biases:
attention_bias_shape = [self._num_attention_heads, self._head_size]
else:
attention_bias_shape = [self._num_layers, self._num_attention_heads,
self._head_size]
self.content_attention_bias = self.add_weight(
"content_attention_bias",
shape=attention_bias_shape,
dtype=tf.float32,
initializer=tf_utils.clone_initializer(self._initializer))
self.positional_attention_bias = self.add_weight(
"positional_attention_bias",
shape=attention_bias_shape,
dtype=tf.float32,
initializer=tf_utils.clone_initializer(self._initializer))
self.segment_attention_bias = self.add_weight(
"segment_attention_bias",
shape=attention_bias_shape,
dtype=tf.float32,
initializer=tf_utils.clone_initializer(self._initializer))
self.transformer_xl_layers = []
for i in range(self._num_layers):
self.transformer_xl_layers.append(
TransformerXLBlock(
vocab_size=self._vocab_size,
hidden_size=self._head_size * self._num_attention_heads,
num_attention_heads=self._num_attention_heads,
head_size=self._head_size,
inner_size=self._inner_size,
dropout_rate=self._dropout_rate,
attention_dropout_rate=self._attention_dropout_rate,
norm_epsilon=1e-12,
inner_activation=self._inner_activation,
two_stream=self._two_stream,
kernel_initializer="variance_scaling",
name="layer_%d" % i))
self.output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
def get_config(self):
config = {
"vocab_size":
self._vocab_size,
"num_layers":
self._num_layers,
"hidden_size":
self._hidden_size,
"num_attention_heads":
self._num_attention_heads,
"head_size":
self._head_size,
"inner_size":
self._inner_size,
"dropout_rate":
self._dropout_rate,
"attention_dropout_rate":
self._attention_dropout_rate,
"initializer":
self._initializer,
"two_stream":
self._two_stream,
"tie_attention_biases":
self._tie_attention_biases,
"memory_length":
self._memory_length,
"reuse_length":
self._reuse_length,
"inner_activation":
self._inner_activation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
content_stream,
relative_position_encoding,
segment_matrix=None,
segment_embedding=None,
state=None,
content_attention_mask=None,
query_stream=None,
query_attention_mask=None,
target_mapping=None):
"""Implements call() for the layer.
Args:
content_stream: `Tensor`, the input content stream. This is the standard
input to Transformer XL and is commonly referred to as `h` in XLNet.
relative_position_encoding: Relative positional encoding `Tensor` of shape
`[B, L, dim]`.
segment_matrix: Optional `Tensor` of shape `[B, S, S + M]`. Used in XLNet,
but not in Transformer XL.
segment_embedding: Optional `Tensor` of shape `[2, num_heads, dim]`. Used
in XLNet, but not in Transformer XL.
state: Optional `Tensor` of shape `[B, M, E]`, where M is the length of
the state or memory. If passed, this is also attended over as in
Transformer XL.
      content_attention_mask: Optional `Tensor` representing the mask that is
        added to the content attention logits. If state is not None, the
        source sequence dimension of the mask should be extended by M.
query_stream: Optional `Tensor`, the query stream. This is introduced in
`TwoStreamRelativeAttention`/XLNet pretrainer. This is ignored if
`two_stream` is `False`.
      query_attention_mask: Optional `Tensor` representing the mask that is
        added to the query attention logits. If state is not None, the source
        sequence dimension of the mask should be extended by M.
target_mapping: Optional `Tensor` representing the target mapping when
calculating query attention.
Returns:
A tuple consisting of the attention output and the list of cached memory
states.
The attention output is `content_attention` if `two_stream` is `False`,
otherwise it is `query_attention`.
"""
new_mems = []
if state is None:
state = [None] * self._num_layers
for i in range(self._num_layers):
# cache new mems
new_mems.append(
_cache_memory(content_stream, state[i],
self._memory_length, self._reuse_length))
# segment bias
if segment_matrix is None:
segment_attention_bias = None
segment_encoding = None
else:
segment_attention_bias = (self.segment_attention_bias
if self._tie_attention_biases
else self.segment_attention_bias[i])
segment_encoding = segment_embedding[i]
content_attention_bias = (self.content_attention_bias
if self._tie_attention_biases
else self.content_attention_bias[i])
positional_attention_bias = (self.positional_attention_bias
if self._tie_attention_biases
else self.positional_attention_bias[i])
transformer_xl_layer = self.transformer_xl_layers[i]
transformer_xl_output = transformer_xl_layer(
content_stream=content_stream,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
relative_position_encoding=relative_position_encoding,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
state=state[i],
content_attention_mask=content_attention_mask,
query_attention_mask=query_attention_mask,
query_stream=query_stream,
target_mapping=target_mapping)
content_stream = transformer_xl_output["content_attention"]
if self._two_stream:
query_stream = transformer_xl_output["query_attention"]
else:
query_stream = None
if self._two_stream:
output_stream = query_stream
else:
output_stream = content_stream
return output_stream, new_mems
| 22,180 | 38.538324 | 80 | py |
models | models-master/official/nlp/modeling/layers/masked_lm.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked language model network."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
class MaskedLM(tf.keras.layers.Layer):
"""Masked language model network head for BERT modeling.
This layer implements a masked language model based on the provided
  transformer-based encoder. It assumes that the encoder network being passed
has a "get_embedding_table()" method.
Example:
```python
encoder=modeling.networks.BertEncoder(...)
lm_layer=MaskedLM(embedding_table=encoder.get_embedding_table())
```
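  A further self-contained sketch: the embedding table below is created
  directly for illustration (rather than taken from an encoder), and all
  shapes are arbitrary.
  ```python
  embedding_table = tf.Variable(tf.random.normal([100, 64]))
  lm_layer = MaskedLM(embedding_table=embedding_table)
  sequence_data = tf.random.normal([2, 16, 64])
  masked_positions = tf.constant([[1, 4], [2, 7]])
  # logits shape: [batch_size, num_predictions, vocab_size] = [2, 2, 100].
  logits = lm_layer(sequence_data, masked_positions)
  ```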
Args:
embedding_table: The embedding table from encoder network.
activation: The activation, if any, for the dense layer.
initializer: The initializer for the dense layer. Defaults to a Glorot
uniform initializer.
output: The output style for this layer. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
embedding_table,
activation=None,
initializer='glorot_uniform',
output='logits',
name=None,
**kwargs):
super().__init__(name=name, **kwargs)
self.embedding_table = embedding_table
self.activation = activation
self.initializer = tf.keras.initializers.get(initializer)
if output not in ('predictions', 'logits'):
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
self._output_type = output
def build(self, input_shape):
self._vocab_size, hidden_size = self.embedding_table.shape
self.dense = tf.keras.layers.Dense(
hidden_size,
activation=self.activation,
kernel_initializer=self.initializer,
name='transform/dense')
self.layer_norm = tf.keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12, name='transform/LayerNorm')
self.bias = self.add_weight(
'output_bias/bias',
shape=(self._vocab_size,),
initializer='zeros',
trainable=True)
super().build(input_shape)
def call(self, sequence_data, masked_positions):
masked_lm_input = self._gather_indexes(sequence_data, masked_positions)
lm_data = self.dense(masked_lm_input)
lm_data = self.layer_norm(lm_data)
lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True)
logits = tf.nn.bias_add(lm_data, self.bias)
masked_positions_length = masked_positions.shape.as_list()[1] or tf.shape(
masked_positions)[1]
logits = tf.reshape(logits,
[-1, masked_positions_length, self._vocab_size])
if self._output_type == 'logits':
return logits
return tf.nn.log_softmax(logits)
def get_config(self):
raise NotImplementedError('MaskedLM cannot be directly serialized because '
'it has variable sharing logic.')
def _gather_indexes(self, sequence_tensor, positions):
"""Gathers the vectors at the specific positions, for performance.
Args:
sequence_tensor: Sequence output of shape
(`batch_size`, `seq_length`, num_hidden) where num_hidden is number of
hidden units.
      positions: Position ids of the tokens in the sequence to mask for
        pretraining, with dimension (batch_size, num_predictions), where
        `num_predictions` is the maximum number of tokens to mask out and
        predict per sequence.
Returns:
Masked out sequence tensor of shape (batch_size * num_predictions,
num_hidden).
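    For example (an illustrative trace), with `batch_size=2`, `seq_length=3`
    and `positions=[[0, 2], [1, 1]]`, the per-batch offsets are `[0, 3]`, so
    the flattened gather indices become `[0, 2, 4, 4]` into the reshaped
    `[batch_size * seq_length, num_hidden]` sequence tensor.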
"""
sequence_shape = tf.shape(sequence_tensor)
batch_size, seq_length = sequence_shape[0], sequence_shape[1]
width = sequence_tensor.shape.as_list()[2] or sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(
positions + tf.cast(flat_offsets, positions.dtype), [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
| 4,798 | 37.392 | 80 | py |
models | models-master/official/nlp/modeling/layers/bigbird_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.bigbird.attention."""
import tensorflow as tf
from official.nlp.modeling.layers import bigbird_attention as attention
class BigbirdAttentionTest(tf.test.TestCase):
def test_attention(self):
num_heads = 12
key_dim = 64
seq_length = 1024
batch_size = 2
block_size = 64
mask_layer = attention.BigBirdMasks(block_size=block_size)
encoder_inputs_mask = tf.zeros((batch_size, seq_length), dtype=tf.int32)
test_layer = attention.BigBirdAttention(
num_heads=num_heads,
key_dim=key_dim,
from_block_size=block_size,
to_block_size=block_size,
seed=0)
query = tf.random.normal(
shape=(batch_size, seq_length, key_dim))
masks = mask_layer(query, tf.cast(encoder_inputs_mask, dtype=tf.float64))
value = query
output = test_layer(
query=query,
value=value,
attention_mask=masks)
self.assertEqual(output.shape, [batch_size, seq_length, key_dim])
def test_config(self):
num_heads = 12
key_dim = 64
block_size = 64
test_layer = attention.BigBirdAttention(
num_heads=num_heads,
key_dim=key_dim,
from_block_size=block_size,
to_block_size=block_size,
seed=0)
print(test_layer.get_config())
new_layer = attention.BigBirdAttention.from_config(
test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,196 | 31.308824 | 79 | py |
models | models-master/official/nlp/modeling/layers/attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the attention layer."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import attention
def _create_cache(batch_size, init_decode_length, num_heads, head_size):
return {
"key":
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32),
"value":
tf.zeros([batch_size, init_decode_length, num_heads, head_size],
dtype=tf.float32)
}
class CachedAttentionTest(tf.test.TestCase):
def test_masked_attention(self):
"""Test with a mask tensor."""
num_heads, head_size = 2, 2
# Create a 3-dimensional input (the first dimension is implicit).
from_seq_length = 4
batch_size = 3
# GPU/CPU case.
init_decode_length = 0
# Directly tests the keras layer.
cache = _create_cache(batch_size, init_decode_length, num_heads, head_size)
layer = attention.CachedAttention(num_heads=num_heads, key_dim=head_size)
# Generate data for the input (non-mask) tensors.
from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32)
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(
2, size=(batch_size, from_seq_length, from_seq_length))
masked_output_data, cache = layer(
query=from_data, value=from_data, attention_mask=mask_data, cache=cache)
self.assertEqual(masked_output_data.shape, (3, 4, 8))
self.assertEqual(cache["value"].shape, (3, 4, 2, 2))
# Tests inputs without cache.
masked_output_data, cache = layer(
query=from_data, value=from_data, attention_mask=mask_data)
self.assertEqual(masked_output_data.shape, (3, 4, 8))
self.assertIsNone(cache)
def test_padded_decode(self):
"""Test with a mask tensor."""
num_heads, head_size = 2, 2
from_seq_length = 4
# TPU decoding should pre-allocate the entire sequence.
batch_size = 3
init_decode_length = from_seq_length
# Directly tests the keras layer.
cache = _create_cache(batch_size, init_decode_length, num_heads, head_size)
layer = attention.CachedAttention(num_heads=num_heads, key_dim=head_size)
# Generate data for the input (non-mask) tensors.
from_data = tf.zeros((batch_size, from_seq_length, 8), dtype=np.float32)
decode_loop_step = 2
mask_data = np.random.randint(
2, size=(batch_size, from_seq_length, from_seq_length), dtype=np.int32)
# Testing the invocation directly as Keras cannot consume inputs correctly.
masked_output_data, cache = layer(
query=from_data,
value=from_data,
attention_mask=mask_data,
cache=cache,
decode_loop_step=decode_loop_step)
self.assertEqual(masked_output_data.shape, (3, 4, 8))
self.assertEqual(cache["value"].shape, (3, 4, 2, 2))
if __name__ == "__main__":
tf.test.main()
| 3,526 | 36.126316 | 80 | py |
models | models-master/official/nlp/modeling/layers/tn_transformer_expand_condense.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TN-BERT TNTransformerExpandCondense employing Expand-Condense layer instead of Dense."""
# pylint: disable=g-classes-have-attributes
# Import libraries
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers.tn_expand_condense import TNExpandCondense
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class TNTransformerExpandCondense(tf.keras.layers.Layer):
"""Transformer layer using tensor network Expand-Condense layer.
This layer implements the Transformer from transformer.py, with a single
tensor network layer replacing the usual intermediate and output Dense
layers.
Args:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
output_range: the sequence output range, [0, output_range) by slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set to False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate dense
      layers. If set to False, the output of attention and intermediate dense
      layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for kernel.
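  Example: an illustrative sketch only; the hidden size must be a multiple of
  128 for the Expand-Condense layer, and all sizes below are arbitrary.
  ```python
  layer = TNTransformerExpandCondense(
      num_attention_heads=4,
      intermediate_size=1024,
      intermediate_activation='relu')
  data = tf.random.normal([2, 16, 256])  # [batch, sequence, hidden]
  output = layer(data)                   # same shape as the input
  ```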
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
super().__init__(**kwargs)
self._num_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._intermediate_activation = intermediate_activation
self._attention_dropout_rate = attention_dropout_rate
self._dropout_rate = dropout_rate
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
def build(self, input_shape):
input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
input_tensor_shape = tf.TensorShape(input_tensor)
if len(input_tensor_shape.as_list()) != 3:
raise ValueError(
"TNTransformerExpandCondense expects a three-dimensional input of "
"shape [batch, sequence, width].")
batch_size, sequence_length, hidden_size = input_tensor_shape
if len(input_shape) == 2:
mask_tensor_shape = tf.TensorShape(input_shape[1])
expected_mask_tensor_shape = tf.TensorShape(
[batch_size, sequence_length, sequence_length])
if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
raise ValueError(
"When passing a mask tensor to TNTransformerExpandCondense, the "
"mask tensor must be of shape [batch, "
"sequence_length, sequence_length] (here %s). Got a "
"mask tensor of shape %s." %
(expected_mask_tensor_shape, mask_tensor_shape))
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
# Substitute Dense layers with a single Expand-Condense layer.
self._output_dense = TNExpandCondense(
4,
use_bias=True,
activation=self._intermediate_activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer)
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"intermediate_size":
self._intermediate_size,
"intermediate_activation":
self._intermediate_activation,
"dropout_rate":
self._dropout_rate,
"attention_dropout_rate":
self._attention_dropout_rate,
"output_range":
self._output_range,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"intermediate_dropout":
self._intermediate_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer)
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
input_tensor, attention_mask = inputs
else:
input_tensor, attention_mask = (inputs, None)
if self._output_range:
target_tensor = input_tensor[:, 0:self._output_range, :]
attention_mask = attention_mask[:, 0:self._output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
target_tensor = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=input_tensor, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
layer_output = self._output_dense(attention_output)
layer_output = self._output_dropout(layer_output)
# During mixed precision training, attention_output is from layer norm and
# is always fp32 for now. Cast layer_output to fp32 for the subsequent
# add.
layer_output = tf.cast(layer_output, tf.float32)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self._output_layer_norm(layer_output + attention_output)
return layer_output
| 11,022 | 42.058594 | 91 | py |
models | models-master/official/nlp/modeling/layers/tn_expand_condense.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ExpandCondense tensor network layer used in TN-BERT."""
# pylint: disable=g-classes-have-attributes
from typing import List, Optional, Text, Any, Dict
import tensorflow as tf
from official.modeling import tf_utils
Layer = tf.keras.layers.Layer
activations = tf.keras.activations
initializers = tf.keras.initializers
@tf.keras.utils.register_keras_serializable(package='Text')
class TNExpandCondense(Layer):
"""A TPU-optimized TensorNetwork layer.
Designed for use in models that currently use Dense layers to achieve
up projection followed by down projection.
This layer is a TPU-optimized combination of 3 operations:
Expand, Apply Activation, and Condense. The layer projects up from
`input_shape[-1]` to `input_shape[-1] * proj_multiplier`, applies
`self.activation`, and then condenses back to `input_shape[-1]`.
Note the input shape and output shape will be identical.
Args:
proj_multiplier: Positive integer, multiple of `input_shape[-1]` to project
up to. Must be one of `[2, 4, 6, 8]`.
use_bias: Boolean, whether the layer uses a bias vector.
    activation: Activation function to use between Expand and Condense.
      Defaults to 'relu'; pass `None` for no activation (i.e. "linear"
      activation: `a(x) = x`).
kernel_initializer: Initializer for the weight matrices.
bias_initializer: Initializer for the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_shape[-1])`.
Output shape:
N-D tensor with shape: `(batch_size, ..., input_shape[-1])`.
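  Example: a minimal sketch. The only constraint illustrated here is that
  `input_shape[-1]` is a multiple of 128; the sizes are otherwise arbitrary.
  ```python
  layer = TNExpandCondense(proj_multiplier=4, activation='relu')
  data = tf.random.normal([2, 16, 256])
  output = layer(data)  # shape [2, 16, 256], same as the input
  ```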
"""
def __init__(self,
proj_multiplier: int,
use_bias: Optional[bool] = True,
activation: Optional[Text] = 'relu',
kernel_initializer: Optional[Text] = 'glorot_uniform',
bias_initializer: Optional[Text] = 'zeros',
**kwargs) -> None:
# Allow specification of input_dim instead of input_shape,
    # for compatibility with Keras layers that support this
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super().__init__(**kwargs)
assert proj_multiplier in [
2, 4, 6, 8, 10, 12
], 'proj_multiplier needs to be one of [2, 4, 6, 8, 10, 12]'
self.proj_multiplier = proj_multiplier
self.use_bias = use_bias
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape: List[int]) -> None:
# Disable the attribute-defined-outside-init violations in this function
# pylint: disable=attribute-defined-outside-init
if input_shape[-1] is None:
raise ValueError(
'The last dimension of the inputs to `TNExpandCondense` '
'should be defined. Found `None`.')
super().build(input_shape)
self.proj_size = self.proj_multiplier * input_shape[-1]
assert (self.proj_size // input_shape[-1]) * input_shape[
-1] == self.proj_size, (f'{self.proj_size} / {input_shape[-1]} must be '
f'round')
assert (input_shape[-1] // 128
) * 128 == input_shape[-1], f'{input_shape[-1]} / 128 must be round'
self.w1 = self.add_weight(
name='w1',
shape=(input_shape[-1], input_shape[-1]),
trainable=True,
initializer=tf_utils.clone_initializer(self.kernel_initializer))
self.w2 = self.add_weight(
name='w2',
shape=(128, (128 * (self.proj_size // input_shape[-1]))),
trainable=True,
initializer=tf_utils.clone_initializer(self.kernel_initializer))
self.w3 = self.add_weight(
name='w3',
shape=(128 * (self.proj_size // input_shape[-1]), 128),
trainable=True,
initializer=tf_utils.clone_initializer(self.kernel_initializer))
self.w4 = self.add_weight(
name='w4',
shape=(input_shape[-1] // 128, 128, input_shape[-1]),
trainable=True,
initializer=tf_utils.clone_initializer(self.kernel_initializer))
if self.use_bias:
self.bias = self.add_weight(
name='b',
shape=(input_shape[-1] // 128, 1,
128 * (self.proj_size // input_shape[-1])),
trainable=True,
initializer=self.bias_initializer)
else:
self.bias = None
def call(self, inputs: tf.Tensor, **kwargs):
orig_shape = tf.shape(inputs)
input_dim = inputs.shape[-1]
tmp = tf.reshape(inputs, (-1, input_dim))
# Shape is (BatchSeq, input_dim)
# Expansion network
tmp = tf.einsum('ab,Qb->aQ', self.w1, tmp)
# Note: Letter Q will always represent the BatchSeq axis.
tmp = tf.reshape(tmp, (input_dim // 128, 128, -1))
tmp = tf.einsum('abQ,bd->aQd', tmp, self.w2)
    # Apply activation (adding the bias first, when one is used) and then
    # Condense.
    if self.bias is not None:
      tmp += self.bias
    tmp = self.activation(tmp)
tmp = tf.einsum('aQd,db->aQb', tmp, self.w3)
tmp = tf.einsum('aQb,abd->Qd', tmp, self.w4)
out = tf.reshape(tmp, orig_shape)
return out
def compute_output_shape(self, input_shape: List[int]) -> List[int]:
return input_shape
def get_config(self) -> Dict[Any, Any]:
"""Returns the config of the layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
Python dictionary containing the configuration of the layer.
"""
config = {}
# Include the layer-specific arguments
args = ['proj_multiplier', 'use_bias']
for arg in args:
config[arg] = getattr(self, arg)
# Serialize the activation
config['activation'] = activations.serialize(getattr(self, 'activation'))
# Serialize the initializers
decomp_initializers = ['kernel_initializer', 'bias_initializer']
for initializer_arg in decomp_initializers:
config[initializer_arg] = initializers.serialize(
getattr(self, initializer_arg))
# Get base config
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| 6,703 | 35.63388 | 80 | py |
models | models-master/official/nlp/modeling/layers/mobile_bert_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileBERT embedding and transformer layers."""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import on_device_embedding
from official.nlp.modeling.layers import position_embedding
@tf.keras.utils.register_keras_serializable(package='Text')
class NoNorm(tf.keras.layers.Layer):
"""Apply element-wise linear transformation to the last dimension."""
def __init__(self, name=None):
super().__init__(name=name)
def build(self, shape):
    kernel_size = shape[-1]
    self.bias = self.add_weight('beta',
                                shape=[kernel_size],
                                initializer='zeros')
    self.scale = self.add_weight('gamma',
                                 shape=[kernel_size],
initializer='ones')
def call(self, feature):
output = feature * self.scale + self.bias
return output
def _get_norm_layer(normalization_type='no_norm', name=None):
"""Get normlization layer.
Args:
normalization_type: String. The type of normalization_type, only
`no_norm` and `layer_norm` are supported.
name: Name for the norm layer.
Returns:
layer norm class.
"""
if normalization_type == 'no_norm':
layer = NoNorm(name=name)
elif normalization_type == 'layer_norm':
layer = tf.keras.layers.LayerNormalization(
name=name,
axis=-1,
epsilon=1e-12,
dtype=tf.float32)
else:
    raise NotImplementedError('Only "no_norm" and "layer_norm" are supported.')
return layer
@tf.keras.utils.register_keras_serializable(package='Text')
class MobileBertEmbedding(tf.keras.layers.Layer):
"""Performs an embedding lookup for MobileBERT.
This layer includes word embedding, token type embedding, position embedding.
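  Example: an illustrative sketch with small, arbitrary vocabulary and
  embedding sizes.
  ```python
  embedding_layer = MobileBertEmbedding(
      word_vocab_size=100,
      word_embed_size=32,
      type_vocab_size=2,
      output_embed_size=64)
  input_ids = tf.constant([[5, 9, 2, 0]])
  token_type_ids = tf.constant([[0, 0, 1, 1]])
  # Output shape: [batch, seq_length, output_embed_size] = [1, 4, 64].
  embeddings = embedding_layer(input_ids, token_type_ids)
  ```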
"""
def __init__(self,
word_vocab_size,
word_embed_size,
type_vocab_size,
output_embed_size,
max_sequence_length=512,
normalization_type='no_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
dropout_rate=0.1,
**kwargs):
"""Class initialization.
Args:
word_vocab_size: Number of words in the vocabulary.
word_embed_size: Word embedding size.
type_vocab_size: Number of word types.
output_embed_size: Embedding size for the final embedding output.
max_sequence_length: Maximum length of input sequence.
      normalization_type: String. The type of normalization layer; only
        `no_norm` and `layer_norm` are supported.
initializer: The initializer to use for the embedding weights and
linear projection weights.
dropout_rate: Dropout rate.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
self.word_vocab_size = word_vocab_size
self.word_embed_size = word_embed_size
self.type_vocab_size = type_vocab_size
self.output_embed_size = output_embed_size
self.max_sequence_length = max_sequence_length
self.normalization_type = normalization_type
self.initializer = tf.keras.initializers.get(initializer)
self.dropout_rate = dropout_rate
self.word_embedding = on_device_embedding.OnDeviceEmbedding(
self.word_vocab_size,
self.word_embed_size,
initializer=tf_utils.clone_initializer(self.initializer),
name='word_embedding')
self.type_embedding = on_device_embedding.OnDeviceEmbedding(
self.type_vocab_size,
self.output_embed_size,
initializer=tf_utils.clone_initializer(self.initializer),
name='type_embedding')
self.pos_embedding = position_embedding.PositionEmbedding(
max_length=max_sequence_length,
initializer=tf_utils.clone_initializer(self.initializer),
name='position_embedding')
self.word_embedding_proj = tf.keras.layers.EinsumDense(
'abc,cd->abd',
output_shape=[None, self.output_embed_size],
kernel_initializer=tf_utils.clone_initializer(self.initializer),
bias_axes='d',
name='embedding_projection')
self.layer_norm = _get_norm_layer(normalization_type, 'embedding_norm')
self.dropout_layer = tf.keras.layers.Dropout(
self.dropout_rate,
name='embedding_dropout')
def get_config(self):
config = {
'word_vocab_size': self.word_vocab_size,
'word_embed_size': self.word_embed_size,
'type_vocab_size': self.type_vocab_size,
'output_embed_size': self.output_embed_size,
'max_sequence_length': self.max_sequence_length,
'normalization_type': self.normalization_type,
'initializer': tf.keras.initializers.serialize(self.initializer),
'dropout_rate': self.dropout_rate
}
base_config = super(MobileBertEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, input_ids, token_type_ids=None):
word_embedding_out = self.word_embedding(input_ids)
word_embedding_out = tf.concat(
[tf.pad(word_embedding_out[:, 1:], ((0, 0), (0, 1), (0, 0))),
word_embedding_out,
tf.pad(word_embedding_out[:, :-1], ((0, 0), (1, 0), (0, 0)))],
axis=2)
word_embedding_out = self.word_embedding_proj(word_embedding_out)
pos_embedding_out = self.pos_embedding(word_embedding_out)
embedding_out = word_embedding_out + pos_embedding_out
if token_type_ids is not None:
type_embedding_out = self.type_embedding(token_type_ids)
embedding_out += type_embedding_out
embedding_out = self.layer_norm(embedding_out)
embedding_out = self.dropout_layer(embedding_out)
return embedding_out
@tf.keras.utils.register_keras_serializable(package='Text')
class MobileBertTransformer(tf.keras.layers.Layer):
"""Transformer block for MobileBERT.
An implementation of one layer (block) of Transformer with bottleneck and
  inverted-bottleneck for MobileBERT.
Original paper for MobileBERT:
https://arxiv.org/pdf/2004.02984.pdf
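  Example: an illustrative sketch with small, arbitrary sizes.
  ```python
  block = MobileBertTransformer(
      hidden_size=64,
      num_attention_heads=2,
      intermediate_size=128,
      intra_bottleneck_size=32)
  hidden_states = tf.random.normal([2, 8, 64])
  # Output shape: [batch, seq_length, hidden_size] = [2, 8, 64].
  output = block(hidden_states)
  ```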
"""
def __init__(self,
hidden_size=512,
num_attention_heads=4,
intermediate_size=512,
intermediate_act_fn='relu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
intra_bottleneck_size=128,
use_bottleneck_attention=False,
key_query_shared_bottleneck=True,
num_feedforward_networks=4,
normalization_type='no_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
**kwargs):
"""Class initialization.
Args:
hidden_size: Hidden size for the Transformer input and output tensor.
num_attention_heads: Number of attention heads in the Transformer.
intermediate_size: The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: Dropout probability for the hidden layers.
attention_probs_dropout_prob: Dropout probability of the attention
probabilities.
intra_bottleneck_size: Size of bottleneck.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: Whether to share linear transformation for
keys and queries.
num_feedforward_networks: Number of stacked feed-forward networks.
      normalization_type: The type of normalization layer; only `no_norm` and
        `layer_norm` are supported. `no_norm` represents the element-wise
        linear transformation for the student model, as suggested by the
        original MobileBERT paper. `layer_norm` is used for the teacher model.
initializer: The initializer to use for the embedding weights and
linear projection weights.
**kwargs: keyword arguments.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_act_fn = intermediate_act_fn
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.intra_bottleneck_size = intra_bottleneck_size
self.use_bottleneck_attention = use_bottleneck_attention
self.key_query_shared_bottleneck = key_query_shared_bottleneck
self.num_feedforward_networks = num_feedforward_networks
self.normalization_type = normalization_type
self.initializer = tf.keras.initializers.get(initializer)
if intra_bottleneck_size % num_attention_heads != 0:
raise ValueError(
(f'The bottleneck size {intra_bottleneck_size} is not a multiple '
f'of the number of attention heads {num_attention_heads}.'))
attention_head_size = int(intra_bottleneck_size / num_attention_heads)
self.block_layers = {}
# add input bottleneck
dense_layer_2d = tf.keras.layers.EinsumDense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='bottleneck_input/dense')
layer_norm = _get_norm_layer(self.normalization_type,
name='bottleneck_input/norm')
self.block_layers['bottleneck_input'] = [dense_layer_2d,
layer_norm]
if self.key_query_shared_bottleneck:
dense_layer_2d = tf.keras.layers.EinsumDense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='kq_shared_bottleneck/dense')
layer_norm = _get_norm_layer(self.normalization_type,
name='kq_shared_bottleneck/norm')
self.block_layers['kq_shared_bottleneck'] = [dense_layer_2d,
layer_norm]
# add attention layer
attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='attention')
layer_norm = _get_norm_layer(self.normalization_type,
name='attention/norm')
self.block_layers['attention'] = [attention_layer,
layer_norm]
# add stacked feed-forward networks
self.block_layers['ffn'] = []
for ffn_layer_idx in range(self.num_feedforward_networks):
layer_prefix = f'ffn_layer_{ffn_layer_idx}'
layer_name = layer_prefix + '/intermediate_dense'
intermediate_layer = tf.keras.layers.EinsumDense(
'abc,cd->abd',
activation=self.intermediate_act_fn,
output_shape=[None, self.intermediate_size],
bias_axes='d',
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name=layer_name)
layer_name = layer_prefix + '/output_dense'
output_layer = tf.keras.layers.EinsumDense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name=layer_name)
layer_name = layer_prefix + '/norm'
layer_norm = _get_norm_layer(self.normalization_type,
name=layer_name)
self.block_layers['ffn'].append([intermediate_layer,
output_layer,
layer_norm])
# add output bottleneck
bottleneck = tf.keras.layers.EinsumDense(
'abc,cd->abd',
output_shape=[None, self.hidden_size],
activation=None,
bias_axes='d',
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='bottleneck_output/dense')
dropout_layer = tf.keras.layers.Dropout(
self.hidden_dropout_prob,
name='bottleneck_output/dropout')
layer_norm = _get_norm_layer(self.normalization_type,
name='bottleneck_output/norm')
self.block_layers['bottleneck_output'] = [bottleneck,
dropout_layer,
layer_norm]
def get_config(self):
config = {
'hidden_size': self.hidden_size,
'num_attention_heads': self.num_attention_heads,
'intermediate_size': self.intermediate_size,
'intermediate_act_fn': self.intermediate_act_fn,
'hidden_dropout_prob': self.hidden_dropout_prob,
'attention_probs_dropout_prob': self.attention_probs_dropout_prob,
'intra_bottleneck_size': self.intra_bottleneck_size,
'use_bottleneck_attention': self.use_bottleneck_attention,
'key_query_shared_bottleneck': self.key_query_shared_bottleneck,
'num_feedforward_networks': self.num_feedforward_networks,
'normalization_type': self.normalization_type,
'initializer': tf.keras.initializers.serialize(self.initializer),
}
base_config = super(MobileBertTransformer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
input_tensor,
attention_mask=None,
return_attention_scores=False):
"""Implementes the forward pass.
Args:
input_tensor: Float tensor of shape
`(batch_size, seq_length, hidden_size)`.
attention_mask: (optional) int32 tensor of shape
`(batch_size, seq_length, seq_length)`, with 1 for positions that can
be attended to and 0 in positions that should not be.
      return_attention_scores: Whether to return the attention scores.
Returns:
layer_output: Float tensor of shape
`(batch_size, seq_length, hidden_size)`.
attention_scores (Optional): Only when return_attention_scores is True.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
input_width = input_tensor.shape.as_list()[-1]
if input_width != self.hidden_size:
raise ValueError(
(f'The width of the input tensor {input_width} != '
f'hidden size {self.hidden_size}'))
prev_output = input_tensor
# input bottleneck
dense_layer = self.block_layers['bottleneck_input'][0]
layer_norm = self.block_layers['bottleneck_input'][1]
layer_input = dense_layer(prev_output)
layer_input = layer_norm(layer_input)
if self.use_bottleneck_attention:
key_tensor = layer_input
query_tensor = layer_input
value_tensor = layer_input
elif self.key_query_shared_bottleneck:
dense_layer = self.block_layers['kq_shared_bottleneck'][0]
layer_norm = self.block_layers['kq_shared_bottleneck'][1]
shared_attention_input = dense_layer(prev_output)
shared_attention_input = layer_norm(shared_attention_input)
key_tensor = shared_attention_input
query_tensor = shared_attention_input
value_tensor = prev_output
else:
key_tensor = prev_output
query_tensor = prev_output
value_tensor = prev_output
# attention layer
attention_layer = self.block_layers['attention'][0]
layer_norm = self.block_layers['attention'][1]
attention_output, attention_scores = attention_layer(
query_tensor,
value_tensor,
key_tensor,
attention_mask,
return_attention_scores=True,
)
attention_output = layer_norm(attention_output + layer_input)
# stacked feed-forward networks
layer_input = attention_output
for ffn_idx in range(self.num_feedforward_networks):
intermediate_layer = self.block_layers['ffn'][ffn_idx][0]
output_layer = self.block_layers['ffn'][ffn_idx][1]
layer_norm = self.block_layers['ffn'][ffn_idx][2]
intermediate_output = intermediate_layer(layer_input)
layer_output = output_layer(intermediate_output)
layer_output = layer_norm(layer_output + layer_input)
layer_input = layer_output
# output bottleneck
bottleneck = self.block_layers['bottleneck_output'][0]
dropout_layer = self.block_layers['bottleneck_output'][1]
layer_norm = self.block_layers['bottleneck_output'][2]
layer_output = bottleneck(layer_output)
layer_output = dropout_layer(layer_output)
layer_output = layer_norm(layer_output + prev_output)
if return_attention_scores:
return layer_output, attention_scores
else:
return layer_output
@tf.keras.utils.register_keras_serializable(package='Text')
class MobileBertMaskedLM(tf.keras.layers.Layer):
"""Masked language model network head for BERT modeling.
  This layer implements a masked language model based on the provided
  transformer-based encoder. It assumes that the encoder network being passed
  has a "get_embedding_table()" method. Unlike the canonical BERT masked LM
  layer, when the embedding width is smaller than hidden_size, it adds extra
  output weights of shape [vocab_size, (hidden_size - embedding_width)].
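  Example: a self-contained sketch. The embedding table below is created
  directly for illustration (rather than taken from an encoder), and the
  hidden size of the sequence data is deliberately larger than the embedding
  width so that the extra output weights are exercised.
  ```python
  embedding_table = tf.Variable(tf.random.normal([100, 32]))
  lm_layer = MobileBertMaskedLM(embedding_table=embedding_table)
  sequence_data = tf.random.normal([2, 8, 64])
  masked_positions = tf.constant([[1, 3], [2, 5]])
  # logits shape: [batch, num_predictions, vocab_size] = [2, 2, 100].
  logits = lm_layer(sequence_data, masked_positions)
  ```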
"""
def __init__(self,
embedding_table,
activation=None,
initializer='glorot_uniform',
output='logits',
output_weights_use_proj=False,
**kwargs):
"""Class initialization.
Args:
embedding_table: The embedding table from encoder network.
activation: The activation, if any, for the dense layer.
initializer: The initializer for the dense layer. Defaults to a Glorot
uniform initializer.
output: The output style for this layer. Can be either `logits` or
`predictions`.
      output_weights_use_proj: Use a projection instead of concatenating extra
        output weights; this may reduce the MLM task accuracy but also reduces
        the number of model parameters.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
self.embedding_table = embedding_table
self.activation = activation
self.initializer = tf.keras.initializers.get(initializer)
if output not in ('predictions', 'logits'):
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
self._output_type = output
self._output_weights_use_proj = output_weights_use_proj
def build(self, input_shape):
self._vocab_size, embedding_width = self.embedding_table.shape
hidden_size = input_shape[-1]
self.dense = tf.keras.layers.Dense(
hidden_size,
activation=self.activation,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name='transform/dense')
if hidden_size > embedding_width:
if self._output_weights_use_proj:
self.extra_output_weights = self.add_weight(
'output_weights_proj',
shape=(embedding_width, hidden_size),
initializer=tf_utils.clone_initializer(self.initializer),
trainable=True)
else:
self.extra_output_weights = self.add_weight(
'extra_output_weights',
shape=(self._vocab_size, hidden_size - embedding_width),
initializer=tf_utils.clone_initializer(self.initializer),
trainable=True)
elif hidden_size == embedding_width:
self.extra_output_weights = None
else:
raise ValueError(
'hidden size %d cannot be smaller than embedding width %d.' %
(hidden_size, embedding_width))
self.layer_norm = tf.keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12, name='transform/LayerNorm')
self.bias = self.add_weight(
'output_bias/bias',
shape=(self._vocab_size,),
initializer='zeros',
trainable=True)
super(MobileBertMaskedLM, self).build(input_shape)
def call(self, sequence_data, masked_positions):
masked_lm_input = self._gather_indexes(sequence_data, masked_positions)
lm_data = self.dense(masked_lm_input)
lm_data = self.layer_norm(lm_data)
if self.extra_output_weights is None:
lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True)
else:
if self._output_weights_use_proj:
lm_data = tf.matmul(
lm_data, self.extra_output_weights, transpose_b=True)
lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True)
else:
lm_data = tf.matmul(
lm_data,
tf.concat([self.embedding_table, self.extra_output_weights],
axis=1),
transpose_b=True)
logits = tf.nn.bias_add(lm_data, self.bias)
masked_positions_length = masked_positions.shape.as_list()[1] or tf.shape(
masked_positions)[1]
logits = tf.reshape(logits,
[-1, masked_positions_length, self._vocab_size])
if self._output_type == 'logits':
return logits
return tf.nn.log_softmax(logits)
def get_config(self):
raise NotImplementedError('MaskedLM cannot be directly serialized because '
'it has variable sharing logic.')
def _gather_indexes(self, sequence_tensor, positions):
"""Gathers the vectors at the specific positions.
Args:
sequence_tensor: Sequence output of `BertModel` layer of shape
`(batch_size, seq_length, num_hidden)` where `num_hidden` is number of
hidden units of `BertModel` layer.
      positions: Position ids of the tokens in the sequence to mask for
        pretraining, with dimension `(batch_size, num_predictions)` where
        `num_predictions` is the maximum number of tokens to mask out and
        predict per sequence.
Returns:
Masked out sequence tensor of shape
`(batch_size * num_predictions, num_hidden)`.
"""
sequence_shape = tf.shape(sequence_tensor)
batch_size, seq_length = sequence_shape[0], sequence_shape[1]
width = sequence_tensor.shape.as_list()[2] or sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
| 23,471 | 39.75 | 80 | py |
models | models-master/official/nlp/modeling/layers/transformer_scaffold.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based transformer scaffold layer."""
# pylint: disable=g-classes-have-attributes
from absl import logging
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import util
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class TransformerScaffold(tf.keras.layers.Layer):
"""Transformer scaffold layer.
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762), with a customizable attention layer and
feedforward layer option. Users can pass a class to
`attention_cls`/`feedforward_cls` and associated config to
`attention_cfg`/`feedforward_cfg`, in which case the scaffold will
instantiate the class with the config, or pass a class instance to
`attention_cls`/`feedforward_cls`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
attention_cls: A class to instantiate attention layer, or a layer instance.
attention_cfg: The config with which to instantiate `attention_cls`. Ignored
if attention_cls is a layer instance or None. If `attention_cls` is a
class, but `attention_cfg` is None, following kwargs will be used to
instantiate the attention instance: {
"num_heads": num_attention_heads,
"key_dim": int(hidden_size // num_attention_heads),
"dropout": attention_dropout_rate,
"name": "self_attention" }, where `hidden_size` is the input tensor's
last dimension.
feedforward_cls: A class to instantiate feedforward layer, or a layer
instance. If None, will use the standard feedforward layer as described in
"Attention Is All You Need" paper. If not None, the instantiated
feedforward layer is expected to take the output of attention as input and
its output is this transformer layer's output.
feedforward_cfg: The config with which to instantiate `feedforward_cls`.
Ignored if feedforward_cls is a layer instance or is None. If
`feedforward_cls` is a class, but `feedforward_cfg` is None, following
kwargs will be used to instantiate the feedforward instance: {
"inner_dim": inner_dim,
"inner_activation": inner_activation,
"dropout": dropout_rate,
"name": "feedforward" }.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
norm_first: Whether to normalize inputs to attention and intermediate
      dense layers. If set to False, the output of attention and intermediate
      dense layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer biases.
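  Example: an illustrative sketch using the default attention class and the
  built-in feedforward sublayer; the sizes below are arbitrary.
  ```python
  layer = TransformerScaffold(
      num_attention_heads=4,
      inner_dim=256,
      inner_activation='relu')
  data = tf.random.normal([2, 16, 64])
  mask = tf.ones([2, 16, 16])
  output = layer([data, mask])  # shape [2, 16, 64]
  ```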
"""
def __init__(self,
num_attention_heads,
inner_dim=768,
inner_activation=tf_utils.get_activation("gelu"),
attention_cls=attention.MultiHeadAttention,
attention_cfg=None,
feedforward_cls=None,
feedforward_cfg=None,
dropout_rate=0.0,
attention_dropout_rate=0.0,
norm_first=False,
norm_epsilon=1e-12,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
inner_dim = kwargs.pop("intermediate_size", inner_dim)
inner_activation = kwargs.pop("inner_activation", inner_activation)
util.filter_kwargs(kwargs)
super().__init__(**kwargs)
self._attention_cfg = attention_cfg
self._attention_cls = attention_cls
self._feedforward_cls = feedforward_cls
self._feedforward_cfg = feedforward_cfg
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout_rate = attention_dropout_rate
self._dropout_rate = dropout_rate
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
if len(input_tensor_shape.as_list()) != 3:
raise ValueError(
"TransformerScaffold expects a three-dimensional input of "
"shape [batch, sequence, width].")
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
def get_layer_instance(instance_or_cls, config, default_config):
if isinstance(instance_or_cls, tf.keras.layers.Layer):
return instance_or_cls
elif isinstance(instance_or_cls, dict):
return get_layer_instance(
tf.keras.utils.deserialize_keras_object(instance_or_cls),
config,
default_config,
)
else:
if config is None:
return instance_or_cls(**default_config)
else:
return instance_or_cls(**config)
default_attention_cfg = {
"kernel_initializer": tf_utils.clone_initializer(
self._kernel_initializer),
"bias_initializer": tf_utils.clone_initializer(self._bias_initializer),
"num_heads": self._num_heads,
"key_dim": self._attention_head_size,
"dropout": self._attention_dropout_rate,
"name": "self_attention"
}
default_attention_cfg.update(common_kwargs)
self._attention_layer = get_layer_instance(
self._attention_cls,
config=self._attention_cfg,
default_config=default_attention_cfg)
if self._feedforward_cls is not None:
default_feedforward_cfg = {
"kernel_initializer": tf_utils.clone_initializer(
self._kernel_initializer),
"bias_initializer": tf_utils.clone_initializer(
self._bias_initializer),
"inner_dim": self._inner_dim,
"inner_activation": self._inner_activation,
# TODO(hongkuny): try to update all ffn block args.
"intermediate_size": self._inner_dim,
"intermediate_activation": self._inner_activation,
"dropout": self._dropout_rate,
"name": "feedforward",
}
default_feedforward_cfg.update(common_kwargs)
self._feedforward_block = get_layer_instance(
self._feedforward_cls,
config=self._feedforward_cfg,
default_config=default_feedforward_cfg)
else:
self._feedforward_block = None
# self._dropout_rate controls dropout rates at two places:
# after attention, and after FFN.
self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
if self._feedforward_block is None:
self._intermediate_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self._inner_dim),
bias_axes="d",
name="intermediate",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super().build(input_shape)
logging.info("%s configs: %s", self.__class__.__name__, self.get_config())
def get_config(self):
config = {
"attention_cls": self._attention_layer,
"feedforward_cls": self._feedforward_block,
"num_attention_heads": self._num_heads,
"inner_dim": self._inner_dim,
"inner_activation": self._inner_activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if key_value is None:
key_value = input_tensor
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor, training=training)
attention_output = self._attention_layer(
query=input_tensor, value=key_value, attention_mask=attention_mask,
training=training)
attention_output = self._attention_dropout(attention_output,
training=training)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(input_tensor +
attention_output,
training=training)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output,
training=training)
if self._feedforward_block is None:
intermediate_output = self._intermediate_dense(attention_output)
intermediate_output = self._intermediate_activation_layer(
intermediate_output)
layer_output = self._output_dense(intermediate_output, training=training)
layer_output = self._output_dropout(layer_output, training=training)
# During mixed precision training, attention_output is from layer norm
# and is always fp32 for now. Cast layer_output to fp32 for the subsequent
# add.
layer_output = tf.cast(layer_output, tf.float32)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self._output_layer_norm(layer_output + attention_output,
training=training)
else:
if self._norm_first:
# if norm_first, assume the feedforward block will not apply layer norm
layer_output = self._feedforward_block(attention_output,
training=training)
layer_output += source_attention_output
else:
        # Attention: if not norm_first, assume that the feedforward block does
        # apply layer norm. The feedforward block also applies the residual
        # connection. See `GatedFeedforward` for a concrete example.
layer_output = self._feedforward_block(attention_output,
training=training)
return layer_output
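# NOTE: The usage sketch below is an illustrative addition, not part of the
# original module. It assumes the `TransformerScaffold` class defined above
# and this module's imports (`tf`, `attention`); the hyperparameters, function
# name, and shapes are arbitrary example values.
def _transformer_scaffold_usage_sketch():
  """Builds a scaffold block with the default attention class and runs it."""
  block = TransformerScaffold(
      num_attention_heads=8,
      inner_dim=1024,
      inner_activation="relu",
      attention_cls=attention.MultiHeadAttention,
      attention_cfg=None)
  data = tf.ones((2, 16, 64))  # [batch, sequence, width]
  mask = tf.ones((2, 16, 16))  # [batch, from_sequence, to_sequence]
  # A two-element input is interpreted as [input_tensor, attention_mask].
  return block([data, mask])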
| 15,694 | 42.597222 | 80 | py |
models | models-master/official/nlp/modeling/layers/pack_optimization_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pack_optimization."""
import tensorflow as tf
from official.nlp.modeling.layers import pack_optimization
class PackOptimizationTest(tf.test.TestCase):
def test_bert_embedding_packing(self):
batch_size, seq_len, embed_dim = 2, 4, 8
pack_sequences = 2
token_and_position_embed = tf.ones((batch_size, seq_len, embed_dim),
dtype=tf.float32)
input_mask = tf.ones((batch_size, seq_len), dtype=tf.int32)
layer = pack_optimization.PackBertEmbeddings(pack_sequences=pack_sequences)
outputs = layer(token_and_position_embed, input_mask)
self.assertEqual(outputs["packed_embeddings"].shape, (1, 8, embed_dim))
self.assertEqual(outputs["combined_attention_mask"].shape, (1, 8, 8))
def test_strided_transformer_encoder_block(self):
inputs = tf.zeros((2, 4, 8), dtype=tf.float32)
attention_mask = tf.ones((2, 4, 4), dtype=tf.float32)
transformer = pack_optimization.StridedTransformerEncoderBlock(
num_attention_heads=2, inner_dim=4, inner_activation="relu")
outputs = transformer([inputs, attention_mask],
stride=tf.constant(2, dtype=tf.int32))
self.assertEqual(outputs.shape, (2, 2, 8))
def test_strided_rezero_transformer(self):
inputs = tf.zeros((2, 4, 8), dtype=tf.float32)
attention_mask = tf.ones((2, 4, 4), dtype=tf.float32)
transformer = pack_optimization.StridedReZeroTransformer(
num_attention_heads=2, inner_dim=4, inner_activation="relu")
outputs = transformer([inputs, attention_mask],
stride=tf.constant(2, dtype=tf.int32))
self.assertEqual(outputs.shape, (2, 2, 8))
def test_strided_scaffold(self):
inputs = tf.zeros((2, 4, 8), dtype=tf.float32)
attention_mask = tf.ones((2, 4, 4), dtype=tf.float32)
test_layer = pack_optimization.StridedTransformerScaffold(
num_attention_heads=2,
inner_dim=128,
inner_activation="relu")
outputs = test_layer([inputs, attention_mask],
stride=tf.constant(2, dtype=tf.int32))
self.assertEqual(outputs.shape, (2, 2, 8))
if __name__ == "__main__":
tf.test.main()
| 2,785 | 40.58209 | 79 | py |
models | models-master/official/nlp/modeling/layers/cls_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cls_head."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling.layers import cls_head
class ClassificationHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(("no_pooler_layer", 0, 2),
("has_pooler_layer", 5, 4))
def test_pooler_layer(self, inner_dim, num_weights_expected):
test_layer = cls_head.ClassificationHead(inner_dim=inner_dim, num_classes=2)
features = tf.zeros(shape=(2, 10, 10), dtype=tf.float32)
_ = test_layer(features)
num_weights_observed = len(test_layer.get_weights())
self.assertEqual(num_weights_observed, num_weights_expected)
def test_layer_invocation(self):
test_layer = cls_head.ClassificationHead(inner_dim=5, num_classes=2)
features = tf.zeros(shape=(2, 10, 10), dtype=tf.float32)
output = test_layer(features)
self.assertAllClose(output, [[0., 0.], [0., 0.]])
self.assertSameElements(test_layer.checkpoint_items.keys(),
["pooler_dense"])
outputs = test_layer(features, only_project=True)
self.assertEqual(outputs.shape, (2, 5))
def test_layer_serialization(self):
layer = cls_head.ClassificationHead(10, 2)
new_layer = cls_head.ClassificationHead.from_config(layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(layer.get_config(), new_layer.get_config())
class MultiClsHeadsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(("no_pooler_layer", 0, 4),
("has_pooler_layer", 5, 6))
def test_pooler_layer(self, inner_dim, num_weights_expected):
cls_list = [("foo", 2), ("bar", 3)]
test_layer = cls_head.MultiClsHeads(inner_dim=inner_dim, cls_list=cls_list)
features = tf.zeros(shape=(2, 10, 10), dtype=tf.float32)
_ = test_layer(features)
num_weights_observed = len(test_layer.get_weights())
self.assertEqual(num_weights_observed, num_weights_expected)
def test_layer_invocation(self):
cls_list = [("foo", 2), ("bar", 3)]
test_layer = cls_head.MultiClsHeads(inner_dim=5, cls_list=cls_list)
features = tf.zeros(shape=(2, 10, 10), dtype=tf.float32)
outputs = test_layer(features)
self.assertAllClose(outputs["foo"], [[0., 0.], [0., 0.]])
self.assertAllClose(outputs["bar"], [[0., 0., 0.], [0., 0., 0.]])
self.assertSameElements(test_layer.checkpoint_items.keys(),
["pooler_dense", "foo", "bar"])
outputs = test_layer(features, only_project=True)
self.assertEqual(outputs.shape, (2, 5))
def test_layer_serialization(self):
cls_list = [("foo", 2), ("bar", 3)]
test_layer = cls_head.MultiClsHeads(inner_dim=5, cls_list=cls_list)
new_layer = cls_head.MultiClsHeads.from_config(test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
class GaussianProcessClassificationHead(tf.test.TestCase,
parameterized.TestCase):
def setUp(self):
super().setUp()
self.spec_norm_kwargs = dict(norm_multiplier=1.,)
self.gp_layer_kwargs = dict(num_inducing=512)
@parameterized.named_parameters(("no_pooler_layer", 0, 7),
("has_pooler_layer", 5, 11))
def test_pooler_layer(self, inner_dim, num_weights_expected):
test_layer = cls_head.GaussianProcessClassificationHead(
inner_dim=inner_dim,
num_classes=2,
use_spec_norm=True,
use_gp_layer=True,
initializer="zeros",
**self.spec_norm_kwargs,
**self.gp_layer_kwargs)
features = tf.zeros(shape=(2, 10, 10), dtype=tf.float32)
_ = test_layer(features)
num_weights_observed = len(test_layer.get_weights())
self.assertEqual(num_weights_observed, num_weights_expected)
def test_layer_invocation(self):
test_layer = cls_head.GaussianProcessClassificationHead(
inner_dim=5,
num_classes=2,
use_spec_norm=True,
use_gp_layer=True,
initializer="zeros",
**self.spec_norm_kwargs,
**self.gp_layer_kwargs)
features = tf.zeros(shape=(2, 10, 10), dtype=tf.float32)
output = test_layer(features)
self.assertAllClose(output, [[0., 0.], [0., 0.]])
self.assertSameElements(test_layer.checkpoint_items.keys(),
["pooler_dense"])
@parameterized.named_parameters(
("gp_layer_with_covmat", True, True),
("gp_layer_no_covmat", True, False),
("dense_layer_with_covmat", False, True),
("dense_layer_no_covmat", False, False))
def test_sngp_output_shape(self, use_gp_layer, return_covmat):
batch_size = 32
num_classes = 2
test_layer = cls_head.GaussianProcessClassificationHead(
inner_dim=5,
num_classes=num_classes,
use_spec_norm=True,
use_gp_layer=use_gp_layer,
**self.spec_norm_kwargs,
**self.gp_layer_kwargs)
features = tf.zeros(shape=(batch_size, 10, 10), dtype=tf.float32)
outputs = test_layer(features, return_covmat=return_covmat)
if use_gp_layer and return_covmat:
self.assertIsInstance(outputs, tuple)
self.assertEqual(outputs[0].shape, (batch_size, num_classes))
self.assertEqual(outputs[1].shape, (batch_size, batch_size))
else:
self.assertIsInstance(outputs, tf.Tensor)
self.assertEqual(outputs.shape, (batch_size, num_classes))
def test_sngp_train_logits(self):
"""Checks if temperature scaling is disabled during training."""
features = tf.zeros(shape=(5, 10, 10), dtype=tf.float32)
gp_layer = cls_head.GaussianProcessClassificationHead(
inner_dim=5, num_classes=2)
# Without temperature.
gp_layer.temperature = None
outputs_no_temp = gp_layer(features, training=True)
# With temperature.
gp_layer.temperature = 10.
outputs_with_temp = gp_layer(features, training=True)
self.assertAllEqual(outputs_no_temp, outputs_with_temp)
def test_layer_serialization(self):
layer = cls_head.GaussianProcessClassificationHead(
inner_dim=5,
num_classes=2,
use_spec_norm=True,
use_gp_layer=True,
**self.spec_norm_kwargs,
**self.gp_layer_kwargs)
new_layer = cls_head.GaussianProcessClassificationHead.from_config(
layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(layer.get_config(), new_layer.get_config())
def test_sngp_kwargs_serialization(self):
"""Tests if SNGP-specific kwargs are added during serialization."""
layer = cls_head.GaussianProcessClassificationHead(
inner_dim=5,
num_classes=2,
use_spec_norm=True,
use_gp_layer=True,
**self.spec_norm_kwargs,
**self.gp_layer_kwargs)
layer_config = layer.get_config()
# The config value should equal to those defined in setUp().
self.assertEqual(layer_config["norm_multiplier"], 1.)
self.assertEqual(layer_config["num_inducing"], 512)
class PerQueryDenseHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(("single_query", 1, 3, False),
("multi_queries", 10, 2, False),
("with_bias", 10, 2, True))
def test_layer_invocation(self, num_queries, features, use_bias):
batch_size = 5
hidden_size = 10
layer = cls_head.PerQueryDenseHead(
num_queries=num_queries, features=features, use_bias=use_bias)
inputs = tf.zeros(
shape=(batch_size, num_queries, hidden_size), dtype=tf.float32)
outputs = layer(inputs)
self.assertEqual(outputs.shape, [batch_size, num_queries, features])
def test_layer_serialization(self):
layer = cls_head.PerQueryDenseHead(
num_queries=10, features=2, use_bias=True)
new_layer = cls_head.PerQueryDenseHead.from_config(layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(layer.get_config(), new_layer.get_config())
if __name__ == "__main__":
tf.test.main()
| 8,878 | 37.942982 | 80 | py |
models | models-master/official/nlp/modeling/layers/tn_transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TN-BERT transformer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers.tn_transformer_expand_condense import TNTransformerExpandCondense
@parameterized.named_parameters(('tn', TNTransformerExpandCondense))
class TransformerLayerTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 256
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 256
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_incorrect_mask_fails(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 256
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length - 3))
with self.assertRaisesRegex(ValueError, 'When passing a mask tensor.*'):
_ = test_layer([data_tensor, mask_tensor])
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 256
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 16 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 256
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 16 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 256
batch_size = 6
input_data = 16 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
    # embedding.
new_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu',
output_range=1)
_ = new_layer([input_data, mask_data])
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data])
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_invocation_with_float16_dtype(self, transformer_cls):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 256
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (16 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 256
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=16,
intermediate_size=2048,
intermediate_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 256
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
if __name__ == '__main__':
tf.test.main()
| 8,883 | 41.104265 | 99 | py |
models | models-master/official/nlp/modeling/layers/on_device_embedding.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based one-hot embedding layer."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="Text")
class OnDeviceEmbedding(tf.keras.layers.Layer):
"""Performs an embedding lookup suitable for accelerator devices.
This layer uses either tf.gather or tf.one_hot to translate integer indices to
float embeddings.
Args:
vocab_size: Number of elements in the vocabulary.
embedding_width: Output size of the embedding layer.
initializer: The initializer to use for the embedding weights. Defaults to
"glorot_uniform".
use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding
lookup. Defaults to False (that is, using tf.gather). Setting this option
to True may improve performance, especially on small vocabulary sizes, but
will generally require more memory.
scale_factor: Whether to scale the output embeddings. Defaults to None (that
      is, not to scale). Setting this option to a float will multiply the
      output embeddings by scale_factor.
    weight_fallback_dtype: When Keras mixed precision infers the wrong dtype
      for variables, `weight_fallback_dtype` will be used to define the dtype
      of the weights.
"""
def __init__(self,
vocab_size,
embedding_width,
initializer="glorot_uniform",
use_one_hot=False,
scale_factor=None,
weight_fallback_dtype=tf.float32,
**kwargs):
super().__init__(**kwargs)
self._vocab_size = vocab_size
self._embedding_width = embedding_width
self._initializer = initializer
self._use_one_hot = use_one_hot
self._scale_factor = scale_factor
    # Backup control of the weight dtype because Keras mixed precision
    # sometimes depends on the input to infer the compute dtype, but the
    # inputs of this layer are of integer type.
self._weight_fallback_dtype = weight_fallback_dtype
def get_config(self):
config = {
"vocab_size": self._vocab_size,
"embedding_width": self._embedding_width,
"initializer": self._initializer,
"use_one_hot": self._use_one_hot,
"scale_factor": self._scale_factor,
"weight_fallback_dtype": self._weight_fallback_dtype,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
if (
self.dtype is not None
and not tf.dtypes.as_dtype(self.dtype).is_floating
):
# Keras failed to infer the right dtype.
dtype = self._weight_fallback_dtype
else:
dtype = self.dtype
self.embeddings = self.add_weight(
"embeddings",
shape=[self._vocab_size, self._embedding_width],
initializer=self._initializer,
dtype=dtype)
super().build(input_shape)
def call(self, inputs):
flat_inputs = tf.reshape(inputs, [-1])
if self._use_one_hot:
dtype = self.compute_dtype
if not tf.dtypes.as_dtype(dtype).is_floating:
# TensorFlow 1 compatibility. In TF1, self.compute_dtype is int32
# instead of a floating-point dtype, as the dtype is inferred from the
# dtype of the inputs
dtype = self._weight_fallback_dtype
one_hot_data = tf.one_hot(
flat_inputs, depth=self._vocab_size, dtype=dtype)
embeddings = tf.matmul(one_hot_data, self.embeddings)
else:
embeddings = tf.gather(self.embeddings, flat_inputs)
embeddings = tf.reshape(
embeddings,
tf.concat([tf.shape(inputs), [self._embedding_width]], axis=0))
embeddings.set_shape(inputs.shape.as_list() + [self._embedding_width])
if self._scale_factor:
embeddings *= self._scale_factor
return embeddings
@property
def vocab_size(self):
return self._vocab_size
@property
def embedding_width(self):
return self._embedding_width
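# NOTE: The usage sketch below is an illustrative addition, not part of the
# original module. It only assumes the `tf` import above; the vocabulary size,
# embedding width, function name, and token ids are arbitrary example values.
def _on_device_embedding_usage_sketch():
  """Looks up (and scales) embeddings for a small batch of token ids."""
  layer = OnDeviceEmbedding(
      vocab_size=100,
      embedding_width=16,
      # use_one_hot=True uses a one-hot matmul instead of tf.gather, which can
      # be faster on accelerators for small vocabularies.
      use_one_hot=False,
      scale_factor=16**0.5)
  token_ids = tf.constant([[1, 2, 3], [4, 5, 6]])  # [batch, seq_length]
  return layer(token_ids)  # -> [batch, seq_length, embedding_width]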
| 4,572 | 36.178862 | 80 | py |
models | models-master/official/nlp/modeling/layers/mixing.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based mixing layers.
Based on the mixing layers used by FNet
(https://aclanthology.org/2022.naacl-main.319/) and Sparse Mixers
(https://arxiv.org/abs/2205.12399).
Mixing layers can be used as drop-in replacements for self-attention layers. For
interoperability with attention layers, we use the same `query` and `value` call
signature.
Note: These mixing layers currently only support encoder stacks. Decoder stacks
can be supported in the future by utilizing the `value` inputs.
"""
import enum
import functools
from typing import Callable, Tuple, Union
import gin
import numpy as np
from scipy import linalg
import tensorflow as tf
from official.modeling import tf_utils
_Initializer = Union[str, tf.keras.initializers.Initializer]
default_kernel_initializer = tf.keras.initializers.TruncatedNormal(stddev=2e-2)
@gin.constants_from_enum
class MixingMechanism(enum.Enum):
"""Determines the type of mixing layer.
Possible options:
FOURIER: Fourier Transform mixing.
LINEAR: Mixing using dense matrix multiplications with learnable weights.
HARTLEY: Hartley Transform mixing.
"""
FOURIER = "fourier"
HARTLEY = "hartley"
LINEAR = "linear"
class MixingLayer(tf.keras.layers.Layer):
"""Mixing layer base class.
This class cannot be used directly. It just specifies the API for mixing
layer subclasses. For interoperability with attention layers, we use the same
`query` and `value` call signature.
  Based on the mixing layers used by FNet
(https://aclanthology.org/2022.naacl-main.319/) and Sparse Mixers
(https://arxiv.org/abs/2205.12399).
"""
def __init__(self, name: str = "mixing", **kwargs):
"""Initializes layer.
Args:
name: Name for layer.
**kwargs: Keyword arguments.
"""
super().__init__(name=name, **kwargs)
def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor:
"""Calls the layer.
Subclasses should return tensors of shape
<float>[batch_size, max_seq_length, hidden_dim].
Args:
query: Batch of input embeddings, typically of shape <float>[batch_size,
max_seq_length, hidden_dim].
value: Unused. Included to match attention layer API.
**kwargs: Optional arguments to catch unused attention keyword arguments.
Raises:
NotImplementedError. This class should not be called directly.
"""
raise NotImplementedError("Abstract method")
class FourierTransformLayer(MixingLayer):
"""Fourier Transform layer.
Applies 2D Fourier Transform over final two dimensions of `query` inputs -
typically the sequence and hidden dimensions.
"""
def __init__(self,
use_fft: bool = False,
name: str = "fourier_transform",
**kwargs):
"""Initializes layer.
Args:
use_fft: Whether to use Fast Fourier Transform (True) or the Discrete
Fourier Transform (DFT) matrix (False) to compute the Fourier Transform.
See _pick_fourier_transform() for recommendations on when to use FFT or
DFT.
name: Name for layer.
**kwargs: Keyword arguments.
"""
super().__init__(name=name, **kwargs)
self.use_fft = use_fft
def build(self, input_shape: Tuple[int, ...]):
"""Picks the Fourier Transform implementation."""
self.fourier_transform = _pick_fourier_transform(
self.use_fft,
max_seq_length=input_shape[-2],
hidden_dim=input_shape[-1])
def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor:
"""Applies layer to `query`.
Args:
query: Batch of input embeddings, typically of shape <float>[batch_size,
max_seq_length, hidden_dim].
value: Unused. Included to match attention layer API.
**kwargs: Optional arguments to catch unused attention keyword arguments.
Returns:
Real part of discrete Fourier Transform of `query` inputs with shape
<float32>[batch_size, max_seq_length, hidden_dim].
"""
del value # Ignored by encoder-only mixing layers
query = tf.cast(query, tf.complex64)
return tf.math.real(self.fourier_transform(query))
class HartleyTransformLayer(MixingLayer):
"""Hartley Transform layer.
Applies 2D Hartley Transform over final two dimensions of `query` inputs -
typically the sequence and hidden dimensions.
"""
def __init__(self,
use_fft: bool = False,
name: str = "hartley_transform",
**kwargs):
"""Initializes layer.
Args:
use_fft: Whether to use Fast Fourier Transform (True) or the Discrete
Fourier Transform (DFT) matrix (False) to compute the Hartley Transform.
See _pick_fourier_transform() for recommendations on when to use FFT or
DFT.
name: Name for layer.
**kwargs: Keyword arguments.
"""
super().__init__(name=name, **kwargs)
self.use_fft = use_fft
def build(self, input_shape: Tuple[int, ...]):
"""Picks the Fourier Transform implementation."""
self.fourier_transform = _pick_fourier_transform(
self.use_fft,
max_seq_length=input_shape[-2],
hidden_dim=input_shape[-1])
def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor:
"""Applies layer to `query`.
Args:
query: Batch of input embeddings, typically of shape <float>[batch_size,
max_seq_length, hidden_dim].
value: Unused. Included to match attention layer API.
**kwargs: Optional arguments to catch unused attention keyword arguments.
Returns:
Real part of discrete Hartley Transform of `query` inputs with shape
<float32>[batch_size, max_seq_length, hidden_dim].
"""
del value # Ignored by encoder-only mixing layers
query = tf.cast(query, tf.complex64)
frequencies = self.fourier_transform(query)
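    # The Hartley Transform of a real signal equals the real part minus the
    # imaginary part of its Fourier Transform: H = Re(F) - Im(F).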
return tf.math.real(frequencies) - tf.math.imag(frequencies)
class LinearTransformLayer(MixingLayer):
"""Dense, linear transformation layer.
Applies matrix multiplications over sequence and hidden dimensions.
"""
def __init__(self,
kernel_initializer: _Initializer = default_kernel_initializer,
name: str = "linear_transform",
**kwargs):
"""Initializes layer.
Args:
kernel_initializer: Initialization scheme for kernel.
name: Name for layer.
**kwargs: Keyword arguments.
"""
super().__init__(name=name, **kwargs)
self.kernel_initializer = kernel_initializer
def build(self, input_shape: Tuple[int, ...]):
"""Creates the hidden and sequence matrix variables of the layer."""
self.mat_hidden = self.add_weight(
shape=(input_shape[-1], input_shape[-1]),
initializer=tf_utils.clone_initializer(self.kernel_initializer),
trainable=True,
name="hidden_kernel")
self.mat_seq = self.add_weight(
shape=(input_shape[-2], input_shape[-2]),
initializer=tf_utils.clone_initializer(self.kernel_initializer),
trainable=True,
name="seq_kernel")
def call(self, query: tf.Tensor, value: tf.Tensor, **kwargs) -> tf.Tensor:
"""Applies layer to `query`.
Args:
query: Batch of input embeddings, typically of shape <float>[batch_size,
max_seq_length, hidden_dim].
value: Unused. Included to match attention layer API.
**kwargs: Optional arguments to catch unused attention keyword arguments.
Returns:
Linearly transformed `query` inputs with shape
<float>[batch_size, max_seq_length, hidden_dim].
"""
del value # Ignored by encoder-only mixing layers
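    # A single einsum mixes over both dimensions: output[b, n, k] =
    #   sum_{i, j} mat_seq[n, i] * query[b, i, j] * mat_hidden[j, k].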
return tf.einsum("bij,jk,ni->bnk", query, self.mat_hidden, self.mat_seq)
def _pick_fourier_transform(
use_fft: bool, max_seq_length: int,
hidden_dim: int) -> Callable[[tf.Tensor], tf.Tensor]:
"""Returns FFT or DFT Fourier Transform implementation.
On TPUs, we recommend using the Discrete Fourier Transform (DFT) matrix
(use_fft=False), except for very long sequence lengths. On GPUs and CPUs, the
Fast Fourier Transform (use_fft=True) is generally optimal for all sequence
lengths.
Note: When using the FFT it is recommended to use a sequence length that is a
power of 2.
Args:
use_fft: If True, return FFT. Otherwise, return DFT matrix.
max_seq_length: Maximum sequence length of inputs. Only used if
use_fft=False.
hidden_dim: Size of hidden dimension of inputs. Only used if use_fft=False.
Returns:
Fourier Transform.
"""
if use_fft:
return tf.signal.fft2d
else:
dft_mat_seq = linalg.dft(max_seq_length).astype(np.complex64)
dft_mat_hidden = linalg.dft(hidden_dim).astype(np.complex64)
def two_dim_matmul(x: tf.Tensor, matrix_dim_one: tf.Tensor,
matrix_dim_two: tf.Tensor) -> tf.Tensor:
"""Applies 2D matrix multiplication to input tensors of rank >= 2."""
return tf.einsum("...ij,jk,ni->...nk", tf.cast(x, tf.complex64),
matrix_dim_two, matrix_dim_one)
return functools.partial(
two_dim_matmul,
matrix_dim_one=dft_mat_seq,
matrix_dim_two=dft_mat_hidden)
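# NOTE: The usage sketch below is an illustrative addition, not part of the
# original module. It only assumes the `tf` import above; the function name
# and shapes are arbitrary example values (a power-of-2 sequence length, as
# recommended for the FFT path).
def _fourier_mixing_usage_sketch():
  """Uses a mixing layer with the attention-style call signature."""
  layer = FourierTransformLayer(use_fft=True)
  embeddings = tf.ones((2, 8, 16))  # [batch, max_seq_length, hidden_dim]
  # `value` is required to match the attention API but is ignored.
  return layer(query=embeddings, value=embeddings)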
| 9,733 | 33.034965 | 80 | py |
models | models-master/official/nlp/modeling/layers/per_dim_scale_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PerDimScaleAttention."""
import tensorflow as tf
from official.nlp.modeling.layers import per_dim_scale_attention as attention
class PerDimScaleAttentionTest(tf.test.TestCase):
def test_attention(self):
num_heads = 12
key_dim = 64
seq_length = 1024
batch_size = 2
test_layer = attention.PerDimScaleAttention(
num_heads=num_heads, key_dim=key_dim)
query = tf.random.normal(
shape=(batch_size, seq_length, key_dim * num_heads))
value = query
output = test_layer(query=query, value=value)
self.assertEqual(output.shape,
[batch_size, seq_length, key_dim * num_heads])
def test_config(self):
num_heads = 12
key_dim = 64
test_layer = attention.PerDimScaleAttention(
num_heads=num_heads, key_dim=key_dim)
print(test_layer.get_config())
new_layer = attention.PerDimScaleAttention.from_config(
test_layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(test_layer.get_config(), new_layer.get_config())
if __name__ == '__main__':
tf.test.main()
| 1,751 | 32.056604 | 79 | py |
models | models-master/official/nlp/modeling/layers/spectral_normalization_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for normalization layers.
## References:
[1] Hanie Sedghi, Vineet Gupta, Philip M. Long.
The Singular Values of Convolutional Layers.
In _International Conference on Learning Representations_, 2019.
"""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import spectral_normalization
DenseLayer = tf.keras.layers.Dense(10)
Conv2DLayer = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='valid')
def _compute_spectral_norm(weight):
if weight.ndim > 2:
    # For Conv2D kernels, computes the spectral norm via the FFT as in [1].
weight = np.fft.fft2(weight, weight.shape[1:3], axes=[0, 1])
return np.max(np.linalg.svd(weight, compute_uv=False))
class NormalizationTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(NormalizationTest, self).setUp()
self.num_iterations = 1000
self.norm_multiplier = 0.95
@parameterized.named_parameters(
('Dense',
(None, 10), DenseLayer, spectral_normalization.SpectralNormalization),
('Conv2D', (None, 32, 32, 3), Conv2DLayer,
spectral_normalization.SpectralNormalizationConv2D))
def test_spec_norm_magnitude(self, input_shape, layer, norm_wrapper):
"""Tests if the weights spectral norm converges to norm_multiplier."""
layer.build(input_shape)
sn_layer = norm_wrapper(
layer,
iteration=self.num_iterations,
norm_multiplier=self.norm_multiplier)
# Perform normalization.
sn_layer.build(input_shape)
sn_layer.update_weights()
normalized_kernel = sn_layer.layer.kernel.numpy()
spectral_norm_computed = _compute_spectral_norm(normalized_kernel)
spectral_norm_expected = self.norm_multiplier
self.assertAllClose(
spectral_norm_computed, spectral_norm_expected, atol=1e-1)
# Test that the normalized layer is K-Lipschitz. In particular, if the layer
# is a function f, then ||f(x1) - f(x2)||_2 <= K * ||(x1 - x2)||_2, where K
# is the norm multiplier.
new_input_shape = (16,) + input_shape[1:]
new_input = tf.random.uniform(new_input_shape)
delta_vec = tf.random.uniform(new_input_shape)
output1 = sn_layer(new_input)
output2 = sn_layer(new_input + delta_vec)
delta_input = tf.norm(tf.reshape(delta_vec, (-1,))).numpy()
delta_output = tf.norm(tf.reshape(output2 - output1, (-1,))).numpy()
self.assertLessEqual(delta_output, self.norm_multiplier * delta_input)
if __name__ == '__main__':
tf.test.main()
| 3,111 | 34.770115 | 80 | py |
models | models-master/official/nlp/modeling/layers/routing_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for routing."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import routing
class TokenImportanceTest(tf.test.TestCase, parameterized.TestCase):
def test_token_importance(self):
token_importance_embed = routing.TokenImportanceWithMovingAvg(
vocab_size=4,
init_importance=10.0,
moving_average_beta=0.995)
importance = token_importance_embed(np.array([[0, 1], [2, 3]]))
self.assertAllClose(importance, np.array([[10.0, 10.0], [10.0, 10.0]]))
token_importance_embed.update_token_importance(
token_ids=np.array([[0, 1]]),
importance=np.array([[0.0, 0.0]]))
importance = token_importance_embed(np.array([[0, 1], [2, 3]]))
self.assertAllClose(importance, np.array([[9.95, 9.95], [10.0, 10.0]]))
class TopKSelectionTest(tf.test.TestCase, parameterized.TestCase):
def test_top_k_selection(self):
token_selection = routing.SelectTopK(top_k=2)
selected, _ = token_selection(np.array([[0, 1, 2, 3], [4, 3, 2, 1]]))
self.assertAllClose(selected, np.array([[3, 2], [0, 1]]))
def test_random_k_selection(self):
token_selection = routing.SelectTopK(random_k=2)
selected, _ = token_selection(np.array([[0, 1, 2, 3], [4, 3, 2, 1]]))
self.assertAllClose(selected.shape, (2, 2))
def test_top_k_random_k(self):
token_selection = routing.SelectTopK(top_k=1, random_k=1)
selected, _ = token_selection(np.array([[0, 1, 2, 3], [4, 3, 2, 1]]))
self.assertAllClose(selected.shape, (2, 2))
if __name__ == "__main__":
tf.test.main()
| 2,216 | 35.95 | 75 | py |
models | models-master/official/nlp/modeling/layers/on_device_embedding_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based one-hot embedding layer."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import on_device_embedding
class OnDeviceEmbeddingTest(tf.test.TestCase):
def test_layer_creation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float32)
def test_layer_creation_with_mixed_precision(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width,
dtype="mixed_float16")
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float16)
def test_layer_invocation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float32, output.dtype)
def test_layer_invocation_with_mixed_precision(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width,
dtype="mixed_float16")
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float16, output.dtype)
def test_one_hot_layer_creation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float32)
def test_one_hot_layer_creation_with_mixed_precision(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
dtype="mixed_float16",
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float16)
def test_one_hot_layer_invocation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float32, output.dtype)
def test_one_hot_layer_invocation_with_mixed_precision(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
dtype="mixed_float16",
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float16, output.dtype)
def test_use_scale_layer_invocation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width,
scale_factor=embedding_width**0.5)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float32, output.dtype)
if __name__ == "__main__":
tf.test.main()
| 8,579 | 39.857143 | 79 | py |
models | models-master/official/nlp/modeling/layers/moe_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for moe.py."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import moe
def small_config():
"""Creates a small model config that can be used by all tests."""
config = {}
config['d_ff'] = 32
config['output_dropout'] = 0.1
config['num_experts'] = 2
config['expert_d_ff'] = 33
config['expert_dropout_rate'] = 0.1
config['jitter_noise'] = 0.1
config['train_capacity_factor'] = 1.0
config['eval_capacity_factor'] = 1.0
config['examples_per_group'] = 2.0
config['backbone_d_ff'] = 13
return config
def make_input_ones(batch_size: int = 4,
seq_length: int = 10,
hidden_dim: int = 7) -> tf.Tensor:
return tf.ones((batch_size, seq_length, hidden_dim))
def make_experts_input_ones(num_groups: int = 1,
num_experts: int = 2,
expert_capacity: int = 5,
hidden_dim: int = 7) -> tf.Tensor:
return tf.ones((num_groups, num_experts, expert_capacity, hidden_dim))
class MoeTest(tf.test.TestCase):
def tearDown(self):
super().tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_router_z_loss_dtype(self):
x = tf.constant([[[10.0, 5.0]]], dtype=tf.float32)
y = moe._router_z_loss(x)
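    # The router z-loss is the mean squared logsumexp of the router logits;
    # here logsumexp([10, 5]) = log(e**10 + e**5) = 5 + log(e**5 + 1).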
expected = (5 + np.log(np.exp(5) + 1))**2
self.assertAllClose(expected, y, atol=1e-7)
self.assertDTypeEqual(y, tf.float32)
def test_router_z_loss_shape(self):
x = make_input_ones(2, 5, 7)
y = moe._router_z_loss(x)
expected = (np.log(7) + 1)**2
self.assertAllClose(expected, y, atol=1e-7)
def test_experts_choose_masked_router_dtype_shape(self):
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
num_groups = 2
tokens_per_group = 3
hidden_dim = tokens_per_group
num_experts = tokens_per_group
expert_capacity = 2
x = np.zeros([num_groups, tokens_per_group, hidden_dim])
x[0, 0, 0] += 1
x[0, :2, :2] += 1
x[1, 1:, 1:] += 1
x[1, -1, -1] += 1
router = moe.ExpertsChooseMaskedRouter(
num_experts=num_experts,
jitter_noise=0.1,
use_bias=True,
kernel_initializer=tf.keras.initializers.get('identity'),
bias_initializer=tf.keras.initializers.get('ones'))
router_mask = router(x, expert_capacity=expert_capacity, training=False)
self.assertDTypeEqual(router_mask.dispatch_mask, tf.bfloat16)
self.assertDTypeEqual(router_mask.combine_array, tf.bfloat16)
expect_shape = [num_groups, tokens_per_group, num_experts, expert_capacity]
self.assertEqual(expect_shape, router_mask.dispatch_mask.shape)
self.assertEqual(expect_shape, router_mask.combine_array.shape)
    # The top_k call may not return sorted indices, so we can't compare the
    # output directly. Check that the output contains only 0s and 1s.
out_dm = router_mask.dispatch_mask.numpy()
self.assertSetEqual({0, 1}, set(out_dm.flatten().astype(np.int32)))
    # Check that the right tokens were selected.
out_dm_indices = np.dot(
out_dm.transpose((0, 2, 3, 1)), np.arange(tokens_per_group))
# Shape [num_groups, num_experts, expert_capacity]
self.assertSetEqual({0, 1}, set(out_dm_indices[0, 0, :].astype(np.int32)))
self.assertSetEqual({1, 2}, set(out_dm_indices[0, 1, :].astype(np.int32)))
self.assertSetEqual({1, 2}, set(out_dm_indices[0, 2, :].astype(np.int32)))
self.assertSetEqual({0, 1}, set(out_dm_indices[1, 0, :].astype(np.int32)))
self.assertSetEqual({0, 1}, set(out_dm_indices[1, 1, :].astype(np.int32)))
self.assertSetEqual({1, 2}, set(out_dm_indices[1, 2, :].astype(np.int32)))
out_ca = router_mask.combine_array.numpy()
out_ca = np.dot(out_ca, np.ones((expert_capacity,)))
expected_combine_array = np.array([[[0.66, 0.0, 0.0], [0.42, 0.42, 0.16],
[0.0, 0.33, 0.33]],
[[0.33, 0.33, 0.0], [0.16, 0.42, 0.42],
[0.0, 0.0, 0.66]]])
self.assertAllClose(expected_combine_array, out_ca, atol=1e-2)
def test_feed_forward_shape_and_vars(self):
config = small_config()
layer = moe.FeedForward(
d_ff=config['d_ff'], output_dropout=config['output_dropout'])
inputs = make_input_ones()
outputs = layer(inputs)
self.assertAllEqual(tf.shape(inputs), tf.shape(outputs))
var_names = sorted([v.name for v in layer.trainable_variables])
self.assertAllEqual([
'feed_forward/intermediate/bias:0',
'feed_forward/intermediate/kernel:0', 'feed_forward/output/bias:0',
'feed_forward/output/kernel:0'
], var_names)
def test_feed_forward_manual(self):
config = small_config()
layer = moe.FeedForward(
d_ff=config['d_ff'],
output_dropout=config['output_dropout'],
activation=tf.keras.activations.relu,
kernel_initializer=tf.keras.initializers.get('ones'),
bias_initializer=tf.keras.initializers.get('ones'))
inputs = make_input_ones(1, 2, 3)
outputs = layer(inputs, training=False)
manual_outputs = tf.constant([[[129.0, 129.0, 129.0], [129.0, 129.0,
129.0]]])
self.assertAllClose(manual_outputs, outputs, atol=1e-7)
def test_feed_forward_experts_shape_and_vars(self):
config = small_config()
layer = moe.FeedForwardExperts(
num_experts=config['num_experts'],
d_ff=config['expert_d_ff'],
output_dropout=config['expert_dropout_rate'])
inputs = make_experts_input_ones()
outputs = layer(inputs)
self.assertAllEqual(tf.shape(inputs), tf.shape(outputs))
var_names = sorted([v.name for v in layer.trainable_variables])
self.assertAllEqual([
'experts/intermediate/bias:0', 'experts/intermediate/kernel:0',
'experts/output/bias:0', 'experts/output/kernel:0'
], var_names)
def test_feed_forward_experts_manual(self):
config = small_config()
layer = moe.FeedForwardExperts(
num_experts=1,
d_ff=config['expert_d_ff'],
output_dropout=config['expert_dropout_rate'],
activation=tf.keras.activations.relu,
kernel_initializer=tf.keras.initializers.get('ones'),
bias_initializer=tf.keras.initializers.get('ones'))
inputs = make_experts_input_ones(1, 1, 2, 3)
outputs = layer(inputs, training=False)
manual_outputs = tf.constant([[[[133.0, 133.0, 133.0],
[133.0, 133.0, 133.0]]]])
self.assertAllClose(manual_outputs, outputs, atol=1e-7)
def test_moe_layer(self):
config = small_config()
experts = moe.FeedForwardExperts(
num_experts=config['num_experts'],
d_ff=config['expert_d_ff'],
output_dropout=config['expert_dropout_rate'])
router = moe.ExpertsChooseMaskedRouter(
config['num_experts'], jitter_noise=config['jitter_noise'])
moe_layer = moe.MoeLayer(
experts,
router,
train_capacity_factor=config['train_capacity_factor'],
eval_capacity_factor=config['eval_capacity_factor'],
examples_per_group=config['examples_per_group'])
inputs = make_input_ones()
outputs = moe_layer(inputs, training=True)
self.assertAllEqual(tf.shape(inputs), tf.shape(outputs))
var_names = sorted([v.name for v in moe_layer.trainable_variables])
self.assertAllEqual([
'moe/experts/intermediate/bias:0', 'moe/experts/intermediate/kernel:0',
'moe/experts/output/bias:0', 'moe/experts/output/kernel:0',
'moe/router/router_weights/bias:0', 'moe/router/router_weights/kernel:0'
], var_names)
self.assertLen(moe_layer.losses, 1)
metrics = [metric.name for metric in moe_layer.metrics]
self.assertSetEqual(
{
'router_z_loss', 'unscaled_router_z_loss', 'load_balancing_loss',
'fraction_tokens_left_behind', 'router_confidence', 'expert_usage'
}, set(metrics))
def test_moe_layer_with_backbone(self):
config = small_config()
experts = moe.FeedForwardExperts(
num_experts=config['num_experts'],
d_ff=config['expert_d_ff'],
output_dropout=config['expert_dropout_rate'])
router = moe.ExpertsChooseMaskedRouter(
config['num_experts'], jitter_noise=config['jitter_noise'])
moe_layer = moe.MoeLayer(
experts,
router,
train_capacity_factor=config['train_capacity_factor'],
eval_capacity_factor=config['eval_capacity_factor'],
examples_per_group=config['examples_per_group'])
layer = moe.MoeLayerWithBackbone(moe_layer, config['backbone_d_ff'])
inputs = make_input_ones()
outputs = layer(inputs)
self.assertAllEqual(tf.shape(inputs), tf.shape(outputs))
if __name__ == '__main__':
tf.test.main()
| 9,404 | 38.351464 | 80 | py |
models | models-master/official/nlp/modeling/layers/cls_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Classification head layer which is common used with sequence encoders."""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import gaussian_process
from official.nlp.modeling.layers import spectral_normalization
class ClassificationHead(tf.keras.layers.Layer):
"""Pooling head for sentence-level classification tasks."""
def __init__(self,
inner_dim,
num_classes,
cls_token_idx=0,
activation="tanh",
dropout_rate=0.0,
initializer="glorot_uniform",
**kwargs):
"""Initializes the `ClassificationHead`.
Args:
inner_dim: The dimensionality of inner projection layer. If 0 or `None`
then only the output projection layer is created.
num_classes: Number of output classes.
cls_token_idx: The index inside the sequence to pool.
activation: Dense layer activation.
dropout_rate: Dropout probability.
initializer: Initializer for dense layer kernels.
**kwargs: Keyword arguments.
"""
super().__init__(**kwargs)
self.dropout_rate = dropout_rate
self.inner_dim = inner_dim
self.num_classes = num_classes
self.activation = tf_utils.get_activation(activation)
self.initializer = tf.keras.initializers.get(initializer)
self.cls_token_idx = cls_token_idx
if self.inner_dim:
self.dense = tf.keras.layers.Dense(
units=self.inner_dim,
activation=self.activation,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name="pooler_dense")
self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
self.out_proj = tf.keras.layers.Dense(
units=num_classes,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name="logits")
def call(self, features: tf.Tensor, only_project: bool = False):
"""Implements call().
Args:
features: a rank-3 Tensor when self.inner_dim is specified, otherwise
it is a rank-2 Tensor.
only_project: a boolean. If True, we return the intermediate Tensor
before projecting to class logits.
Returns:
a Tensor, if only_project is True, shape= [batch size, hidden size].
If only_project is False, shape= [batch size, num classes].
"""
if not self.inner_dim:
x = features
else:
x = features[:, self.cls_token_idx, :] # take <CLS> token.
x = self.dense(x)
if only_project:
return x
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_config(self):
config = {
"cls_token_idx": self.cls_token_idx,
"dropout_rate": self.dropout_rate,
"num_classes": self.num_classes,
"inner_dim": self.inner_dim,
"activation": tf.keras.activations.serialize(self.activation),
"initializer": tf.keras.initializers.serialize(self.initializer),
}
config.update(super(ClassificationHead, self).get_config())
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def checkpoint_items(self):
return {self.dense.name: self.dense}
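
# A minimal usage sketch for `ClassificationHead`: pooling a fake encoder
# sequence output into class logits. The helper name and all sizes below are
# arbitrary illustrative choices, not values required by the layer.
def _classification_head_example():
  head = ClassificationHead(inner_dim=16, num_classes=3)
  sequence_output = tf.ones((2, 8, 16))  # [batch_size, seq_length, width].
  logits = head(sequence_output)  # Shape: [2, 3].
  pooled = head(sequence_output, only_project=True)  # Shape: [2, 16].
  return logits, pooled
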
class MultiClsHeads(tf.keras.layers.Layer):
"""Pooling heads sharing the same pooling stem."""
def __init__(self,
inner_dim,
cls_list,
cls_token_idx=0,
activation="tanh",
dropout_rate=0.0,
initializer="glorot_uniform",
**kwargs):
"""Initializes the `MultiClsHeads`.
Args:
inner_dim: The dimensionality of inner projection layer. If 0 or `None`
then only the output projection layer is created.
      cls_list: a list of (classification problem name, number of classes)
        pairs.
cls_token_idx: The index inside the sequence to pool.
activation: Dense layer activation.
dropout_rate: Dropout probability.
initializer: Initializer for dense layer kernels.
**kwargs: Keyword arguments.
"""
super().__init__(**kwargs)
self.dropout_rate = dropout_rate
self.inner_dim = inner_dim
self.cls_list = cls_list
self.activation = tf_utils.get_activation(activation)
self.initializer = tf.keras.initializers.get(initializer)
self.cls_token_idx = cls_token_idx
if self.inner_dim:
self.dense = tf.keras.layers.Dense(
units=inner_dim,
activation=self.activation,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name="pooler_dense")
self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
self.out_projs = []
for name, num_classes in cls_list:
self.out_projs.append(
tf.keras.layers.Dense(
units=num_classes,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name=name))
def call(self, features: tf.Tensor, only_project: bool = False):
"""Implements call().
Args:
features: a rank-3 Tensor when self.inner_dim is specified, otherwise
it is a rank-2 Tensor.
only_project: a boolean. If True, we return the intermediate Tensor
before projecting to class logits.
Returns:
If only_project is True, a Tensor with shape= [batch size, hidden size].
If only_project is False, a dictionary of Tensors.
"""
if not self.inner_dim:
x = features
else:
x = features[:, self.cls_token_idx, :] # take <CLS> token.
x = self.dense(x)
if only_project:
return x
x = self.dropout(x)
outputs = {}
for proj_layer in self.out_projs:
outputs[proj_layer.name] = proj_layer(x)
return outputs
def get_config(self):
config = {
"dropout_rate": self.dropout_rate,
"cls_token_idx": self.cls_token_idx,
"cls_list": self.cls_list,
"inner_dim": self.inner_dim,
"activation": tf.keras.activations.serialize(self.activation),
"initializer": tf.keras.initializers.serialize(self.initializer),
}
config.update(super().get_config())
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def checkpoint_items(self):
items = {self.dense.name: self.dense}
items.update({v.name: v for v in self.out_projs})
return items
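
# A minimal usage sketch for `MultiClsHeads`: two hypothetical classification
# problems ("sentiment" and "topic") sharing one pooler stem. The names and
# sizes are illustrative assumptions only.
def _multi_cls_heads_example():
  heads = MultiClsHeads(
      inner_dim=16, cls_list=[("sentiment", 2), ("topic", 5)])
  sequence_output = tf.ones((2, 8, 16))  # [batch_size, seq_length, width].
  outputs = heads(sequence_output)
  # `outputs` is a dict keyed by the head names, e.g. outputs["sentiment"]
  # has shape [2, 2] and outputs["topic"] has shape [2, 5].
  return outputs
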
class GaussianProcessClassificationHead(ClassificationHead):
"""Gaussian process-based pooling head for sentence classification.
This class implements a classifier head for BERT encoder that is based on the
spectral-normalized neural Gaussian process (SNGP) [1]. SNGP is a simple
method to improve a neural network's uncertainty quantification ability
  without sacrificing accuracy or latency. It applies spectral normalization to
the hidden pooler layer, and then replaces the dense output layer with a
Gaussian process.
[1]: Jeremiah Liu et al. Simple and Principled Uncertainty Estimation with
Deterministic Deep Learning via Distance Awareness.
In _Neural Information Processing Systems_, 2020.
https://arxiv.org/abs/2006.10108
"""
def __init__(self,
inner_dim,
num_classes,
cls_token_idx=0,
activation="tanh",
dropout_rate=0.0,
initializer="glorot_uniform",
use_spec_norm=True,
use_gp_layer=True,
temperature=None,
**kwargs):
"""Initializes the `GaussianProcessClassificationHead`.
Args:
inner_dim: The dimensionality of inner projection layer. If 0 or `None`
then only the output projection layer is created.
num_classes: Number of output classes.
cls_token_idx: The index inside the sequence to pool.
activation: Dense layer activation.
dropout_rate: Dropout probability.
initializer: Initializer for dense layer kernels.
use_spec_norm: Whether to apply spectral normalization to pooler layer.
use_gp_layer: Whether to use Gaussian process as the output layer.
temperature: The temperature parameter to be used for mean-field
approximation during inference. If None then no mean-field adjustment is
applied.
**kwargs: Additional keyword arguments.
"""
# Collects spectral normalization and Gaussian process args from kwargs.
self.use_spec_norm = use_spec_norm
self.use_gp_layer = use_gp_layer
self.spec_norm_kwargs = extract_spec_norm_kwargs(kwargs)
self.gp_layer_kwargs = extract_gp_layer_kwargs(kwargs)
self.temperature = temperature
super().__init__(
inner_dim=inner_dim,
num_classes=num_classes,
cls_token_idx=cls_token_idx,
activation=activation,
dropout_rate=dropout_rate,
initializer=initializer,
**kwargs)
# Applies spectral normalization to the dense pooler layer.
if self.use_spec_norm and hasattr(self, "dense"):
self.dense = spectral_normalization.SpectralNormalization(
self.dense, inhere_layer_name=True, **self.spec_norm_kwargs)
# Replace Dense output layer with the Gaussian process layer.
if use_gp_layer:
self.out_proj = gaussian_process.RandomFeatureGaussianProcess(
self.num_classes,
kernel_initializer=tf_utils.clone_initializer(self.initializer),
name="logits",
**self.gp_layer_kwargs)
def call(self, features, training=False, return_covmat=False):
"""Returns model output.
    During training, the model returns raw logits. During evaluation, the model
returns uncertainty adjusted logits, and (optionally) the covariance matrix.
Arguments:
features: A tensor of input features, shape (batch_size, feature_dim).
training: Whether the model is in training mode.
return_covmat: Whether the model should also return covariance matrix if
`use_gp_layer=True`. During training, it is recommended to set
`return_covmat=False` to be compatible with the standard Keras pipelines
(e.g., `model.fit()`).
Returns:
logits: Uncertainty-adjusted predictive logits, shape
(batch_size, num_classes).
covmat: (Optional) Covariance matrix, shape (batch_size, batch_size).
Returned only when return_covmat=True.
"""
logits = super().call(features)
# Extracts logits and covariance matrix from model output.
if self.use_gp_layer:
logits, covmat = logits
else:
covmat = None
# Computes the uncertainty-adjusted logits during evaluation.
if not training:
logits = gaussian_process.mean_field_logits(
logits, covmat, mean_field_factor=self.temperature)
if return_covmat and covmat is not None:
return logits, covmat
return logits
def reset_covariance_matrix(self):
"""Resets covariance matrix of the Gaussian process layer."""
if hasattr(self.out_proj, "reset_covariance_matrix"):
self.out_proj.reset_covariance_matrix()
def get_config(self):
config = dict(
use_spec_norm=self.use_spec_norm, use_gp_layer=self.use_gp_layer)
config.update(self.spec_norm_kwargs)
config.update(self.gp_layer_kwargs)
config["temperature"] = self.temperature
config.update(super(GaussianProcessClassificationHead, self).get_config())
return config
def extract_gp_layer_kwargs(kwargs):
"""Extracts Gaussian process layer configs from a given kwarg."""
return dict(
num_inducing=kwargs.pop("num_inducing", 1024),
normalize_input=kwargs.pop("normalize_input", True),
gp_cov_momentum=kwargs.pop("gp_cov_momentum", 0.999),
gp_cov_ridge_penalty=kwargs.pop("gp_cov_ridge_penalty", 1.),
scale_random_features=kwargs.pop("scale_random_features", False),
l2_regularization=kwargs.pop("l2_regularization", 1e-6),
gp_cov_likelihood=kwargs.pop("gp_cov_likelihood", "gaussian"),
return_gp_cov=kwargs.pop("return_gp_cov", True),
return_random_features=kwargs.pop("return_random_features", False),
use_custom_random_features=kwargs.pop("use_custom_random_features", True),
custom_random_features_initializer=kwargs.pop(
"custom_random_features_initializer", "random_normal"),
custom_random_features_activation=kwargs.pop(
"custom_random_features_activation", None))
def extract_spec_norm_kwargs(kwargs):
"""Extracts spectral normalization configs from a given kwarg."""
return dict(
iteration=kwargs.pop("iteration", 1),
norm_multiplier=kwargs.pop("norm_multiplier", .99))
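
# A minimal usage sketch for `GaussianProcessClassificationHead`. The sizes,
# the temperature value and `num_inducing` below are illustrative assumptions;
# `num_inducing` is forwarded to the Gaussian process output layer via kwargs.
def _gaussian_process_head_example():
  head = GaussianProcessClassificationHead(
      inner_dim=16, num_classes=3, temperature=2.0, num_inducing=32)
  sequence_output = tf.random.normal((4, 8, 16))
  train_logits = head(sequence_output, training=True)  # Shape: [4, 3].
  eval_logits, covmat = head(
      sequence_output, training=False, return_covmat=True)
  # eval_logits: mean-field adjusted logits, [4, 3]; covmat: [4, 4].
  return train_logits, eval_logits, covmat
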
class PerQueryDenseHead(tf.keras.layers.Layer):
"""Pooling head used for EncT5 style models.
  This module applies a different projection to each query.
  For an input of shape= [bs, num_queries, hidden_size], it projects each query
  to `features` dimensions, ending up with shape= [bs, num_queries, features].
For example, for classification with a few classes, one may use num_queries
as 1 and features as number of classes. For multilabel classification, one
may use num_queries as number of classes and features as 2. So each query
represents a binary classification of one label.
"""
def __init__(self,
num_queries: int,
features: int,
use_bias: bool = False,
kernel_initializer: str = "glorot_uniform",
**kwargs):
"""Initializes the `PerQueryDenseHead`.
Args:
num_queries: number of queries (the learnable embeddings in the input
sequences) from the decoder.
      features: int with the number of output features. Each query will be
        projected to this number of features with a different projection.
use_bias: whether to add a bias to the output.
kernel_initializer: Initializer for dense layer kernels.
**kwargs: Keyword arguments.
"""
super().__init__(**kwargs)
self.num_queries = num_queries
self.features = features
self.use_bias = use_bias
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
# Hidden size.
last_dim = tf.compat.dimension_value(input_shape[-1])
self.hidden_size = last_dim
self.kernel = self.add_weight(
"kernel",
shape=[self.num_queries, last_dim, self.features],
initializer=self.kernel_initializer,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
"bias",
shape=[
self.num_queries,
self.features,
],
dtype=self.dtype,
trainable=True)
else:
self.bias = None
def call(self, inputs: tf.Tensor) -> tf.Tensor:
"""Implements call().
Args:
inputs: a rank-3 Tensor of shape= [bs, num_queries, hidden_size].
Returns:
A Tensor, shape= [batch size, num_queries, features].
"""
outputs = tf.einsum("bqh,qhf->bqf", inputs, self.kernel)
if self.use_bias:
outputs += self.bias
return outputs
def get_config(self):
config = {
"num_queries":
self.num_queries,
"features":
self.features,
"kernel_initializer":
            tf.keras.initializers.serialize(self.kernel_initializer),
}
config.update(super(PerQueryDenseHead, self).get_config())
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
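
# A minimal usage sketch for `PerQueryDenseHead`: an EncT5-style setup with 4
# hypothetical queries, each projected to 2 features (e.g. a binary decision
# per query). All sizes are illustrative assumptions.
def _per_query_dense_head_example():
  head = PerQueryDenseHead(num_queries=4, features=2)
  decoder_queries = tf.ones((3, 4, 16))  # [batch_size, num_queries, hidden].
  logits = head(decoder_queries)  # Shape: [3, 4, 2].
  return logits
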
| 16,198 | 34.138829 | 80 | py |
models | models-master/official/nlp/modeling/layers/gated_feedforward.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based gated feedforward layer."""
# pylint: disable=g-classes-have-attributes
import gin
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import util
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class GatedFeedforward(tf.keras.layers.Layer):
"""Gated linear feedforward layer.
This layer follows the paper "GLU Variants Improve Transformer"
  (https://arxiv.org/abs/2002.05202). In addition, it allows stacking
  multiple feedforward blocks and specifying the position of the dropout layer.
Args:
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout: Dropout probability for the output dropout.
use_gate: Whether to use gated linear units. If True, assuming `GELU` as the
activation and omitting bias, will apply
      `GEGLU(x, W, V, W_2) = (GELU(xW) * xV)W_2`; if False, will follow the
      "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) paper and
      apply `FFN(x, W_1, W_2) = GELU(xW_1)W_2`.
num_blocks: The number of feedforward blocks to stack. Each block contains a
(gated) linear layer and a fully connected layer followed by dropout,
layer norm and residual.
dropout_position: Where to apply the dropout, the value can be either
`before_residual` or `after_residual`. If `before_residual`, will apply
`layer_output = layer_norm(dropout(layer_output) + layer_input)`; if
`after residual`, will apply
`layer_output = dropout(layer_norm(layer_output + layer_input))`.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
"""
def __init__(self,
inner_dim=768,
inner_activation=tf_utils.get_activation("gelu"),
dropout=0.0,
use_gate=True,
apply_output_layer_norm=True,
num_blocks=1,
dropout_position="before_residual",
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
inner_dim = kwargs.pop("intermediate_size", inner_dim)
inner_activation = kwargs.pop("intermediate_activation", inner_activation)
util.filter_kwargs(kwargs)
super().__init__(**kwargs)
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._dropout = dropout
self._use_gate = use_gate
self._num_blocks = num_blocks
self._apply_output_layer_norm = apply_output_layer_norm
self._dropout_position = dropout_position
if self._dropout_position not in ("before_residual", "after_residual"):
raise ValueError(
"The dropout_position should be either `before_residual` or"
"`after_residual`, got: %s" % self._dropout_position)
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
def build(self, input_shape):
hidden_size = input_shape.as_list()[-1]
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._intermediate_dense = []
self._inner_activation_layers = []
self._gate_dense = []
self._output_dense = []
self._output_dropout = []
self._output_layer_norm = []
activation_policy = tf.keras.mixed_precision.global_policy()
if activation_policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
activation_policy = tf.float32
for i in range(self._num_blocks):
self._intermediate_dense.append(
tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self._inner_dim),
bias_axes="d",
name="intermediate_%d" % i,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(
self._bias_initializer),
**common_kwargs))
self._inner_activation_layers.append(
tf.keras.layers.Activation(
self._inner_activation, dtype=activation_policy))
if self._use_gate:
self._gate_dense.append(
tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self._inner_dim),
bias_axes="d",
name="gate_%d" % i,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(
self._bias_initializer),
**common_kwargs))
self._output_dense.append(
tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
name="output_%d" % i,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(
self._bias_initializer),
**common_kwargs))
self._output_dropout.append(tf.keras.layers.Dropout(rate=self._dropout))
# Use float32 in layernorm for numeric stability.
if self._apply_output_layer_norm:
self._output_layer_norm.append(
tf.keras.layers.LayerNormalization(
name="output_layer_norm_%d" % i,
axis=-1,
epsilon=1e-12,
dtype=tf.float32))
def get_config(self):
config = {
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"dropout":
self._dropout,
"use_gate":
self._use_gate,
"num_blocks":
self._num_blocks,
"dropout_position":
self._dropout_position,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint)
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
layer_output = inputs
for i in range(self._num_blocks):
layer_input = layer_output
intermediate_output = self._intermediate_dense[i](layer_input)
intermediate_output = self._inner_activation_layers[i](
intermediate_output)
if self._use_gate:
gated_linear = self._gate_dense[i](layer_input)
intermediate_output = intermediate_output * gated_linear
layer_output = self._output_dense[i](intermediate_output)
if self._dropout_position == "before_residual":
layer_output = self._output_dropout[i](layer_output)
# During mixed precision training, `layer_input` may be from layer norm.
# If so, it is always fp32. Cast layer_output to fp32 for the subsequent
# add.
if layer_input.dtype == tf.float32:
layer_output = tf.cast(layer_output, tf.float32)
if self._apply_output_layer_norm:
layer_output = self._output_layer_norm[i](layer_output + layer_input)
if self._dropout_position == "after_residual":
layer_output = self._output_dropout[i](layer_output)
return layer_output
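
# A minimal usage sketch for `GatedFeedforward`: one gated block applied to a
# fake hidden-state tensor. The sizes and dropout rate are illustrative
# assumptions; the output keeps the input shape because of the residual path.
def _gated_feedforward_example():
  layer = GatedFeedforward(inner_dim=32, dropout=0.1, use_gate=True)
  hidden_states = tf.ones((2, 8, 16))  # [batch_size, seq_length, width].
  outputs = layer(hidden_states)  # Shape: [2, 8, 16].
  return outputs
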
| 9,681 | 41.651982 | 80 | py |
models | models-master/official/nlp/modeling/layers/gaussian_process.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for random feature Gaussian process layer."""
import math
import tensorflow as tf
_SUPPORTED_LIKELIHOOD = ('binary_logistic', 'poisson', 'gaussian')
class RandomFeatureGaussianProcess(tf.keras.layers.Layer):
"""Gaussian process layer with random feature approximation [1].
During training, the model updates the maximum a posteriori (MAP) logits
estimates and posterior precision matrix using minibatch statistics. During
inference, the model divides the MAP logit estimates by the predictive
standard deviation, which is equivalent to approximating the posterior mean
of the predictive probability via the mean-field approximation.
User can specify different types of random features by setting
`use_custom_random_features=True`, and change the initializer and activations
of the custom random features. For example:
MLP Kernel: initializer='random_normal', activation=tf.nn.relu
RBF Kernel: initializer='random_normal', activation=tf.math.cos
A linear kernel can also be specified by setting gp_kernel_type='linear' and
`use_custom_random_features=True`.
[1]: Ali Rahimi and Benjamin Recht. Random Features for Large-Scale Kernel
Machines. In _Neural Information Processing Systems_, 2007.
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
Attributes:
units: (int) The dimensionality of layer.
num_inducing: (int) The number of random features for the approximation.
is_training: (tf.bool) Whether the layer is set in training mode. If so the
layer updates the Gaussian process' variance estimate using statistics
computed from the incoming minibatches.
"""
def __init__(self,
units,
num_inducing=1024,
gp_kernel_type='gaussian',
gp_kernel_scale=1.,
gp_output_bias=0.,
normalize_input=False,
gp_kernel_scale_trainable=False,
gp_output_bias_trainable=False,
gp_cov_momentum=0.999,
gp_cov_ridge_penalty=1.,
scale_random_features=True,
use_custom_random_features=True,
custom_random_features_initializer=None,
custom_random_features_activation=None,
l2_regularization=1e-6,
gp_cov_likelihood='gaussian',
return_gp_cov=True,
return_random_features=False,
dtype=None,
name='random_feature_gaussian_process',
**gp_output_kwargs):
"""Initializes a random-feature Gaussian process layer instance.
Args:
units: (int) Number of output units.
num_inducing: (int) Number of random Fourier features used for
approximating the Gaussian process.
gp_kernel_type: (string) The type of kernel function to use for Gaussian
process. Currently default to 'gaussian' which is the Gaussian RBF
kernel.
      gp_kernel_scale: (float) The length-scale parameter of a
shift-invariant kernel function, i.e., for RBF kernel:
exp(-|x1 - x2|**2 / gp_kernel_scale).
gp_output_bias: (float) Scalar initial value for the bias vector.
normalize_input: (bool) Whether to normalize the input to Gaussian
process.
gp_kernel_scale_trainable: (bool) Whether the length scale variable is
trainable.
gp_output_bias_trainable: (bool) Whether the bias is trainable.
gp_cov_momentum: (float) A discount factor used to compute the moving
average for posterior covariance matrix.
gp_cov_ridge_penalty: (float) Initial Ridge penalty to posterior
covariance matrix.
scale_random_features: (bool) Whether to scale the random feature
by sqrt(2. / num_inducing).
use_custom_random_features: (bool) Whether to use custom random
features implemented using tf.keras.layers.Dense.
custom_random_features_initializer: (str or callable) Initializer for
the random features. Default to random normal which approximates a RBF
kernel function if activation function is cos.
custom_random_features_activation: (callable) Activation function for the
random feature layer. Default to cosine which approximates a RBF
kernel function.
l2_regularization: (float) The strength of l2 regularization on the output
weights.
gp_cov_likelihood: (string) Likelihood to use for computing Laplace
approximation for covariance matrix. Default to `gaussian`.
return_gp_cov: (bool) Whether to also return GP covariance matrix.
If False then no covariance learning is performed.
return_random_features: (bool) Whether to also return random features.
dtype: (tf.DType) Input data type.
name: (string) Layer name.
**gp_output_kwargs: Additional keyword arguments to dense output layer.
"""
super().__init__(name=name, dtype=dtype)
self.units = units
self.num_inducing = num_inducing
self.normalize_input = normalize_input
self.gp_input_scale = 1. / tf.sqrt(gp_kernel_scale)
self.gp_feature_scale = tf.sqrt(2. / float(num_inducing))
self.scale_random_features = scale_random_features
self.return_random_features = return_random_features
self.return_gp_cov = return_gp_cov
self.gp_kernel_type = gp_kernel_type
self.gp_kernel_scale = gp_kernel_scale
self.gp_output_bias = gp_output_bias
self.gp_kernel_scale_trainable = gp_kernel_scale_trainable
self.gp_output_bias_trainable = gp_output_bias_trainable
self.use_custom_random_features = use_custom_random_features
self.custom_random_features_initializer = custom_random_features_initializer
self.custom_random_features_activation = custom_random_features_activation
self.l2_regularization = l2_regularization
self.gp_output_kwargs = gp_output_kwargs
self.gp_cov_momentum = gp_cov_momentum
self.gp_cov_ridge_penalty = gp_cov_ridge_penalty
self.gp_cov_likelihood = gp_cov_likelihood
if self.use_custom_random_features:
# Default to Gaussian RBF kernel.
self.random_features_bias_initializer = tf.random_uniform_initializer(
minval=0., maxval=2. * math.pi)
if self.custom_random_features_initializer is None:
self.custom_random_features_initializer = (
tf.keras.initializers.RandomNormal(stddev=1.))
if self.custom_random_features_activation is None:
self.custom_random_features_activation = tf.math.cos
def build(self, input_shape):
# Defines model layers.
if self.normalize_input:
self._input_norm_layer = tf.keras.layers.LayerNormalization(
name='gp_input_normalization')
self._input_norm_layer.build(input_shape)
input_shape = self._input_norm_layer.compute_output_shape(input_shape)
self._random_feature = self._make_random_feature_layer(
name='gp_random_feature')
self._random_feature.build(input_shape)
input_shape = self._random_feature.compute_output_shape(input_shape)
if self.return_gp_cov:
self._gp_cov_layer = LaplaceRandomFeatureCovariance(
momentum=self.gp_cov_momentum,
ridge_penalty=self.gp_cov_ridge_penalty,
likelihood=self.gp_cov_likelihood,
dtype=self.dtype,
name='gp_covariance')
self._gp_cov_layer.build(input_shape)
self._gp_output_layer = tf.keras.layers.Dense(
units=self.units,
use_bias=False,
kernel_regularizer=tf.keras.regularizers.l2(self.l2_regularization),
dtype=self.dtype,
name='gp_output_weights',
**self.gp_output_kwargs)
self._gp_output_layer.build(input_shape)
self._gp_output_bias = tf.Variable(
initial_value=[self.gp_output_bias] * self.units,
dtype=self.dtype,
trainable=self.gp_output_bias_trainable,
name='gp_output_bias')
self.built = True
def _make_random_feature_layer(self, name):
"""Defines random feature layer depending on kernel type."""
if not self.use_custom_random_features:
# Use default RandomFourierFeatures layer from tf.keras.
return tf.keras.layers.experimental.RandomFourierFeatures(
output_dim=self.num_inducing,
kernel_initializer=self.gp_kernel_type,
scale=self.gp_kernel_scale,
trainable=self.gp_kernel_scale_trainable,
dtype=self.dtype,
name=name)
if self.gp_kernel_type.lower() == 'linear':
custom_random_feature_layer = tf.keras.layers.Lambda(
lambda x: x, name=name)
else:
# Use user-supplied configurations.
custom_random_feature_layer = tf.keras.layers.Dense(
units=self.num_inducing,
use_bias=True,
activation=self.custom_random_features_activation,
kernel_initializer=self.custom_random_features_initializer,
bias_initializer=self.random_features_bias_initializer,
trainable=False,
name=name)
return custom_random_feature_layer
def reset_covariance_matrix(self):
"""Resets covariance matrix of the GP layer.
    This function is useful for resetting the model's covariance matrix at the
beginning of a new epoch.
"""
self._gp_cov_layer.reset_precision_matrix()
def call(self, inputs, global_step=None, training=None):
# Computes random features.
gp_inputs = inputs
if self.normalize_input:
gp_inputs = self._input_norm_layer(gp_inputs)
elif self.use_custom_random_features:
# Supports lengthscale for custom random feature layer by directly
# rescaling the input.
gp_input_scale = tf.cast(self.gp_input_scale, inputs.dtype)
gp_inputs = gp_inputs * gp_input_scale
gp_feature = self._random_feature(gp_inputs)
if self.scale_random_features:
      # Scale random features by sqrt(2. / num_inducing) following [1].
      # When using the GP layer as the output layer of a neural network,
      # it is recommended to turn this scaling off to prevent it from changing
      # the learning rate of the hidden layers.
gp_feature_scale = tf.cast(self.gp_feature_scale, inputs.dtype)
gp_feature = gp_feature * gp_feature_scale
# Computes posterior center (i.e., MAP estimate) and variance.
gp_output = self._gp_output_layer(gp_feature) + self._gp_output_bias
if self.return_gp_cov:
gp_covmat = self._gp_cov_layer(gp_feature, gp_output, training)
# Assembles model output.
model_output = [gp_output,]
if self.return_gp_cov:
model_output.append(gp_covmat)
if self.return_random_features:
model_output.append(gp_feature)
return model_output
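
# A minimal usage sketch for `RandomFeatureGaussianProcess` with its default
# RBF-style random features. The feature and batch sizes are illustrative
# assumptions; at inference the layer also returns a covariance estimate.
def _random_feature_gp_example():
  gp_layer = RandomFeatureGaussianProcess(units=3, num_inducing=64)
  features = tf.random.normal((4, 16))  # [batch_size, feature_dim].
  logits, covmat = gp_layer(features, training=False)
  # logits: [4, 3]; covmat: [4, 4] predictive covariance matrix.
  return logits, covmat
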
class LaplaceRandomFeatureCovariance(tf.keras.layers.Layer):
"""Computes the Gaussian Process covariance using Laplace method.
At training time, this layer updates the Gaussian process posterior using
model features in minibatches.
Attributes:
momentum: (float) A discount factor used to compute the moving average for
posterior precision matrix. Analogous to the momentum factor in batch
normalization. If -1 then update covariance matrix using a naive sum
without momentum, which is desirable if the goal is to compute the exact
covariance matrix by passing through data once (say in the final epoch).
ridge_penalty: (float) Initial Ridge penalty to weight covariance matrix.
      This value is used to stabilize the eigenvalues of the weight covariance
estimate so that the matrix inverse can be computed for Cov = inv(t(X) * X
+ s * I). The ridge factor s cannot be too large since otherwise it will
dominate the t(X) * X term and make covariance estimate not meaningful.
likelihood: (str) The likelihood to use for computing Laplace approximation
for the covariance matrix. Can be one of ('binary_logistic', 'poisson',
'gaussian').
"""
def __init__(self,
momentum=0.999,
ridge_penalty=1.,
likelihood='gaussian',
dtype=None,
name='laplace_covariance'):
if likelihood not in _SUPPORTED_LIKELIHOOD:
raise ValueError(
f'"likelihood" must be one of {_SUPPORTED_LIKELIHOOD}, got {likelihood}.'
)
self.ridge_penalty = ridge_penalty
self.momentum = momentum
self.likelihood = likelihood
super(LaplaceRandomFeatureCovariance, self).__init__(dtype=dtype, name=name)
def compute_output_shape(self, input_shape):
gp_feature_dim = input_shape[-1]
return tf.TensorShape([gp_feature_dim, gp_feature_dim])
def build(self, input_shape):
gp_feature_dim = input_shape[-1]
# Convert gp_feature_dim to int value for TF1 compatibility.
if isinstance(gp_feature_dim, tf.compat.v1.Dimension):
gp_feature_dim = gp_feature_dim.value
# Posterior precision matrix for the GP's random feature coefficients.
self.initial_precision_matrix = (
self.ridge_penalty * tf.eye(gp_feature_dim, dtype=self.dtype))
self.precision_matrix = (
self.add_weight(
name='gp_precision_matrix',
shape=(gp_feature_dim, gp_feature_dim),
dtype=self.dtype,
initializer=tf.keras.initializers.Identity(self.ridge_penalty),
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA))
self.built = True
def make_precision_matrix_update_op(self,
gp_feature,
logits,
precision_matrix):
"""Defines update op for the precision matrix of feature weights."""
if self.likelihood != 'gaussian':
if logits is None:
raise ValueError(
f'"logits" cannot be None when likelihood={self.likelihood}')
if logits.shape[-1] != 1:
raise ValueError(
f'likelihood={self.likelihood} only support univariate logits.'
f'Got logits dimension: {logits.shape[-1]}')
batch_size = tf.shape(gp_feature)[0]
batch_size = tf.cast(batch_size, dtype=gp_feature.dtype)
# Computes batch-specific normalized precision matrix.
if self.likelihood == 'binary_logistic':
prob = tf.sigmoid(logits)
prob_multiplier = prob * (1. - prob)
elif self.likelihood == 'poisson':
prob_multiplier = tf.exp(logits)
else:
prob_multiplier = 1.
gp_feature_adjusted = tf.sqrt(prob_multiplier) * gp_feature
precision_matrix_minibatch = tf.matmul(
gp_feature_adjusted, gp_feature_adjusted, transpose_a=True)
# Updates the population-wise precision matrix.
if self.momentum > 0:
# Use moving-average updates to accumulate batch-specific precision
# matrices.
precision_matrix_minibatch = precision_matrix_minibatch / batch_size
precision_matrix_new = (
self.momentum * precision_matrix +
(1. - self.momentum) * precision_matrix_minibatch)
else:
# Compute exact population-wise covariance without momentum.
# If use this option, make sure to pass through data only once.
precision_matrix_new = precision_matrix + precision_matrix_minibatch
# Returns the update op.
return precision_matrix.assign(precision_matrix_new)
def reset_precision_matrix(self):
"""Resets precision matrix to its initial value.
    This function is useful for resetting the model's covariance matrix at the
beginning of a new epoch.
"""
precision_matrix_reset_op = self.precision_matrix.assign(
self.initial_precision_matrix)
self.add_update(precision_matrix_reset_op)
def compute_predictive_covariance(self, gp_feature):
"""Computes posterior predictive variance.
Approximates the Gaussian process posterior using random features.
    Given training random features Phi_tr (num_train, num_hidden) and testing
    random features Phi_ts (batch_size, num_hidden), the predictive covariance
    matrix is computed as (assuming a Gaussian likelihood):
s * Phi_ts @ inv(t(Phi_tr) * Phi_tr + s * I) @ t(Phi_ts),
    where s is the ridge factor to be used for stabilizing the inverse, and I is
the identity matrix with shape (num_hidden, num_hidden).
Args:
gp_feature: (tf.Tensor) The random feature of testing data to be used for
computing the covariance matrix. Shape (batch_size, gp_hidden_size).
Returns:
(tf.Tensor) Predictive covariance matrix, shape (batch_size, batch_size).
"""
# Computes the covariance matrix of the feature coefficient.
feature_cov_matrix = tf.linalg.inv(self.precision_matrix)
# Computes the covariance matrix of the gp prediction.
cov_feature_product = tf.matmul(
feature_cov_matrix, gp_feature, transpose_b=True) * self.ridge_penalty
gp_cov_matrix = tf.matmul(gp_feature, cov_feature_product)
return gp_cov_matrix
def _get_training_value(self, training=None):
if training is None:
training = tf.keras.backend.learning_phase()
if isinstance(training, int):
training = bool(training)
return training
def call(self, inputs, logits=None, training=None):
"""Minibatch updates the GP's posterior precision matrix estimate.
Args:
inputs: (tf.Tensor) GP random features, shape (batch_size,
gp_hidden_size).
logits: (tf.Tensor) Pre-activation output from the model. Needed
for Laplace approximation under a non-Gaussian likelihood.
training: (tf.bool) whether or not the layer is in training mode. If in
training mode, the gp_weight covariance is updated using gp_feature.
Returns:
      gp_covmat (tf.Tensor): GP posterior predictive covariance matrix,
shape (batch_size, batch_size).
"""
batch_size = tf.shape(inputs)[0]
training = self._get_training_value(training)
if training:
# Define and register the update op for feature precision matrix.
precision_matrix_update_op = self.make_precision_matrix_update_op(
gp_feature=inputs,
logits=logits,
precision_matrix=self.precision_matrix)
self.add_update(precision_matrix_update_op)
# Return null estimate during training.
return tf.eye(batch_size, dtype=self.dtype)
else:
# Return covariance estimate during inference.
return self.compute_predictive_covariance(gp_feature=inputs)
def mean_field_logits(logits, covariance_matrix=None, mean_field_factor=1.):
"""Adjust the model logits so its softmax approximates the posterior mean [1].
[1]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
Arguments:
logits: A float tensor of shape (batch_size, num_classes).
covariance_matrix: The covariance matrix of shape (batch_size, batch_size).
If None then it assumes the covariance_matrix is an identity matrix.
mean_field_factor: The scale factor for mean-field approximation, used to
adjust the influence of posterior variance in posterior mean
approximation. If covariance_matrix=None then it is used as the
temperature parameter for temperature scaling.
Returns:
Tensor of adjusted logits, shape (batch_size, num_classes).
"""
if mean_field_factor is None or mean_field_factor < 0:
return logits
# Compute standard deviation.
if covariance_matrix is None:
variances = 1.
else:
variances = tf.linalg.diag_part(covariance_matrix)
# Compute scaling coefficient for mean-field approximation.
logits_scale = tf.sqrt(1. + variances * mean_field_factor)
if len(logits.shape) > 1:
# Cast logits_scale to compatible dimension.
logits_scale = tf.expand_dims(logits_scale, axis=-1)
return logits / logits_scale
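
# A minimal usage sketch for `mean_field_logits`. With an identity covariance
# matrix (an illustrative assumption), each logit row is simply divided by
# sqrt(1 + mean_field_factor), i.e. by 2 for mean_field_factor=3.
def _mean_field_logits_example():
  logits = tf.constant([[2.0, -1.0], [0.5, 0.5]])
  covmat = tf.eye(2)  # Unit predictive variance for each example.
  adjusted = mean_field_logits(logits, covmat, mean_field_factor=3.0)
  return adjusted  # [[1.0, -0.5], [0.25, 0.25]].
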
| 20,440 | 40.294949 | 83 | py |
models | models-master/official/nlp/modeling/layers/talking_heads_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Talking Head Attention layer."""
# pylint: disable=g-classes-have-attributes
import math
import string
import gin
import tensorflow as tf
from official.modeling import tf_utils
_CHR_IDX = string.ascii_lowercase
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class TalkingHeadsAttention(tf.keras.layers.MultiHeadAttention):
"""Implements Talking-Heads Attention.
This is an implementation of Talking-Heads Attention based on the paper
  Talking-Heads Attention (https://arxiv.org/abs/2003.02436): it enhances
  multi-head attention by including linear projections across the
  attention-heads dimension, immediately before and after the softmax
  operation.
See the base class `tf.keras.layers.MultiHeadAttention` for more details.
Args:
num_heads: Number of attention heads.
key_dim: Size of each attention head for query and key.
value_dim: Size of each attention head for value.
dropout: Dropout probability.
use_bias: Boolean, whether the dense layers use bias vectors/matrices.
output_shape: The expected shape of an output tensor, besides the batch and
sequence dims. If not specified, projects back to the key feature dim.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
return_attention_scores: bool, if `True`, returns the multi-head attention
scores as an additional output argument.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
"""
def _build_attention(self, qkv_rank):
"""Builds multi-head dot-product attention computations.
This function overrides base class to create additional linear projection
that will be applied on attention scores before and after softmax.
Args:
qkv_rank: The rank of query, key, value tensors after projection.
"""
super(TalkingHeadsAttention, self)._build_attention(qkv_rank)
# Build an equation:
# (<batch_dims>, num_heads_a, ...),(num_heads_a, num_heads_b) ->
# (<batch_dims>, num_heads_b, ...)
    # qkv_rank has `batch_dims`, `attention_dims`, `num_heads` and `channels`.
num_batch_dims = qkv_rank - len(self._attention_axes) - 2
# The shape of attn_scores is:
# (<batch_dims>, num_heads, <query_attn_dims>, <key_attn_dims>)
attn_scores_rank = num_batch_dims + 1 + len(self._attention_axes) * 2
scores_notation = _CHR_IDX[:attn_scores_rank]
projection_notation = scores_notation[num_batch_dims] + (
_CHR_IDX[attn_scores_rank])
projected_scores_notation = scores_notation[:num_batch_dims] + (
_CHR_IDX[attn_scores_rank] + scores_notation[num_batch_dims + 1:])
self._talking_heads_equation = "%s,%s->%s" % (
scores_notation, projection_notation, projected_scores_notation)
self._pre_softmax_weight = self.add_weight(
"pre_softmax_weight",
shape=(self._num_heads, self._num_heads),
initializer=tf_utils.clone_initializer(self._kernel_initializer),
regularizer=self._kernel_regularizer,
constraint=self._kernel_constraint,
dtype=self.dtype,
trainable=True)
self._post_softmax_weight = self.add_weight(
"post_softmax_weight",
shape=(self._num_heads, self._num_heads),
initializer=tf_utils.clone_initializer(self._kernel_initializer),
regularizer=self._kernel_regularizer,
constraint=self._kernel_constraint,
dtype=self.dtype,
trainable=True)
def _compute_attention(self,
query_tensor,
key_tensor,
value_tensor,
attention_mask=None,
training=None):
"""Applies Dot-product attention with query, key, value tensors.
This function overrides base class to apply additional linear projection
on attention scores before and after softmax.
Args:
query_tensor: Projected query `Tensor` of shape `[B, T, N, key_dim]`.
key_tensor: Projected key `Tensor` of shape `[B, T, N, key_dim]`.
value_tensor: Projected value `Tensor` of shape `[B, T, N, value_dim]`.
attention_mask: a boolean mask of shape `[B, T, S]`, that prevents
attention to certain positions.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum(self._dot_product_equation, key_tensor,
query_tensor)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(self._key_dim)))
# Apply linear projection before softmax
attention_scores = tf.einsum(self._talking_heads_equation, attention_scores,
self._pre_softmax_weight)
# Normalize the attention scores to probabilities.
# `attention_scores` = [B, N, T, S]
attention_scores = self._masked_softmax(attention_scores, attention_mask)
# Apply linear projection after softmax
attention_scores = tf.einsum(self._talking_heads_equation, attention_scores,
self._post_softmax_weight)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_scores_dropout = self._dropout_layer(
attention_scores, training=training)
# `context_layer` = [B, T, N, H]
attention_output = tf.einsum(self._combine_equation,
attention_scores_dropout, value_tensor)
return attention_output, attention_scores
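
# A minimal usage sketch for `TalkingHeadsAttention`, which follows the same
# calling convention as `tf.keras.layers.MultiHeadAttention`. Here it is used
# for self-attention; all sizes are illustrative assumptions.
def _talking_heads_attention_example():
  layer = TalkingHeadsAttention(num_heads=4, key_dim=8)
  query = tf.ones((2, 6, 16))  # [batch_size, seq_length, feature_dim].
  outputs = layer(query, query)  # Self-attention output: [2, 6, 16].
  return outputs
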
| 6,903 | 42.696203 | 80 | py |
models | models-master/official/nlp/modeling/layers/self_attention_mask.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras layer that creates a self-attention mask."""
from typing import Optional
import tensorflow as tf
def get_mask(inputs: tf.Tensor,
to_mask: tf.Tensor,
dtype: Optional[tf.DType] = None) -> tf.Tensor:
"""Gets a 3D self-attention mask.
Args:
    inputs: the from_tensor, a 2D or 3D Tensor of shape
      [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
dtype: the output Tensor dtype.
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = tf.shape(inputs)
batch_size = from_shape[0]
from_seq_length = from_shape[1]
dtype = inputs.dtype if dtype is None else dtype
to_shape = tf.shape(to_mask)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), dtype=dtype)
return tf.broadcast_to(to_mask, [batch_size, from_seq_length, to_seq_length])
@tf.keras.utils.register_keras_serializable(package='Text')
class SelfAttentionMask(tf.keras.layers.Layer):
"""Create 3D attention mask from a 2D tensor mask.
inputs[0]: from_tensor: 2D or 3D Tensor of shape
[batch_size, from_seq_length, ...].
inputs[1]: to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
def call(self, inputs, to_mask=None):
if isinstance(inputs, list) and to_mask is None:
to_mask = inputs[1]
inputs = inputs[0]
return get_mask(inputs, to_mask)
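
# A minimal usage sketch for `SelfAttentionMask`: turning a 2D padding mask
# into a 3D attention mask. The shapes and mask values are illustrative
# assumptions.
def _self_attention_mask_example():
  embeddings = tf.ones((2, 4, 8))  # [batch_size, from_seq_length, width].
  padding_mask = tf.constant([[1, 1, 1, 0],
                              [1, 1, 0, 0]], dtype=tf.int32)
  mask_3d = SelfAttentionMask()(embeddings, padding_mask)
  # mask_3d has shape [2, 4, 4]; the padding mask is broadcast over rows.
  return mask_3d
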
| 2,163 | 32.292308 | 79 | py |
models | models-master/official/nlp/modeling/layers/mobile_bert_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.layers import mobile_bert_layers
from official.nlp.modeling.networks import mobile_bert_encoder
def generate_fake_input(batch_size=1, seq_len=5, vocab_size=10000, seed=0):
"""Generate consistent fake integer input sequences."""
np.random.seed(seed)
fake_input = []
for _ in range(batch_size):
fake_input.append([])
for _ in range(seq_len):
fake_input[-1].append(np.random.randint(0, vocab_size))
fake_input = np.asarray(fake_input)
return fake_input
class MobileBertEncoderTest(parameterized.TestCase, tf.test.TestCase):
def test_embedding_layer_with_token_type(self):
layer = mobile_bert_layers.MobileBertEmbedding(10, 8, 2, 16)
input_seq = tf.Variable([[2, 3, 4, 5]])
token_type = tf.Variable([[0, 1, 1, 1]])
output = layer(input_seq, token_type)
output_shape = output.shape.as_list()
expected_shape = [1, 4, 16]
self.assertListEqual(output_shape, expected_shape, msg=None)
def test_embedding_layer_without_token_type(self):
layer = mobile_bert_layers.MobileBertEmbedding(10, 8, 2, 16)
input_seq = tf.Variable([[2, 3, 4, 5]])
output = layer(input_seq)
output_shape = output.shape.as_list()
expected_shape = [1, 4, 16]
self.assertListEqual(output_shape, expected_shape, msg=None)
def test_embedding_layer_get_config(self):
layer = mobile_bert_layers.MobileBertEmbedding(
word_vocab_size=16,
word_embed_size=32,
type_vocab_size=4,
output_embed_size=32,
max_sequence_length=32,
normalization_type='layer_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
dropout_rate=0.5)
layer_config = layer.get_config()
new_layer = mobile_bert_layers.MobileBertEmbedding.from_config(layer_config)
self.assertEqual(layer_config, new_layer.get_config())
def test_no_norm(self):
layer = mobile_bert_layers.NoNorm()
feature = tf.random.normal([2, 3, 4])
output = layer(feature)
output_shape = output.shape.as_list()
expected_shape = [2, 3, 4]
self.assertListEqual(output_shape, expected_shape, msg=None)
@parameterized.named_parameters(('with_kq_shared_bottleneck', False),
('without_kq_shared_bottleneck', True))
def test_transfomer_kq_shared_bottleneck(self, is_kq_shared):
feature = tf.random.uniform([2, 3, 512])
layer = mobile_bert_layers.MobileBertTransformer(
key_query_shared_bottleneck=is_kq_shared)
output = layer(feature)
output_shape = output.shape.as_list()
expected_shape = [2, 3, 512]
self.assertListEqual(output_shape, expected_shape, msg=None)
def test_transfomer_with_mask(self):
feature = tf.random.uniform([2, 3, 512])
input_mask = [[[0., 0., 1.], [0., 0., 1.], [0., 0., 1.]],
[[0., 1., 1.], [0., 1., 1.], [0., 1., 1.]]]
input_mask = np.asarray(input_mask)
layer = mobile_bert_layers.MobileBertTransformer()
output = layer(feature, input_mask)
output_shape = output.shape.as_list()
expected_shape = [2, 3, 512]
self.assertListEqual(output_shape, expected_shape, msg=None)
def test_transfomer_return_attention_score(self):
sequence_length = 5
num_attention_heads = 8
feature = tf.random.uniform([2, sequence_length, 512])
layer = mobile_bert_layers.MobileBertTransformer(
num_attention_heads=num_attention_heads)
_, attention_score = layer(feature, return_attention_scores=True)
expected_shape = [2, num_attention_heads, sequence_length, sequence_length]
self.assertListEqual(
attention_score.shape.as_list(), expected_shape, msg=None)
def test_transformer_get_config(self):
layer = mobile_bert_layers.MobileBertTransformer(
hidden_size=32,
num_attention_heads=2,
intermediate_size=48,
intermediate_act_fn='gelu',
hidden_dropout_prob=0.5,
attention_probs_dropout_prob=0.4,
intra_bottleneck_size=64,
use_bottleneck_attention=True,
key_query_shared_bottleneck=False,
num_feedforward_networks=2,
normalization_type='layer_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
name='block')
layer_config = layer.get_config()
new_layer = mobile_bert_layers.MobileBertTransformer.from_config(
layer_config)
self.assertEqual(layer_config, new_layer.get_config())
class MobileBertMaskedLMTest(tf.test.TestCase):
def create_layer(self,
vocab_size,
hidden_size,
embedding_width,
output='predictions',
xformer_stack=None):
# First, create a transformer stack that we can use to get the LM's
# vocabulary weight.
if xformer_stack is None:
xformer_stack = mobile_bert_encoder.MobileBERTEncoder(
word_vocab_size=vocab_size,
num_blocks=1,
hidden_size=hidden_size,
num_attention_heads=4,
word_embed_size=embedding_width)
# Create a maskedLM from the transformer stack.
test_layer = mobile_bert_layers.MobileBertMaskedLM(
embedding_table=xformer_stack.get_embedding_table(), output=output)
return test_layer
def test_layer_creation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
embedding_width = 32
num_predictions = 21
test_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
embedding_width=embedding_width)
# Make sure that the output tensor of the masked LM is the right shape.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions=masked_positions)
expected_output_shape = [None, num_predictions, vocab_size]
self.assertEqual(expected_output_shape, output.shape.as_list())
def test_layer_invocation_with_external_logits(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
embedding_width = 32
num_predictions = 21
xformer_stack = mobile_bert_encoder.MobileBERTEncoder(
word_vocab_size=vocab_size,
num_blocks=1,
hidden_size=hidden_size,
num_attention_heads=4,
word_embed_size=embedding_width)
test_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
embedding_width=embedding_width,
xformer_stack=xformer_stack,
output='predictions')
logit_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
embedding_width=embedding_width,
xformer_stack=xformer_stack,
output='logits')
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions)
logit_output = logit_layer(lm_input_tensor, masked_positions)
logit_output = tf.keras.layers.Activation(tf.nn.log_softmax)(logit_output)
logit_layer.set_weights(test_layer.get_weights())
model = tf.keras.Model([lm_input_tensor, masked_positions], output)
logits_model = tf.keras.Model(([lm_input_tensor, masked_positions]),
logit_output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
sequence_length, size=(batch_size, num_predictions))
# ref_outputs = model.predict([lm_input_data, masked_position_data])
# outputs = logits_model.predict([lm_input_data, masked_position_data])
ref_outputs = model([lm_input_data, masked_position_data])
outputs = logits_model([lm_input_data, masked_position_data])
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_predictions, vocab_size)
self.assertEqual(expected_output_shape, ref_outputs.shape)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertAllClose(ref_outputs, outputs)
def test_layer_invocation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
embedding_width = 32
num_predictions = 21
test_layer = self.create_layer(
vocab_size=vocab_size,
hidden_size=hidden_size,
embedding_width=embedding_width)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(shape=(num_predictions,), dtype=tf.int32)
output = test_layer(lm_input_tensor, masked_positions)
model = tf.keras.Model([lm_input_tensor, masked_positions], output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
_ = model.predict([lm_input_data, masked_position_data])
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = self.create_layer(
vocab_size=8, hidden_size=8, embedding_width=4, output='bad')
def test_hidden_size_smaller_than_embedding_width(self):
hidden_size = 8
sequence_length = 32
num_predictions = 20
with self.assertRaisesRegex(
ValueError, 'hidden size 8 cannot be smaller than embedding width 16.'):
test_layer = self.create_layer(
vocab_size=8, hidden_size=8, embedding_width=16)
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
_ = test_layer(lm_input_tensor, masked_positions)
if __name__ == '__main__':
tf.test.main()
| 10,880 | 38.711679 | 80 | py |
models | models-master/official/nlp/modeling/layers/per_dim_scale_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based attention layer with learnable per dim scaling."""
import gin
import numpy as np
import tensorflow as tf
@gin.configurable
@tf.keras.utils.register_keras_serializable(package='Text')
class PerDimScaleAttention(tf.keras.layers.MultiHeadAttention):
"""Learn scales for individual dims.
It can improve quality but might hurt training stability.
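
  Illustrative usage (a sketch, not from the original docs; the call
  signature mirrors `tf.keras.layers.MultiHeadAttention`):

  >>> layer = PerDimScaleAttention(num_heads=2, key_dim=8)
  >>> query = tf.ones((1, 4, 16))
  >>> output = layer(query=query, value=query)
  >>> print(output.shape)
  (1, 4, 16)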
"""
def _build_from_signature(self, query, value, key=None):
super()._build_from_signature(query=query, value=value, key=key) # pytype: disable=attribute-error
self._scale_dim = self._key_dim
with tf.init_scope():
self.per_dim_scale = self.add_weight(
name='per_dim_scale',
shape=(self._scale_dim,),
initializer='zeros',
dtype=self.dtype,
trainable=True)
def _scale_query(self, query):
# 1.0/tf.nn.softplus(0.0) = 1.442695041. Hard code this number so that we
# can avoid unnecessary XLA op fusion mess on TPU.
r_softplus_0 = 1.442695041
scale = tf.constant(
r_softplus_0 / np.sqrt(float(self._scale_dim)), dtype=query.dtype)
scale *= tf.nn.softplus(self.per_dim_scale)
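    # Note: `per_dim_scale` is zero-initialized, so at initialization
    # softplus(per_dim_scale) * r_softplus_0 == 1 and the net multiplier is
    # exactly 1/sqrt(key_dim), i.e. standard scaled dot-product attention;
    # training then learns a separate scale for each of the key dimensions.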
return query * scale
def _compute_attention(self,
query,
key,
value,
attention_mask=None,
training=None):
query = self._scale_query(query)
attention_scores = tf.einsum(self._dot_product_equation, key, query)
attention_scores = self._masked_softmax(attention_scores, attention_mask)
attention_scores_dropout = self._dropout_layer(
attention_scores, training=training)
# `context_layer` = [B, T, N, H]
attention_output = tf.einsum(self._combine_equation,
attention_scores_dropout, value)
return attention_output, attention_scores
def call( # pytype: disable=signature-mismatch # overriding-parameter-count-checks
self,
query,
value,
key=None,
attention_mask=None,
return_attention_scores=False,
training=None,
):
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# N = `num_attention_heads`
# H = `size_per_head`
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S, N, H]
key = self._key_dense(key)
# `value` = [B, S, N, H]
value = self._value_dense(value)
attention_output, attention_scores = self._compute_attention(
query, key, value, attention_mask, training)
attention_output = self._output_dense(attention_output)
if return_attention_scores:
return attention_output, attention_scores
return attention_output
| 3,406 | 32.401961 | 103 | py |
models | models-master/official/nlp/modeling/layers/reuse_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based attention layer."""
# pylint: disable=g-classes-have-attributes
import collections
import math
import string
import numpy as np
import tensorflow as tf
from official.modeling import tf_utils
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
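
  For example (illustrative, not part of the original docstring), with
  `rank=4` and `attn_axes=(1,)` -- i.e. projected tensors of shape
  `(bs, seq_len, num_heads, channels)` -- this returns:
    dot_product_equation = "aecd,abcd->acbe"  # (key, query) -> scores
    combine_equation = "acbe,aecd->abcd"      # (scores, value) -> output
    attn_scores_rank = 4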
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = "%s,%s->%s" % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = "%s,%s->%s" % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
return equation, bias_axes, len(output_str)
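
# Illustrative note (not part of the original module): for example,
# _build_proj_equation(free_dims=2, bound_dims=1, output_dims=2) returns
# ("abc,cde->abde", "de", 4), i.e. the einsum used to project a
# [batch, seq, dim] tensor into [batch, seq, num_heads, head_dim].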
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
class ReuseMultiHeadAttention(tf.keras.layers.Layer):
"""MultiHeadAttention layer.
This is an implementation of multi-headed attention as described in the paper
"Attention is all you Need" (Vaswani et al., 2017).
If `query`, `key,` `value` are the same, then
this is self-attention. Each timestep in `query` attends to the
corresponding sequence in `key`, and returns a fixed-width vector.
This layer first projects `query`, `key` and `value`. These are
(effectively) a list of tensors of length `num_attention_heads`, where the
corresponding shapes are `(batch_size, <query dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, key_dim)`,
`(batch_size, <key/value dimensions>, value_dim)`.
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor.
  Finally, the result tensor with the last dimension as `value_dim` can take
  a linear projection and be returned.
Examples:
Performs 1D cross-attention over two sequence inputs with an attention mask.
Returns the additional attention weights over heads.
  >>> layer = ReuseMultiHeadAttention(num_heads=2, key_dim=2)
>>> target = tf.keras.Input(shape=[8, 16])
>>> source = tf.keras.Input(shape=[4, 16])
>>> output_tensor, weights = layer(target, source,
... return_attention_scores=True)
>>> print(output_tensor.shape)
(None, 8, 16)
>>> print(weights.shape)
(None, 2, 8, 4)
Performs 2D self-attention over a 5D input tensor on axes 2 and 3.
  >>> layer = ReuseMultiHeadAttention(num_heads=2, key_dim=2, attention_axes=(2, 3))
>>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16])
>>> output_tensor = layer(input_tensor, input_tensor)
>>> print(output_tensor.shape)
(None, 5, 3, 4, 16)
Args:
num_heads: Number of attention heads.
key_dim: Size of each attention head for query and key.
value_dim: Size of each attention head for value.
dropout: Dropout probability.
    reuse_attention: An integer specifying the number of attention heads to
      reuse; -1 reuses all heads.
use_relative_pe: Whether to use relative position bias.
    pe_max_seq_length: Used to set the size of the relative position
      encodings.
use_bias: Boolean, whether the dense layers use bias vectors/matrices.
output_shape: The expected shape of an output tensor, besides the batch and
sequence dims. If not specified, projects back to the key feature dim.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
Call arguments:
query: Query `Tensor` of shape `(B, T, dim)`.
value: Value `Tensor` of shape `(B, S, dim)`.
key: Optional key `Tensor` of shape `(B, S, dim)`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions. The boolean mask specifies which query
elements can attend to which key elements, 1 indicates attention and 0
indicates no attention. Broadcasting can happen for the missing batch
dimensions and the head dimension.
    return_attention_scores: A boolean to indicate whether the output should
      be `(attention_output, attention_scores)` if `True`, or just
      `attention_output` if `False`. Defaults to `False`.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
Defaults to either using the training mode of the parent layer/model,
or False (inference) if there is no parent layer.
Returns:
attention_output: The result of the computation, of shape `(B, T, E)`,
where `T` is for target sequence shapes and `E` is the query input last
dimension if `output_shape` is `None`. Otherwise, the multi-head outputs
are project to the shape specified by `output_shape`.
attention_scores: [Optional] multi-head attention coeffients over
attention axes.
"""
def __init__(self,
num_heads,
key_dim,
value_dim=None,
dropout=0.0,
reuse_attention=0,
use_relative_pe=False,
pe_max_seq_length=512,
use_bias=True,
output_shape=None,
attention_axes=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(**kwargs)
self._num_heads = num_heads
self._key_dim = key_dim
self._value_dim = value_dim if value_dim else key_dim
self._dropout = dropout
if reuse_attention > self._num_heads or reuse_attention < -1:
raise ValueError("reuse_attention should be between -1 "
"and %d in call to %s." % (self.__class__,
self._num_heads))
if reuse_attention == -1:
reuse_attention = self._num_heads
self._reuse_heads = reuse_attention
self._use_relative_pe = use_relative_pe
self._pe_max_seq_length = pe_max_seq_length
self._use_bias = use_bias
self._output_shape = output_shape
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    # Store the explicitly-passed activity regularizer; otherwise the
    # constructor argument is silently dropped.
    self._activity_regularizer = tf.keras.regularizers.get(
        activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
if attention_axes is not None and not isinstance(attention_axes,
collections.abc.Sized):
self._attention_axes = (attention_axes,)
else:
self._attention_axes = attention_axes
self._built_from_signature = False
self._query_shape, self._key_shape, self._value_shape = None, None, None
# Use relative PE only if reuse_heads < num_heads.
if self._use_relative_pe and self._reuse_heads < self._num_heads:
# Determine the dtype from global policy.
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
policy = tf.bfloat16
elif policy.name == "mixed_float16":
policy = tf.float16
else:
policy = tf.float32
self._position_embeddings = tf.Variable(
name="relative_position_embeddings",
initial_value=lambda: tf.random.truncated_normal( # pylint: disable=g-long-lambda
[
1, self._num_heads - self._reuse_heads, 2 * self.
_pe_max_seq_length - 1
], mean=0.0, stddev=0.2, dtype=policy),
trainable=True, dtype=policy)
def get_config(self):
config = {
"num_heads": self._num_heads,
"key_dim": self._key_dim,
"value_dim": self._value_dim,
"dropout": self._dropout,
"use_bias": self._use_bias,
"output_shape": self._output_shape,
"attention_axes": self._attention_axes,
"reuse_attention": self._reuse_heads,
"use_relative_pe": self._use_relative_pe,
"pe_max_seq_length": self._pe_max_seq_length,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"query_shape": self._query_shape,
"key_shape": self._key_shape,
"value_shape": self._value_shape,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
# If the layer has a different build() function from the Keras default,
# we need to trigger the customized build to create weights.
query_shape = config.pop("query_shape")
key_shape = config.pop("key_shape")
value_shape = config.pop("value_shape")
layer = cls(**config)
if None in [query_shape, key_shape, value_shape]:
tf.get_logger().warning(
"One of dimensions of the input shape is missing. It should have been"
" memorized when the layer was serialized. "
"%s is created without weights.",
str(cls))
else:
layer._build_from_signature(query_shape, value_shape, key_shape) # pylint: disable=protected-access
return layer
def _build_from_signature(self, query, value, key=None):
"""Builds layers and variables.
Once the method is called, self._built_from_signature will be set to True.
Args:
query: Query tensor or TensorShape.
value: Value tensor or TensorShape.
key: Key tensor or TensorShape.
"""
self._built_from_signature = True
if hasattr(query, "shape"):
self._query_shape = tf.TensorShape(query.shape)
else:
self._query_shape = tf.TensorShape(query)
if hasattr(value, "shape"):
self._value_shape = tf.TensorShape(value.shape)
else:
self._value_shape = tf.TensorShape(value)
if key is None:
self._key_shape = self._value_shape
elif hasattr(key, "shape"):
self._key_shape = tf.TensorShape(key.shape)
else:
self._key_shape = tf.TensorShape(key)
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf.init_scope():
free_dims = self._query_shape.rank - 1
if self._reuse_heads < self._num_heads:
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self._query_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1,
[self._num_heads - self._reuse_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="query",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._key_shape.rank - 1, bound_dims=1, output_dims=2)
self._key_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1,
[self._num_heads - self._reuse_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="key",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._value_shape.rank - 1, bound_dims=1, output_dims=2)
self._value_dense = []
if self._reuse_heads > 0:
self._value_dense.append(
tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._reuse_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value_reuse",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(
self._bias_initializer),
**common_kwargs))
if self._reuse_heads < self._num_heads:
self._value_dense.append(
tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1,
[self._num_heads - self._reuse_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value_new",
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(
self._bias_initializer),
**common_kwargs))
# Builds the attention computations for multi-head dot product attention.
# These computations could be wrapped into the keras attention layer once
      # it supports multi-head einsum computations.
self._build_attention(output_rank)
self._output_dense = []
if self._reuse_heads > 0:
self._output_dense.append(self._make_output_dense(
free_dims, common_kwargs, "attention_output_reuse"))
if self._reuse_heads < self._num_heads:
self._output_dense.append(self._make_output_dense(
free_dims, common_kwargs, "attention_output_new",
self._reuse_heads == 0))
def _make_output_dense(self, free_dims, common_kwargs, name=None,
use_bias=True):
"""Builds the output projection matrix.
Args:
free_dims: Number of free dimensions for einsum equation building.
common_kwargs: Common keyword arguments for einsum layer.
name: Name for the projection layer.
use_bias: Use bias if self._use_bias is true
Returns:
Projection layer.
"""
if self._output_shape:
if not isinstance(self._output_shape, collections.abc.Sized):
output_shape = [self._output_shape]
else:
output_shape = self._output_shape
else:
output_shape = [self._query_shape[-1]]
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=2, output_dims=len(output_shape))
return tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1, output_shape),
bias_axes=bias_axes if (use_bias and self._use_bias) else None,
name=name,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = tf.keras.layers.Softmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
def _masked_softmax(self, attention_scores, attention_mask=None):
# Normalize the attention scores to probabilities.
# `attention_scores` = [B, N, T, S]
if attention_mask is not None:
# The expand dim happens starting from the `num_heads` dimension,
# (<batch_dims>, num_heads, <query_attention_dims, key_attention_dims>)
mask_expansion_axes = [-len(self._attention_axes) * 2 - 1]
for _ in range(len(attention_scores.shape) - len(attention_mask.shape)):
attention_mask = tf.expand_dims(
attention_mask, axis=mask_expansion_axes)
return self._softmax(attention_scores, attention_mask)
def _compute_relative_position(self, query_seq_length, key_seq_length):
position_zero = self._pe_max_seq_length - 1
# We take the vector position variable and concatenate to form a matrix of
    # relative position encodings. i=0 indicates relative position is 0.
indices = tf.expand_dims(tf.range(0, -query_seq_length, -1),
-1) + tf.range(key_seq_length) + position_zero
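    # At this point indices[i, j] == (j - i) + position_zero, so relative
    # position 0 maps to index `position_zero`; the clipping below pins
    # out-of-range relative positions to the first/last learned embedding.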
indices = tf.maximum(indices, 0)
indices = tf.minimum(indices, 2*self._pe_max_seq_length-2)
attention_biases = tf.gather(self._position_embeddings, indices, axis=2)
return attention_biases
def _compute_attention(self,
query,
key,
value,
reuse_scores=None,
attention_mask=None,
training=None):
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
query: Projected query `Tensor` of shape `(B, T, N, key_dim)`.
key: Projected key `Tensor` of shape `(B, T, N, key_dim)`.
value: Projected value `Tensor` of shape `(B, T, N, value_dim)`.
reuse_scores: Attention scores from a previous layer if needed.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
# Partial or no reuse
if self._reuse_heads < self._num_heads:
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
new_scores = tf.einsum(self._dot_product_equation, key, query)
# Add relative position embeddings if required.
if self._use_relative_pe:
new_scores = new_scores + self._compute_relative_position(
tf.shape(query)[1], tf.shape(key)[1])
new_scores = self._masked_softmax(new_scores, attention_mask)
if self._reuse_heads > 0: # Partial reuse
reuse_scores = reuse_scores[:, :self._reuse_heads, :, :]
attention_scores = tf.concat([new_scores, reuse_scores], 1)
else: # No reuse
attention_scores = new_scores
else: # Full reuse
attention_scores = reuse_scores
new_scores = None
# `context_layer` = [B, T, N, H]
attention_output = []
# Partial or full reuse
if self._reuse_heads > 0:
attention_output.append(
tf.einsum(self._combine_equation, self._dropout_layer(
reuse_scores, training=training), value[0]))
# Partial or no reuse
if self._reuse_heads < self._num_heads:
attention_output.append(
tf.einsum(self._combine_equation, self._dropout_layer(
new_scores, training=training), value[-1]))
return attention_output, attention_scores
def call(self,
query,
value,
key=None,
attention_mask=None,
return_attention_scores=False,
training=None,
reuse_attention_scores=None):
if self._reuse_heads > 0 and reuse_attention_scores is None:
raise ValueError("reuse_attention_scores cannot be None when "
"reuse_attention is True or > 0.")
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# N = `num_attention_heads`
# H = `size_per_head`
# `value` = [B, S, N, H]
value = [vd(value) for vd in self._value_dense]
if self._reuse_heads < self._num_heads:
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S, N, H]
key = self._key_dense(key)
else:
query, key = None, None
attention_output, attention_scores = self._compute_attention(
query, key, value, reuse_attention_scores, attention_mask, training)
attention_output = [od(attention_output[i]) for i, od in enumerate(
self._output_dense)]
if len(attention_output) == 1:
attention_output = attention_output[0]
else:
attention_output = attention_output[0] + attention_output[1]
if return_attention_scores:
return attention_output, attention_scores
return attention_output
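
# Illustrative usage sketch (not part of the original module): attention
# scores produced by one layer can be fed to a later layer that reuses them.
# The layer sizes and shapes below are arbitrary examples.
#
#   layer_1 = ReuseMultiHeadAttention(num_heads=8, key_dim=64,
#                                     reuse_attention=0)
#   layer_2 = ReuseMultiHeadAttention(num_heads=8, key_dim=64,
#                                     reuse_attention=-1)  # reuse all heads
#   x = tf.random.uniform([2, 16, 512])
#   out_1, scores = layer_1(x, x, return_attention_scores=True)
#   out_2 = layer_2(out_1, out_1, reuse_attention_scores=scores)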
| 25,658 | 41.133005 | 106 | py |
models | models-master/official/nlp/modeling/layers/pack_optimization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pack sequence optimization on accelerators."""
from typing import Dict
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import rezero_transformer
from official.nlp.modeling.layers import self_attention_mask
from official.nlp.modeling.layers import transformer_encoder_block
from official.nlp.modeling.layers import transformer_scaffold
@tf.keras.utils.register_keras_serializable(package='Text')
class PackBertEmbeddings(tf.keras.layers.Layer):
"""Performs packing tricks for BERT inputs to improve TPU utilization."""
def __init__(self, pack_sequences: int, **kwargs):
super().__init__(**kwargs)
self.pack_sequences = pack_sequences
def call(self, input_embeddings: tf.Tensor,
input_mask: tf.Tensor) -> Dict[str, tf.Tensor]:
batch_size, seq_len, embedding_dim = tf_utils.get_shape_list(
input_embeddings, expected_rank=3)
reduced_batch_size = batch_size // self.pack_sequences
packed_seq_len = self.pack_sequences * seq_len
packed_embeddings = tf.reshape(
input_embeddings, [reduced_batch_size, packed_seq_len, embedding_dim])
input_mask = tf.reshape(input_mask, [reduced_batch_size, packed_seq_len])
example_ids = 1 + tf.range(self.pack_sequences)
# Shape: [batch_size, seq_len, pack_sequences].
example_ids = tf.tile(example_ids[None, :, None],
[reduced_batch_size, 1, seq_len])
example_ids = tf.reshape(example_ids, [reduced_batch_size, packed_seq_len])
example_ids = tf.where(
tf.math.equal(input_mask, 0), tf.zeros_like(example_ids), example_ids)
packing_mask = tf.cast(
tf.equal(
tf.expand_dims(example_ids, 2), tf.expand_dims(example_ids, 1)),
dtype=tf.bool)
attention_mask = self_attention_mask.get_mask(
packed_embeddings, input_mask, dtype=tf.bool)
combined_attention_mask = tf.cast(
tf.math.logical_and(attention_mask, packing_mask), tf.float32)
return dict(
packed_embeddings=packed_embeddings,
combined_attention_mask=combined_attention_mask)
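
# Illustrative shape sketch (not part of the original module), assuming
# pack_sequences=2 and hypothetical inputs `embeddings` of shape [8, 128, H]
# and `mask` of shape [8, 128]:
#
#   packer = PackBertEmbeddings(pack_sequences=2)
#   packed = packer(embeddings, mask)
#   packed['packed_embeddings']        # shape [4, 256, H]
#   packed['combined_attention_mask']  # shape [4, 256, 256]; blocks attention
#                                      # between the two examples sharing a row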
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedTransformerEncoderBlock(
transformer_encoder_block.TransformerEncoderBlock):
"""Transformer layer for packing optimization to stride over inputs."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._output_range is not None:
raise ValueError('StridedTransformerEncoderBlock does not '
'support `output_range` argument.')
def call(self, inputs, stride: tf.Tensor):
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError('Unexpected inputs to %s with length at %d' %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if self._norm_first:
source_tensor = input_tensor[:, ::stride, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm_kv(key_value)
target_tensor = input_tensor[:, ::stride, :]
if attention_mask is not None:
attention_mask = attention_mask[:, ::stride, :]
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
# Important to not combine `self._norm_first` and
# `self._use_query_residual` into one if clause because else is only for
# `_norm_first == False`.
if self._use_query_residual:
attention_output = source_tensor + attention_output
else:
if self._use_query_residual:
attention_output = target_tensor + attention_output
attention_output = self._attention_layer_norm(attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
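
# Illustrative usage sketch (not part of the original module): with a stride
# of 2, only every other position is used as a query, so the output sequence
# is half as long while keys/values still span the full packed sequence.
# Shapes and hyper-parameters below are arbitrary examples.
#
#   block = StridedTransformerEncoderBlock(
#       num_attention_heads=8, inner_dim=2048, inner_activation='gelu')
#   x = tf.random.uniform([4, 256, 512])
#   mask = tf.ones([4, 256, 256])
#   y = block([x, mask], stride=2)  # y has shape [4, 128, 512]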
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedReZeroTransformer(rezero_transformer.ReZeroTransformer):
"""ReZeroTransformer for packing optimization to stride over inputs."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._output_range is not None:
raise ValueError(f'{self.__class__} does not '
'support `output_range` argument.')
def call(self, inputs, stride: tf.Tensor):
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError(f'Unexpected inputs to {self.__class__} with '
f'length at {len(inputs)}.')
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
target_tensor = input_tensor[:, ::stride, :]
if attention_mask is not None:
attention_mask = attention_mask[:, ::stride, :]
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
attention_output = target_tensor + self._rezero_a * attention_output
if self._use_layer_norm:
attention_output = self._attention_layer_norm(attention_output)
else:
attention_output = tf.cast(attention_output, tf.float32)
intermediate_output = self._intermediate_dense(attention_output)
intermediate_output = self._inner_activation_layer(intermediate_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
layer_output = attention_output + tf.cast(self._rezero_a_ffn * layer_output,
tf.float32)
if self._use_layer_norm:
layer_output = self._output_layer_norm(layer_output)
return layer_output
@tf.keras.utils.register_keras_serializable(package='Text')
class StridedTransformerScaffold(transformer_scaffold.TransformerScaffold):
"""TransformerScaffold for packing optimization to stride over inputs."""
def call(self, inputs, stride: tf.Tensor, training=None):
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError('Unexpected inputs to %s with length at %d' %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if key_value is None:
key_value = input_tensor
if self._norm_first:
source_tensor = input_tensor[:, ::stride, :]
input_tensor = self._attention_layer_norm(input_tensor, training=training)
if attention_mask is not None:
attention_mask = attention_mask[:, ::stride, :]
target_tensor = input_tensor[:, ::stride, :]
attention_output = self._attention_layer(
query=target_tensor,
value=key_value,
attention_mask=attention_mask,
training=training)
attention_output = self._attention_dropout(
attention_output, training=training)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(
target_tensor + attention_output, training=training)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(
attention_output, training=training)
if self._feedforward_block is None:
intermediate_output = self._intermediate_dense(attention_output)
intermediate_output = self._intermediate_activation_layer(
intermediate_output)
layer_output = self._output_dense(intermediate_output, training=training)
layer_output = self._output_dropout(layer_output, training=training)
layer_output = tf.cast(layer_output, tf.float32)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self._output_layer_norm(
layer_output + attention_output, training=training)
else:
if self._norm_first:
# if norm_first, assume the feedforward block will not apply layer norm
layer_output = self._feedforward_block(
attention_output, training=training)
layer_output += source_attention_output
else:
        # if not norm_first, assume that the feedforward does apply layer norm
layer_output = self._feedforward_block(
attention_output, training=training)
return layer_output
| 10,279 | 39.956175 | 80 | py |
models | models-master/official/nlp/modeling/layers/multi_channel_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-channel Attention."""
# pylint: disable=g-classes-have-attributes
import math
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling.layers import masked_softmax
class VotingAttention(tf.keras.layers.Layer):
"""Voting Attention layer.
Args:
num_heads: The number of attention heads.
head_size: Per-head hidden size.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
"""
def __init__(self,
num_heads,
head_size,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(**kwargs)
self._num_heads = num_heads
self._head_size = head_size
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    # Store the explicitly-passed activity regularizer; otherwise the
    # constructor argument is silently dropped.
    self._activity_regularizer = tf.keras.regularizers.get(
        activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
def build(self, unused_input_shapes):
common_kwargs = dict(
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._query_dense = tf.keras.layers.EinsumDense(
"BAE,ENH->BANH",
output_shape=(None, self._num_heads, self._head_size),
bias_axes="NH",
name="query",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
self._key_dense = tf.keras.layers.EinsumDense(
"BAE,ENH->BANH",
output_shape=(None, self._num_heads, self._head_size),
bias_axes="NH",
name="key",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
super().build(unused_input_shapes)
def call(self, encoder_outputs, doc_attention_mask):
num_docs = tf_utils.get_shape_list(encoder_outputs, expected_rank=[4])[1]
cls_embeddings = encoder_outputs[:, :, 0, :]
key = self._key_dense(cls_embeddings)
query = self._query_dense(cls_embeddings)
doc_attention_mask = tf.cast(doc_attention_mask, tf.float32)
key = tf.einsum("BANH,BA->BANH", key, doc_attention_mask)
query = tf.einsum("BANH,BA->BANH", query, doc_attention_mask)
attention_matrix = tf.einsum("BXNH,BYNH->BNXY", query, key)
mask = tf.ones([num_docs, num_docs])
mask = tf.linalg.set_diag(mask, tf.zeros(num_docs))
attention_matrix = tf.einsum("BNXY,XY->BNXY", attention_matrix, mask)
doc_attention_probs = tf.einsum("BNAY->BNA", attention_matrix)
doc_attention_probs = tf.einsum("BNA->BA", doc_attention_probs)
infadder = (1.0 - doc_attention_mask) * -100000.0
return tf.nn.softmax(doc_attention_probs + infadder)
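
# Illustrative shape sketch (not part of the original module): given
# hypothetical `encoder_outputs` of shape [B, A, S, H] (A documents per
# story) and `doc_attention_mask` of shape [B, A], the layer attends over
# the per-document [CLS] embeddings and returns a [B, A] softmax
# distribution scoring the documents.
#
#   layer = VotingAttention(num_heads=8, head_size=64)
#   doc_probs = layer(encoder_outputs, doc_attention_mask)  # shape [B, A]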
class MultiChannelAttention(tf.keras.layers.MultiHeadAttention):
"""Multi-channel Attention layer.
Introduced in, [Generating Representative Headlines for News Stories
](https://arxiv.org/abs/2001.09386). Expects multiple cross-attention
target sequences.
Call args:
query: Query `Tensor` of shape `[B, T, dim]`.
    value: Value `Tensor` of shape `[B, A, S, dim]`, where A denotes the
      number of documents (channels).
    context_attention_weights: Context weights of shape `[B, N, T, A]`, where N
      is the number of attention heads. Used to combine the multi-channel
      context tensors according to the distribution among channels.
key: Optional key `Tensor` of shape `[B, A, S, dim]`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
attention_mask: A boolean mask of shape `[B, T, S]`, that prevents attention
to certain positions.
"""
def _build_attention(self, rank):
super()._build_attention(rank) # pytype: disable=attribute-error # typed-keras
self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[2])
def call(self,
query,
value,
key=None,
context_attention_weights=None,
attention_mask=None):
if not self._built_from_signature:
self._build_from_signature(query, value, key=key)
if key is None:
key = value
# Scalar dimensions referenced here:
# B = batch size (number of stories)
# A = num_docs (number of docs)
# F = target sequence length
# T = source sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_tensor` = [B, F, N ,H]
query_tensor = self._query_dense(query)
# `key_tensor` = [B, A, T, N, H]
key_tensor = self._key_dense(key)
# `value_tensor` = [B, A, T, N, H]
value_tensor = self._value_dense(value)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum("BATNH,BFNH->BANFT", key_tensor, query_tensor)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(self._key_dim)))
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, A, N, F, T]
attention_probs = self._masked_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self._dropout_layer(attention_probs)
# `context_layer` = [B, F, N, H]
context_layer = tf.einsum("BANFT,BATNH->BAFNH", attention_probs,
value_tensor)
attention_output = tf.einsum("BNFA,BAFNH->BFNH", context_attention_weights,
context_layer)
attention_output = self._output_dense(attention_output)
return attention_output
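
# Illustrative shape sketch (not part of the original module), using
# hypothetical values B (stories), A (documents), T (target length),
# S (source length), N (heads) and dim (hidden size):
#
#   layer = MultiChannelAttention(num_heads=N, key_dim=dim // N)
#   output = layer(
#       query,   # [B, T, dim]
#       value,   # [B, A, S, dim]
#       context_attention_weights=doc_weights)  # [B, N, T, A]
#   # `output` has shape [B, T, dim].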
| 7,312 | 40.316384 | 84 | py |