models-master/official/projects/edgetpu/nlp/modeling/model_builder_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilebert_edgetpu.model_builder.py."""
import tensorflow as tf
from official.nlp import modeling
from official.nlp.configs import encoders
from official.projects.edgetpu.nlp.configs import params
from official.projects.edgetpu.nlp.modeling import model_builder
class ModelBuilderTest(tf.test.TestCase):
def setUp(self):
super(ModelBuilderTest, self).setUp()
self.pretrainer_config = params.PretrainerModelParams(
encoder=encoders.EncoderConfig(type='mobilebert'))
def test_default_initialization(self):
"""Initializes pretrainer model from stratch."""
pretrainer = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.pretrainer_config,
name='test_model')
# Makes sure the pretrainer variables are created.
_ = pretrainer(pretrainer.inputs)
self.assertEqual(pretrainer.name, 'test_model')
encoder = pretrainer.encoder_network
default_number_layer = encoders.MobileBertEncoderConfig().num_blocks
encoder_transformer_layer_counter = 0
for layer in encoder.layers:
if isinstance(layer, modeling.layers.MobileBertTransformer):
encoder_transformer_layer_counter += 1
self.assertEqual(default_number_layer, encoder_transformer_layer_counter)
def test_initialization_with_encoder(self):
"""Initializes pretrainer model with an existing encoder network."""
encoder = encoders.build_encoder(
config=encoders.EncoderConfig(type='mobilebert'))
pretrainer = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.pretrainer_config,
encoder=encoder)
encoder_network = pretrainer.encoder_network
self.assertEqual(encoder_network, encoder)
def test_initialization_with_mlm(self):
"""Initializes pretrainer model with an existing MLM head."""
embedding = modeling.layers.MobileBertEmbedding(
word_vocab_size=30522,
word_embed_size=128,
type_vocab_size=2,
output_embed_size=encoders.MobileBertEncoderConfig().hidden_size)
dummy_input = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32)
_ = embedding(dummy_input)
embedding_table = embedding.word_embedding.embeddings
mlm_layer = modeling.layers.MobileBertMaskedLM(
embedding_table=embedding_table)
pretrainer = model_builder.build_bert_pretrainer(
pretrainer_cfg=self.pretrainer_config,
masked_lm=mlm_layer)
mlm_network = pretrainer.masked_lm
self.assertEqual(mlm_network, mlm_layer)
if __name__ == '__main__':
tf.test.main()

models-master/official/projects/edgetpu/nlp/modeling/pretrainer_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BERT pretrainer model."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.projects.edgetpu.nlp.modeling import pretrainer
class MobileBERTEdgeTPUPretrainerTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product([True, False],
[True, False],
[True, False]))
def test_mobilebert_edgetpu_pretrainer(
self,
dict_outputs,
return_all_encoder_outputs,
use_customized_masked_lm):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
hidden_size = 48
num_layers = 2
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=num_layers,
hidden_size=hidden_size,
max_sequence_length=sequence_length,
return_all_encoder_outputs=return_all_encoder_outputs,
dict_outputs=dict_outputs)
# Create a BERT trainer with the created network.
if use_customized_masked_lm:
customized_masked_lm = layers.MaskedLM(
embedding_table=test_network.get_embedding_table())
else:
customized_masked_lm = None
bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=test_network, customized_masked_lm=customized_masked_lm)
num_token_predictions = 20
# Create a set of 2-dimensional inputs (the first dimension is implicit).
inputs = dict(
input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32))
inputs['masked_lm_positions'] = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model(inputs)
has_encoder_outputs = dict_outputs or return_all_encoder_outputs
expected_keys = ['sequence_output', 'pooled_output']
if has_encoder_outputs:
expected_keys.append('encoder_outputs')
expected_keys.append('mlm_logits')
self.assertSameElements(outputs.keys(), expected_keys)
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
self.assertAllEqual(expected_lm_shape,
outputs['mlm_logits'].shape.as_list())
expected_sequence_output_shape = [None, sequence_length, hidden_size]
self.assertAllEqual(expected_sequence_output_shape,
outputs['sequence_output'].shape.as_list())
expected_pooled_output_shape = [None, hidden_size]
self.assertAllEqual(expected_pooled_output_shape,
outputs['pooled_output'].shape.as_list())
def test_multiple_cls_outputs(self):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the BERT trainer.
vocab_size = 100
sequence_length = 512
hidden_size = 48
num_layers = 2
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=num_layers,
hidden_size=hidden_size,
max_sequence_length=sequence_length,
dict_outputs=True)
bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=test_network,
classification_heads=[layers.MultiClsHeads(
inner_dim=5, cls_list=[('foo', 2), ('bar', 3)])])
num_token_predictions = 20
# Create a set of 2-dimensional inputs (the first dimension is implicit).
inputs = dict(
input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
masked_lm_positions=tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32))
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = bert_trainer_model(inputs)
self.assertEqual(outputs['foo'].shape.as_list(), [None, 2])
self.assertEqual(outputs['bar'].shape.as_list(), [None, 3])
def test_v2_serialize_deserialize(self):
"""Validate that the BERT trainer can be serialized and deserialized."""
# Build a transformer network to use within the BERT trainer. (Here, we use
# a short sequence_length for convenience.)
test_network = networks.BertEncoder(vocab_size=100, num_layers=2)
# Create a BERT trainer with the created network. (Note that all the args
# are different, so we can catch any serialization mismatches.)
bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=test_network)
# Create another BERT trainer via serialization and deserialization.
config = bert_trainer_model.get_config()
new_bert_trainer_model = pretrainer.MobileBERTEdgeTPUPretrainer.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_bert_trainer_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(bert_trainer_model.get_config(),
new_bert_trainer_model.get_config())
if __name__ == '__main__':
tf.test.main()

models-master/official/projects/edgetpu/nlp/modeling/encoder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileBERT text encoder network."""
import tensorflow as tf
from official.nlp import modeling
from official.nlp.modeling import layers
from official.projects.edgetpu.nlp.modeling import edgetpu_layers
@tf.keras.utils.register_keras_serializable(package='Text')
class MobileBERTEncoder(tf.keras.Model):
"""A Keras functional API implementation for MobileBERT encoder."""
def __init__(self,
word_vocab_size=30522,
word_embed_size=128,
type_vocab_size=2,
max_sequence_length=512,
num_blocks=24,
hidden_size=512,
num_attention_heads=4,
intermediate_size=512,
intermediate_act_fn='relu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
intra_bottleneck_size=128,
initializer_range=0.02,
use_bottleneck_attention=False,
key_query_shared_bottleneck=True,
num_feedforward_networks=4,
normalization_type='no_norm',
classifier_activation=False,
input_mask_dtype='int32',
quantization_friendly=True,
**kwargs):
"""Class initialization.
Args:
word_vocab_size: Number of words in the vocabulary.
word_embed_size: Word embedding size.
type_vocab_size: Number of word types.
max_sequence_length: Maximum length of input sequence.
num_blocks: Number of transformer block in the encoder model.
hidden_size: Hidden size for the transformer block.
num_attention_heads: Number of attention heads in the transformer block.
intermediate_size: The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: Dropout probability for the hidden layers.
attention_probs_dropout_prob: Dropout probability of the attention
probabilities.
intra_bottleneck_size: Size of bottleneck.
initializer_range: The stddev of the `truncated_normal_initializer` for
initializing all weight matrices.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: Whether to share linear transformation for
keys and queries.
num_feedforward_networks: Number of stacked feed-forward networks.
      normalization_type: The type of normalization; only `no_norm` and
        `layer_norm` are supported. `no_norm` represents the element-wise
        linear transformation for the student model, as suggested by the
        original MobileBERT paper. `layer_norm` is used for the teacher model.
classifier_activation: If using the tanh activation for the final
representation of the `[CLS]` token in fine-tuning.
input_mask_dtype: The dtype of `input_mask` tensor, which is one of the
input tensors of this encoder. Defaults to `int32`. If you want
to use `tf.lite` quantization, which does not support `Cast` op,
please set this argument to `tf.float32` and feed `input_mask`
tensor with values in `float32` to avoid `tf.cast` in the computation.
      quantization_friendly: If enabled, the model uses the EdgeTPU mobile
        transformer. The difference is a customized softmax op that uses -120
        as the mask value, which is more stable for post-training
        quantization.
      **kwargs: Other keyword arguments.
"""
self._self_setattr_tracking = False
initializer = tf.keras.initializers.TruncatedNormal(
stddev=initializer_range)
# layer instantiation
self.embedding_layer = layers.MobileBertEmbedding(
word_vocab_size=word_vocab_size,
word_embed_size=word_embed_size,
type_vocab_size=type_vocab_size,
output_embed_size=hidden_size,
max_sequence_length=max_sequence_length,
normalization_type=normalization_type,
initializer=initializer,
dropout_rate=hidden_dropout_prob)
self._transformer_layers = []
transformer_layer_args = dict(
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
intermediate_act_fn=intermediate_act_fn,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
intra_bottleneck_size=intra_bottleneck_size,
use_bottleneck_attention=use_bottleneck_attention,
key_query_shared_bottleneck=key_query_shared_bottleneck,
num_feedforward_networks=num_feedforward_networks,
normalization_type=normalization_type,
initializer=initializer,
)
for layer_idx in range(num_blocks):
if quantization_friendly:
transformer = edgetpu_layers.EdgetpuMobileBertTransformer(
name=f'transformer_layer_{layer_idx}',
**transformer_layer_args)
else:
transformer = layers.MobileBertTransformer(
name=f'transformer_layer_{layer_idx}',
**transformer_layer_args)
self._transformer_layers.append(transformer)
# input tensor
input_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_word_ids')
type_ids = tf.keras.layers.Input(
shape=(None,), dtype=tf.int32, name='input_type_ids')
input_mask = tf.keras.layers.Input(
shape=(None,), dtype=input_mask_dtype, name='input_mask')
self.inputs = [input_ids, input_mask, type_ids]
    # The dtype of `attention_mask` will be the same as the dtype of
    # `input_mask`.
attention_mask = modeling.layers.SelfAttentionMask()(input_mask, input_mask)
# build the computation graph
all_layer_outputs = []
all_attention_scores = []
embedding_output = self.embedding_layer(input_ids, type_ids)
all_layer_outputs.append(embedding_output)
prev_output = embedding_output
for layer_idx in range(num_blocks):
layer_output, attention_score = self._transformer_layers[layer_idx](
prev_output,
attention_mask,
return_attention_scores=True)
all_layer_outputs.append(layer_output)
all_attention_scores.append(attention_score)
prev_output = layer_output
first_token = tf.squeeze(prev_output[:, 0:1, :], axis=1)
if classifier_activation:
self._pooler_layer = tf.keras.layers.EinsumDense(
'ab,bc->ac',
output_shape=hidden_size,
activation=tf.tanh,
bias_axes='c',
kernel_initializer=initializer,
name='pooler')
first_token = self._pooler_layer(first_token)
else:
self._pooler_layer = None
outputs = dict(
sequence_output=prev_output,
pooled_output=first_token,
encoder_outputs=all_layer_outputs,
attention_scores=all_attention_scores)
super(MobileBERTEncoder, self).__init__(
inputs=self.inputs, outputs=outputs, **kwargs)
def get_embedding_table(self):
return self.embedding_layer.word_embedding.embeddings
def get_embedding_layer(self):
return self.embedding_layer.word_embedding
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
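
# Example usage (illustrative sketch, not part of the original file). The tiny
# sizes below are arbitrary choices for the example; any configuration whose
# `intra_bottleneck_size` divides evenly by `num_attention_heads` works.
def _mobilebert_encoder_example():
  encoder = MobileBERTEncoder(
      word_vocab_size=100,
      max_sequence_length=16,
      num_blocks=2,
      hidden_size=64,
      intra_bottleneck_size=32)
  input_word_ids = tf.ones((1, 16), dtype=tf.int32)
  input_mask = tf.ones((1, 16), dtype=tf.int32)
  input_type_ids = tf.zeros((1, 16), dtype=tf.int32)
  outputs = encoder([input_word_ids, input_mask, input_type_ids])
  # `sequence_output` has shape [batch, seq_len, hidden_size].
  assert outputs['sequence_output'].shape.as_list() == [1, 16, 64]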

models-master/official/projects/edgetpu/nlp/modeling/edgetpu_layers_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for custom layers used by MobileBERT-EdgeTPU."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.edgetpu.nlp.modeling import edgetpu_layers
keras = tf.keras
class MultiHeadAttentionTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("key_value_same_proj", None, None, [40, 80]),
("key_value_different_proj", 32, 60, [40, 60]),
)
def test_non_masked_attention(self, value_dim, output_shape, output_dims):
"""Test that the attention layer can be created without a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12,
key_dim=64,
value_dim=value_dim,
output_shape=output_shape)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
value = keras.Input(shape=(20, 80))
output = test_layer(query=query, value=value)
self.assertEqual(output.shape.as_list(), [None] + output_dims)
def test_non_masked_self_attention(self):
"""Test with one input (self-attenntion) and no mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_attention_scores(self):
"""Test attention outputs with coefficients."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
output, coef = test_layer(query, query, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 40])
def test_attention_scores_with_values(self):
"""Test attention outputs with coefficients."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12, key_dim=64)
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
value = keras.Input(shape=(60, 80))
output, coef = test_layer(query, value, return_attention_scores=True)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
self.assertEqual(coef.shape.as_list(), [None, 12, 40, 60])
@parameterized.named_parameters(("with_bias", True), ("no_bias", False))
def test_masked_attention(self, use_bias):
"""Test with a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2, use_bias=use_bias)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = keras.Input(shape=(4, 8))
value = keras.Input(shape=(2, 8))
mask_tensor = keras.Input(shape=(4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor)
# Create a model containing the test layer.
model = keras.Model([query, value, mask_tensor], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict([from_data, to_data, mask_data])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict([from_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Tests the layer with three inputs: Q, K, V.
key = keras.Input(shape=(2, 8))
output = test_layer(query, value=value, key=key, attention_mask=mask_tensor)
model = keras.Model([query, value, key, mask_tensor], output)
masked_output_data = model.predict([from_data, to_data, to_data, mask_data])
unmasked_output_data = model.predict(
[from_data, to_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
if use_bias:
self.assertLen(test_layer._query_dense.trainable_variables, 2)
self.assertLen(test_layer._output_dense.trainable_variables, 2)
else:
self.assertLen(test_layer._query_dense.trainable_variables, 1)
self.assertLen(test_layer._output_dense.trainable_variables, 1)
def test_initializer(self):
"""Test with a specified initializer."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=12,
key_dim=64,
kernel_initializer=keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
query = keras.Input(shape=(40, 80))
output = test_layer(query, query)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
def test_masked_attention_with_scores(self):
"""Test with a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2)
# Create a 3-dimensional input (the first dimension is implicit).
batch_size = 3
query = keras.Input(shape=(4, 8))
value = keras.Input(shape=(2, 8))
mask_tensor = keras.Input(shape=(4, 2))
output = test_layer(query=query, value=value, attention_mask=mask_tensor)
# Create a model containing the test layer.
model = keras.Model([query, value, mask_tensor], output)
# Generate data for the input (non-mask) tensors.
from_data = 10 * np.random.random_sample((batch_size, 4, 8))
to_data = 10 * np.random.random_sample((batch_size, 2, 8))
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=(batch_size, 4, 2))
masked_output_data = model.predict([from_data, to_data, mask_data])
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones((batch_size, 4, 2))
unmasked_output_data = model.predict([from_data, to_data, null_mask_data])
# Because one data is masked and one is not, the outputs should not be the
# same.
self.assertNotAllClose(masked_output_data, unmasked_output_data)
# Create a model containing attention scores.
output, scores = test_layer(
query=query, value=value, attention_mask=mask_tensor,
return_attention_scores=True)
model = keras.Model([query, value, mask_tensor], [output, scores])
masked_output_data_score, masked_score = model.predict(
[from_data, to_data, mask_data])
unmasked_output_data_score, unmasked_score = model.predict(
[from_data, to_data, null_mask_data])
self.assertNotAllClose(masked_output_data_score, unmasked_output_data_score)
self.assertAllClose(masked_output_data, masked_output_data_score)
self.assertAllClose(unmasked_output_data, unmasked_output_data_score)
self.assertNotAllClose(masked_score, unmasked_score)
@parameterized.named_parameters(
("4d_inputs_1freebatch_mask2", [3, 4], [3, 2], [4, 2],
(2,)), ("4d_inputs_1freebatch_mask3", [3, 4], [3, 2], [3, 4, 2], (2,)),
("4d_inputs_1freebatch_mask4", [3, 4], [3, 2], [3, 2, 4, 2],
(2,)), ("4D_inputs_2D_attention", [3, 4], [3, 2], [3, 4, 3, 2], (1, 2)),
("5D_inputs_2D_attention", [5, 3, 4], [5, 3, 2], [3, 4, 3, 2], (2, 3)),
("5D_inputs_2D_attention_fullmask", [5, 3, 4], [5, 3, 2], [5, 3, 4, 3, 2],
(2, 3)))
def test_high_dim_attention(self, q_dims, v_dims, mask_dims, attention_axes):
"""Test with a mask tensor."""
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2, attention_axes=attention_axes)
batch_size, hidden_size = 3, 8
# Generate data for the input (non-mask) tensors.
query_shape = [batch_size] + q_dims + [hidden_size]
value_shape = [batch_size] + v_dims + [hidden_size]
mask_shape = [batch_size] + mask_dims
query = 10 * np.random.random_sample(query_shape)
value = 10 * np.random.random_sample(value_shape)
# Invoke the data with a random set of mask data. This should mask at least
# one element.
mask_data = np.random.randint(2, size=mask_shape).astype("bool")
# Invoke the same data, but with a null mask (where no elements are masked).
null_mask_data = np.ones(mask_shape)
# Because one data is masked and one is not, the outputs should not be the
# same.
query_tensor = keras.Input(query_shape[1:], name="query")
value_tensor = keras.Input(value_shape[1:], name="value")
mask_tensor = keras.Input(mask_shape[1:], name="mask")
output = test_layer(query=query_tensor, value=value_tensor,
attention_mask=mask_tensor)
model = keras.Model([query_tensor, value_tensor, mask_tensor], output)
self.assertNotAllClose(
model.predict([query, value, mask_data]),
model.predict([query, value, null_mask_data]))
def test_dropout(self):
test_layer = edgetpu_layers.EdgeTPUMultiHeadAttention(
num_heads=2, key_dim=2, dropout=0.5)
# Generate data for the input (non-mask) tensors.
from_data = keras.backend.ones(shape=(32, 4, 8))
to_data = keras.backend.ones(shape=(32, 2, 8))
train_out = test_layer(from_data, to_data, None, None, None, True)
test_out = test_layer(from_data, to_data, None, None, None, False)
    # The training-mode output (dropout active) should differ from the
    # inference-mode output.
self.assertNotAllClose(
keras.backend.eval(train_out),
keras.backend.eval(test_out))
if __name__ == "__main__":
tf.test.main()

models-master/official/projects/edgetpu/nlp/modeling/model_builder.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build MobileBERT-EdgeTPU model."""
from typing import Optional
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp import modeling
from official.projects.edgetpu.nlp.configs import params
from official.projects.edgetpu.nlp.modeling import encoder as edgetpu_encoder
from official.projects.edgetpu.nlp.modeling import pretrainer as edgetpu_pretrainer
def build_bert_pretrainer(pretrainer_cfg: params.PretrainerModelParams,
encoder: Optional[tf.keras.Model] = None,
masked_lm: Optional[tf.keras.Model] = None,
quantization_friendly: Optional[bool] = False,
name: Optional[str] = None) -> tf.keras.Model:
"""Builds pretrainer.
Args:
pretrainer_cfg: configs for the pretrainer model.
encoder: (Optional) The encoder network for the pretrainer model.
masked_lm: (Optional) The masked_lm network for the pretrainer model.
    quantization_friendly: (Optional) If enabled, the model will use the
      EdgeTPU mobilebert transformer. The difference is a customized softmax
      op that uses -120 as the mask value, which is more stable for
      post-training quantization.
name: (Optional) Name of the pretrainer model.
Returns:
The pretrainer model.
"""
encoder_cfg = pretrainer_cfg.encoder.mobilebert
encoder = encoder or edgetpu_encoder.MobileBERTEncoder(
word_vocab_size=encoder_cfg.word_vocab_size,
word_embed_size=encoder_cfg.word_embed_size,
type_vocab_size=encoder_cfg.type_vocab_size,
max_sequence_length=encoder_cfg.max_sequence_length,
num_blocks=encoder_cfg.num_blocks,
hidden_size=encoder_cfg.hidden_size,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_act_fn=encoder_cfg.hidden_activation,
hidden_dropout_prob=encoder_cfg.hidden_dropout_prob,
attention_probs_dropout_prob=encoder_cfg.attention_probs_dropout_prob,
intra_bottleneck_size=encoder_cfg.intra_bottleneck_size,
initializer_range=encoder_cfg.initializer_range,
use_bottleneck_attention=encoder_cfg.use_bottleneck_attention,
key_query_shared_bottleneck=encoder_cfg.key_query_shared_bottleneck,
num_feedforward_networks=encoder_cfg.num_feedforward_networks,
normalization_type=encoder_cfg.normalization_type,
classifier_activation=encoder_cfg.classifier_activation,
input_mask_dtype=encoder_cfg.input_mask_dtype,
quantization_friendly=quantization_friendly)
if pretrainer_cfg.cls_heads:
cls_heads = [
modeling.layers.ClassificationHead(**cfg.as_dict())
for cfg in pretrainer_cfg.cls_heads
]
else:
cls_heads = []
# Get the embedding table from the encoder model.
def _get_embedding_table(encoder):
for layer in encoder.layers:
if layer.name.startswith('mobile_bert_embedding'):
return layer.word_embedding.embeddings
raise ValueError('Can not find embedding layer in the encoder.')
masked_lm = masked_lm or modeling.layers.MobileBertMaskedLM(
embedding_table=_get_embedding_table(encoder),
activation=tf_utils.get_activation(pretrainer_cfg.mlm_activation),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=pretrainer_cfg.mlm_initializer_range),
output_weights_use_proj=pretrainer_cfg.mlm_output_weights_use_proj,
name='cls/predictions')
pretrainer = edgetpu_pretrainer.MobileBERTEdgeTPUPretrainer(
encoder_network=encoder,
classification_heads=cls_heads,
customized_masked_lm=masked_lm,
name=name)
return pretrainer
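
# Example usage (illustrative sketch mirroring model_builder_test.py): build a
# default MobileBERT pretrainer from scratch. `encoders` is imported here only
# for the example; the module itself does not otherwise depend on it.
def _build_default_pretrainer_example():
  from official.nlp.configs import encoders  # pylint: disable=g-import-not-at-top
  pretrainer_cfg = params.PretrainerModelParams(
      encoder=encoders.EncoderConfig(type='mobilebert'))
  return build_bert_pretrainer(pretrainer_cfg, name='example_pretrainer')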

models-master/official/projects/edgetpu/nlp/modeling/pretrainer.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Pre-training model."""
# pylint: disable=g-classes-have-attributes
import copy
from typing import List, Optional
import tensorflow as tf
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class MobileBERTEdgeTPUPretrainer(tf.keras.Model):
"""BERT pretraining model V2.
Adds the masked language model head and optional classification heads upon the
transformer encoder.
Args:
encoder_network: A transformer network. This network should output a
sequence output and a classification output.
mlm_activation: The activation (if any) to use in the masked LM network. If
None, no activation will be used.
mlm_initializer: The initializer (if any) to use in the masked LM. Default
to a Glorot uniform initializer.
classification_heads: A list of optional head layers to transform on encoder
sequence outputs.
customized_masked_lm: A customized masked_lm layer. If None, will create
a standard layer from `layers.MaskedLM`; if not None, will use the
specified masked_lm layer. Above arguments `mlm_activation` and
`mlm_initializer` will be ignored.
name: The name of the model.
Inputs: Inputs defined by the encoder network, plus `masked_lm_positions` as a
dictionary.
Outputs: A dictionary of `lm_output`, classification head outputs keyed by
head names, and also outputs from `encoder_network`, keyed by
`sequence_output` and `encoder_outputs` (if any).
"""
def __init__(
self,
encoder_network: tf.keras.Model,
mlm_activation=None,
mlm_initializer='glorot_uniform',
classification_heads: Optional[List[tf.keras.layers.Layer]] = None,
customized_masked_lm: Optional[tf.keras.layers.Layer] = None,
name: str = 'bert',
**kwargs):
inputs = copy.copy(encoder_network.inputs)
outputs = {}
encoder_network_outputs = encoder_network(inputs)
if isinstance(encoder_network_outputs, list):
outputs['pooled_output'] = encoder_network_outputs[1]
if isinstance(encoder_network_outputs[0], list):
outputs['encoder_outputs'] = encoder_network_outputs[0]
outputs['sequence_output'] = encoder_network_outputs[0][-1]
else:
outputs['sequence_output'] = encoder_network_outputs[0]
elif isinstance(encoder_network_outputs, dict):
outputs = encoder_network_outputs
else:
raise ValueError('encoder_network\'s output should be either a list '
'or a dict, but got %s' % encoder_network_outputs)
masked_lm_positions = tf.keras.layers.Input(
shape=(None,), name='masked_lm_positions', dtype=tf.int32)
inputs.append(masked_lm_positions)
masked_lm_layer = customized_masked_lm or layers.MaskedLM(
embedding_table=encoder_network.get_embedding_table(),
activation=mlm_activation,
initializer=mlm_initializer,
name='cls/predictions')
sequence_output = outputs['sequence_output']
outputs['mlm_logits'] = masked_lm_layer(
sequence_output, masked_positions=masked_lm_positions)
classification_head_layers = classification_heads or []
for cls_head in classification_head_layers:
cls_outputs = cls_head(sequence_output)
if isinstance(cls_outputs, dict):
outputs.update(cls_outputs)
else:
outputs[cls_head.name] = cls_outputs
super(MobileBERTEdgeTPUPretrainer, self).__init__(
inputs=inputs,
outputs=outputs,
name=name,
**kwargs)
self._config = {
'encoder_network': encoder_network,
'mlm_activation': mlm_activation,
'mlm_initializer': mlm_initializer,
'classification_heads': classification_heads,
'customized_masked_lm': customized_masked_lm,
'name': name,
}
self.encoder_network = encoder_network
self.masked_lm = masked_lm_layer
self.classification_heads = classification_head_layers
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.encoder_network, masked_lm=self.masked_lm)
for head in self.classification_heads:
for key, item in head.checkpoint_items.items():
items['.'.join([head.name, key])] = item
return items
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
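
# Example usage (illustrative sketch mirroring pretrainer_test.py): wrap a
# small BERT encoder. The resulting model's output dictionary contains at
# least 'sequence_output', 'pooled_output' and 'mlm_logits'.
def _pretrainer_example():
  from official.nlp.modeling import networks  # pylint: disable=g-import-not-at-top
  encoder = networks.BertEncoder(vocab_size=100, num_layers=2)
  return MobileBERTEdgeTPUPretrainer(encoder_network=encoder)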

models-master/official/projects/edgetpu/nlp/modeling/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

models-master/official/projects/edgetpu/nlp/modeling/edgetpu_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized MobileBERT-EdgeTPU layers.
There are two reasons for us to customize the layers instead of using the well-
defined layers used in baseline MobileBERT.
1. The layer introduces compiler sharding failures. For example, the gather in
OnDeviceEmbedding.
2. The layer contains ops that need to have bounded input/output ranges. For
example, softmax op.
"""
import string
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
_CHR_IDX = string.ascii_lowercase
# This function is directly copied from the tf.keras.layers.MultiHeadAttention
# implementation.
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`,
that attention will be applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ''
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = ''.join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = '%s,%s->%s' % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = '%s,%s->%s' % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
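
# Worked example (hand-derived from the code above, illustrative only): for a
# rank-4 query of shape (batch, seq, num_heads, channels) attending over axis
# 1, the helper reproduces the standard multi-head attention einsum equations.
def _attention_equation_example():
  dot, combine, scores_rank = _build_attention_equation(4, attn_axes=(1,))
  assert dot == 'aecd,abcd->acbe'      # (key, query) -> attention scores
  assert combine == 'acbe,aecd->abcd'  # (scores, value) -> attention output
  assert scores_rank == 4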
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUSoftmax(tf.keras.layers.Softmax):
"""EdgeTPU/Quantization friendly implementation for the SoftMax.
When export quant model, use -120 mask value.
When export float model and run inference with bf16 on device, use -10000.
"""
def __init__(self,
mask_value: int = -120,
**kwargs):
self._mask_value = mask_value
super(EdgeTPUSoftmax, self).__init__(**kwargs)
def get_config(self):
config = {
'mask_value': self._mask_value
}
base_config = super(EdgeTPUSoftmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, mask=None):
if mask is not None:
adder = (1.0 - tf.cast(mask, inputs.dtype)) * self._mask_value
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return tf.exp(inputs - tf.reduce_logsumexp(
inputs, axis=self.axis, keepdims=True))
else:
return tf.keras.backend.softmax(inputs, axis=self.axis[0])
return tf.keras.backend.softmax(inputs, axis=self.axis)
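
# Example (illustrative sketch): masked logits receive an additive -120 bias,
# which is large enough to drive their probabilities to ~0 after softmax while
# keeping the pre-softmax range narrow enough to quantize well (unlike the
# conventional -10000 mask value).
def _edgetpu_softmax_example():
  softmax = EdgeTPUSoftmax()
  probs = softmax(
      tf.constant([[1.0, 2.0, 3.0]]), mask=tf.constant([[1.0, 1.0, 0.0]]))
  assert float(probs[0, 2]) < 1e-6  # The masked position is zeroed out.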
@tf.keras.utils.register_keras_serializable(package='Text')
class EdgeTPUMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
"""Quantization friendly implementation for the MultiHeadAttention."""
def _build_attention(self, rank):
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
self._dot_product_equation, self._combine_equation, attn_scores_rank = (
_build_attention_equation(
rank, attn_axes=self._attention_axes))
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = EdgeTPUSoftmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
class EdgetpuMobileBertTransformer(layers.MobileBertTransformer):
"""Quantization friendly MobileBertTransformer.
Inherits from the MobileBertTransformer but use our customized MHA.
"""
def __init__(self, **kwargs):
super(EdgetpuMobileBertTransformer, self).__init__(**kwargs)
attention_head_size = int(
self.intra_bottleneck_size / self.num_attention_heads)
attention_layer = EdgeTPUMultiHeadAttention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=self.initializer,
name='attention')
layer_norm = self.block_layers['attention'][1]
self.block_layers['attention'] = [attention_layer, layer_norm]

models-master/official/projects/edgetpu/vision/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

models-master/official/projects/edgetpu/vision/train.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training for MobileNet-EdgeTPU."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# pylint: disable=unused-import
from official.projects.edgetpu.vision.configs import mobilenet_edgetpu_config
from official.projects.edgetpu.vision.configs import semantic_segmentation_config
from official.projects.edgetpu.vision.configs import semantic_segmentation_searched_config
from official.projects.edgetpu.vision.modeling.backbones import mobilenet_edgetpu
from official.projects.edgetpu.vision.tasks import image_classification
from official.projects.edgetpu.vision.tasks import semantic_segmentation
from official.vision import registry_imports
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['mode', 'model_dir'])
app.run(main)
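
# Example launch command (illustrative sketch; the flags below are defined by
# tfm_flags.define_flags, the experiment name is registered by the imported
# config modules, and the model_dir path is a placeholder):
#   python3 -m official.projects.edgetpu.vision.train \
#     --mode=train_and_eval \
#     --experiment=mobilenet_edgetpu_v2_xs \
#     --model_dir=/tmp/mobilenet_edgetpu_v2_xs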

models-master/official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tflite_imagenet_evaluator."""
from unittest import mock
import tensorflow as tf
from official.projects.edgetpu.vision.serving import tflite_imagenet_evaluator
class TfliteImagenetEvaluatorTest(tf.test.TestCase):
# Only tests the parallelization aspect. Mocks image evaluation and dataset.
def test_evaluate_all(self):
batch_size = 8
num_threads = 4
num_batches = 5
labels = tf.data.Dataset.range(batch_size * num_threads * num_batches)
images = tf.data.Dataset.range(batch_size * num_threads * num_batches)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.batch(batch_size)
with mock.patch.object(
tflite_imagenet_evaluator.AccuracyEvaluator,
'evaluate_single_image',
return_value=True,
autospec=True):
evaluator = tflite_imagenet_evaluator.AccuracyEvaluator(
model_content='MockModelContent'.encode('utf-8'),
dataset=dataset,
num_threads=num_threads)
num_evals, num_corrects = evaluator.evaluate_all()
expected_evals = num_batches * num_threads * batch_size
self.assertEqual(num_evals, expected_evals)
self.assertEqual(num_corrects, expected_evals)
if __name__ == '__main__':
tf.test.main()

models-master/official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates image classification accuracy using TFLite Interpreter."""
import dataclasses
import multiprocessing.pool as mp
from typing import Tuple
from absl import logging
import numpy as np
import tensorflow as tf
@dataclasses.dataclass
class EvaluationInput():
"""Contains image and its label as evaluation input."""
image: tf.Tensor
label: tf.Tensor
class AccuracyEvaluator():
"""Evaluates image classification accuracy using TFLite Interpreter.
Attributes:
model_content: The contents of a TFLite model.
num_threads: Number of threads used to evaluate images.
thread_batch_size: Batch size assigned to each thread.
image_size: Width/Height of the images.
num_classes: Number of classes predicted by the model.
resize_method: Resize method to use during image preprocessing.
"""
def __init__(self,
model_content: bytes,
dataset: tf.data.Dataset,
num_threads: int = 16):
self._model_content: bytes = model_content
self._dataset = dataset
self._num_threads: int = num_threads
def evaluate_single_image(self, eval_input: EvaluationInput) -> bool:
"""Evaluates a given single input.
Args:
eval_input: EvaluationInput holding image and label.
Returns:
Whether the estimation is correct.
"""
interpreter = tf.lite.Interpreter(
model_content=self._model_content, num_threads=1)
interpreter.allocate_tensors()
# Get input and output tensors and quantization details.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
image_tensor = interpreter.tensor(input_details[0]['index'])
logits_tensor = interpreter.tensor(output_details[0]['index'])
# Handle quantization.
scale = 1.0
zero_point = 0.0
input_dtype = tf.as_dtype(input_details[0]['dtype'])
if input_dtype.is_quantized or input_dtype.is_integer:
input_quantization = input_details[0]['quantization']
scale = input_quantization[0]
zero_point = input_quantization[1]
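    # TFLite affine quantization represents real = scale * (q - zero_point),
    # so the float image is mapped to q = real / scale + zero_point below.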
image_tensor()[0, :] = (eval_input.image.numpy() / scale) + zero_point
interpreter.invoke()
return eval_input.label.numpy() == np.argmax(logits_tensor()[0])
def evaluate_all(self) -> Tuple[int, int]:
"""Evaluates all of images in the default dataset.
Returns:
Total number of evaluations and correct predictions as tuple of ints.
"""
num_evals = 0
num_corrects = 0
for image_batch, label_batch in self._dataset:
inputs = [
EvaluationInput(image, label)
for image, label in zip(image_batch, label_batch)
]
pool = mp.ThreadPool(self._num_threads)
results = pool.map(self.evaluate_single_image, inputs)
pool.close()
pool.join()
num_evals += len(results)
num_corrects += results.count(True)
accuracy = 100.0 * num_corrects / num_evals if num_evals > 0 else 0
logging.info('Evaluated: %d, Correct: %d, Accuracy: %f', num_evals,
num_corrects, accuracy)
return (num_evals, num_corrects)
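
# Example usage (illustrative sketch): evaluate a compiled TFLite model over a
# dataset of (image, label) batches. The .tflite path is a placeholder.
def _evaluator_example(dataset: tf.data.Dataset):
  with tf.io.gfile.GFile('/tmp/model.tflite', 'rb') as f:  # placeholder path
    model_content = f.read()
  evaluator = AccuracyEvaluator(model_content, dataset, num_threads=16)
  return evaluator.evaluate_all()  # -> (num_evals, num_corrects)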

models-master/official/projects/edgetpu/vision/serving/export_tflite_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for export_tflite."""
import itertools
import os
from absl.testing import parameterized
import tensorflow as tf
from official.core import exp_factory
from official.core import task_factory
from official.projects.edgetpu.vision.serving import export_util
def _build_experiment_model(experiment_type):
"""Builds model from experiment type configuration w/o loading checkpoint.
  To reduce test latency and avoid unexpected errors (e.g., checkpoint files
  that do not exist at the dedicated path), we skip checkpoint loading in the
  tests.
Args:
experiment_type: model type for the experiment.
Returns:
TF/Keras model for the task.
"""
params = exp_factory.get_exp_config(experiment_type)
if 'deeplabv3plus_mobilenet_edgetpuv2' in experiment_type:
params.task.model.backbone.mobilenet_edgetpu.pretrained_checkpoint_path = None
if 'autoseg_edgetpu' in experiment_type:
params.task.model.model_params.model_weights_path = None
params.validate()
params.lock()
task = task_factory.get_task(params.task)
return task.build_model()
def _build_model(config):
model = _build_experiment_model(config.model_name)
model_input = tf.keras.Input(
shape=(config.image_size, config.image_size, 3), batch_size=1)
model_output = export_util.finalize_serving(model(model_input), config)
model_for_inference = tf.keras.Model(model_input, model_output)
return model_for_inference
def _dump_tflite(model, config):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
export_util.configure_tflite_converter(config, converter)
tflite_buffer = converter.convert()
tf.io.gfile.makedirs(os.path.dirname(config.output_dir))
tflite_path = os.path.join(config.output_dir, f'{config.model_name}.tflite')
tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_buffer)
return tflite_path
SEG_MODELS = [
'autoseg_edgetpu_xs',
]
FINALIZE_METHODS = [
'resize512,argmax,squeeze', 'resize256,argmax,resize512,squeeze',
'resize128,argmax,resize512,squeeze'
]
class ExportTfliteTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('mobilenet_edgetpu_v2_xs', 224),
('autoseg_edgetpu_xs', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32', 512),
)
def test_model_build_and_export_tflite(self, model_name, image_size):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name, image_size=image_size, output_dir=tmp_dir)
config.quantization_config.quantize = False
model = _build_model(config)
tflite_path = _dump_tflite(model, config)
self.assertTrue(tf.io.gfile.exists(tflite_path))
@parameterized.parameters(
('mobilenet_edgetpu_v2_xs', 224),
('autoseg_edgetpu_xs', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k', 512),
('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32', 512),
)
def test_model_build_and_export_saved_model(self, model_name, image_size):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name, image_size=image_size, output_dir=tmp_dir)
model = _build_model(config)
saved_model_path = os.path.join(config.output_dir, config.model_name)
model.save(saved_model_path)
self.assertTrue(tf.saved_model.contains_saved_model(saved_model_path))
@parameterized.parameters(itertools.product(SEG_MODELS, FINALIZE_METHODS))
def test_segmentation_finalize_methods(self, model_name, finalize_method):
tmp_dir = self.create_tempdir().full_path
config = export_util.ExportConfig(
model_name=model_name,
image_size=512,
output_dir=tmp_dir,
finalize_method=finalize_method.split(','))
config.quantization_config.quantize = False
model = _build_model(config)
model_input = tf.random.normal([1, config.image_size, config.image_size, 3])
self.assertEqual(
model(model_input).get_shape().as_list(),
[1, config.image_size, config.image_size])
if __name__ == '__main__':
tf.test.main()

models-master/official/projects/edgetpu/vision/serving/export_util.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements serving with custom post processing."""
import dataclasses
from typing import List, Optional
import tensorflow as tf
import tensorflow_datasets as tfds
from official.core import exp_factory
from official.core import task_factory
from official.modeling.hyperparams import base_config
# pylint: disable=unused-import
from official.projects.edgetpu.vision.configs import mobilenet_edgetpu_config
from official.projects.edgetpu.vision.configs import semantic_segmentation_config
from official.projects.edgetpu.vision.configs import semantic_segmentation_searched_config
from official.projects.edgetpu.vision.modeling import custom_layers
from official.projects.edgetpu.vision.modeling.backbones import mobilenet_edgetpu
from official.projects.edgetpu.vision.tasks import image_classification
from official.projects.edgetpu.vision.tasks import semantic_segmentation as edgetpu_semantic_segmentation
from official.vision.tasks import semantic_segmentation
# pylint: enable=unused-import
MEAN_RGB = [127.5, 127.5, 127.5]
STDDEV_RGB = [127.5, 127.5, 127.5]
@dataclasses.dataclass
class QuantizationConfig(base_config.Config):
"""Configuration for post training quantization.
Attributes:
quantize: Whether to quantize model before exporting tflite.
    quantize_less_restrictive: Allows non-int8 intermediate types and an
      automatically chosen model output type.
use_experimental_quantizer: Enables experimental quantizer of
TFLiteConverter 2.0.
num_calibration_steps: Number of post-training quantization calibration
steps to run.
dataset_name: Name of the dataset to use for quantization calibration.
dataset_dir: Dataset location.
dataset_split: The dataset split (train, validation etc.) to use for
calibration.
"""
quantize: bool = False
quantize_less_restrictive: bool = False
use_experimental_quantizer: bool = True
dataset_name: Optional[str] = None
dataset_dir: Optional[str] = None
dataset_split: Optional[str] = None
num_calibration_steps: int = 100
@dataclasses.dataclass
class ExportConfig(base_config.Config):
"""Configuration for exporting models as tflite and saved_models.
  Attributes:
    model_name: One of the registered model names.
    output_layer: Layer name to take the output from. Can be used to take the
      output from an intermediate layer. None means use the original model
      output.
    ckpt_path: Path of the training checkpoint. If not provided tflite with
      random parameters is exported.
    ckpt_format: Format of the checkpoint. tf_checkpoint is for ckpt files from
      the tf.train.Checkpoint.save() method; keras_checkpoint is for ckpt files
      from the keras.Model.save_weights() method.
    output_dir: Directory to output exported files.
    image_size: Size of the input image. Ideally should be the same as the
      image_size used in the training config.
    finalize_method: Additional layers to be added to customize serving output.
      Supported are (none|(argmax|resize<?>)[,...]).
      - none: do not add extra serving layers.
      - argmax: adds argmax.
      - squeeze: removes dimensions (except batch dim) of size 1 from the shape
        of a tensor.
      - resize<?> (for example resize512): adds resize bilinear|nn to <?> size.
      For example: --finalize_method=resize128,argmax,resize512,squeeze will do
      resize bilinear to 128x128, then argmax, then resize nn to 512x512.
    quantization_config: Post-training quantization parameters (see
      QuantizationConfig above).
  """
quantization_config: QuantizationConfig = dataclasses.field(
default_factory=QuantizationConfig
)
model_name: Optional[str] = None
output_layer: Optional[str] = None
ckpt_path: Optional[str] = None
ckpt_format: Optional[str] = 'tf_checkpoint'
output_dir: str = '/tmp/'
image_size: int = 224
finalize_method: Optional[List[str]] = None
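# Illustrative sketch (nothing in this module calls it): assembling an
# ExportConfig for a quantized export. The model name, dataset, and output
# directory are example values, not requirements of the API.
def example_quantized_export_config() -> ExportConfig:
  """Returns a sample ExportConfig with post-training quantization enabled."""
  config = ExportConfig(
      model_name='mobilenet_edgetpu_v2_xs',
      image_size=224,
      output_dir='/tmp/export')
  config.quantization_config.quantize = True
  config.quantization_config.dataset_name = 'imagenet2012'
  config.quantization_config.dataset_split = 'train'
  return config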
def finalize_serving(model_output, export_config):
"""Adds extra layers based on the provided configuration."""
if isinstance(model_output, dict):
return {
key: finalize_serving(model_output[key], export_config)
for key in model_output
}
finalize_method = export_config.finalize_method
output_layer = model_output
if not finalize_method or finalize_method[0] == 'none':
return output_layer
discrete = False
for i in range(len(finalize_method)):
if finalize_method[i] == 'argmax':
discrete = True
is_argmax_last = (i + 1) == len(finalize_method)
if is_argmax_last:
output_layer = tf.argmax(
output_layer, axis=3, output_type=tf.dtypes.int32)
else:
        # TODO(tohaspiridonov): add first_match=False when cl/383951533 submitted
output_layer = custom_layers.argmax(
output_layer, keepdims=True, epsilon=1e-3)
elif finalize_method[i] == 'squeeze':
output_layer = tf.squeeze(output_layer, axis=3)
else:
resize_params = finalize_method[i].split('resize')
if len(resize_params) != 2 or resize_params[0]:
raise ValueError('Cannot finalize with ' + finalize_method[i] + '.')
resize_to_size = int(resize_params[1])
if discrete:
output_layer = tf.image.resize(
output_layer, [resize_to_size, resize_to_size],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
else:
output_layer = tf.image.resize(
output_layer, [resize_to_size, resize_to_size],
method=tf.image.ResizeMethod.BILINEAR)
return output_layer
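# Sketch (unused helper): finalize_method entries apply left to right. This
# reproduces the documented chain resize128,argmax,resize512,squeeze, turning
# [batch, H, W, C] logits into a [batch, 512, 512] discrete mask.
def _example_finalize_chain(logits):
  config = ExportConfig(
      finalize_method=['resize128', 'argmax', 'resize512', 'squeeze'])
  return finalize_serving(logits, config)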
def preprocess_for_quantization(image_data, image_size, crop_padding=32):
"""Crops to center of image with padding then scales, normalizes image_size.
Args:
image_data: A 3D Tensor representing the RGB image data. Image can be of
arbitrary height and width.
image_size: image height/width dimension.
crop_padding: the padding size to use when centering the crop.
Returns:
A decoded and cropped image Tensor. Image is normalized to [-1,1].
"""
shape = tf.shape(image_data)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
(image_size * 1.0 / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
image = tf.image.crop_to_bounding_box(
image_data,
offset_height=offset_height,
offset_width=offset_width,
target_height=padded_center_crop_size,
target_width=padded_center_crop_size)
image = tf.image.resize([image], [image_size, image_size],
method=tf.image.ResizeMethod.BILINEAR)[0]
image = tf.cast(image, tf.float32)
image -= tf.constant(MEAN_RGB)
image /= tf.constant(STDDEV_RGB)
return image
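# Worked example for the padded center crop above: with a 640x480 input,
# image_size=224 and crop_padding=32, the crop side is
# int(224 / (224 + 32) * min(640, 480)) = int(0.875 * 480) = 420 pixels,
# which is then resized to 224x224 and normalized to roughly [-1, 1].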
def representative_dataset_gen(export_config):
"""Gets a python generator of numpy arrays for the given dataset."""
quantization_config = export_config.quantization_config
  builder = tfds.builder(
      quantization_config.dataset_name, try_gcs=True)
  builder.download_and_prepare()
  data = builder.as_dataset()[quantization_config.dataset_split]
iterator = data.as_numpy_iterator()
for _ in range(quantization_config.num_calibration_steps):
features = next(iterator)
image = features['image']
image = preprocess_for_quantization(image, export_config.image_size)
image = tf.reshape(
image, [1, export_config.image_size, export_config.image_size, 3])
yield [image]
def configure_tflite_converter(export_config, converter):
"""Common code for picking up quantization parameters."""
quantization_config = export_config.quantization_config
if quantization_config.quantize:
if (quantization_config.dataset_dir is
None) and (quantization_config.dataset_name is None):
raise ValueError(
'Must provide a representative dataset when quantizing the model.')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
if quantization_config.quantize_less_restrictive:
converter.target_spec.supported_ops += [
tf.lite.OpsSet.TFLITE_BUILTINS
]
converter.inference_output_type = tf.float32
def _representative_dataset_gen():
return representative_dataset_gen(export_config)
converter.representative_dataset = _representative_dataset_gen
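# Illustrative end-to-end sketch (not called anywhere): converting a Keras
# model with the helpers above. Assumes export_config enables quantization and
# names a calibration dataset; otherwise the converter runs unquantized.
def example_convert_to_tflite(model, export_config):
  converter = tf.lite.TFLiteConverter.from_keras_model(model)
  configure_tflite_converter(export_config, converter)
  return converter.convert()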
def build_experiment_model(experiment_type):
"""Builds model from experiment type configuration."""
params = exp_factory.get_exp_config(experiment_type)
params.validate()
params.lock()
task = task_factory.get_task(params.task)
return task.build_model()
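# Illustrative sketch (unused): building an untrained Keras model for a
# registered experiment name; weights can then be restored from a checkpoint.
def example_build_model():
  return build_experiment_model('mobilenet_edgetpu_v2_xs')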
| 9,508 | 38.786611 | 105 | py |
models | models-master/official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator_run.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluates image classification accuracy using tflite_imagenet_evaluator.
Usage:
tflite_imagenet_evaluator_run --tflite_model_path=/PATH/TO/MODEL.tflite
"""
from typing import Sequence
from absl import app
from absl import flags
import tensorflow as tf
from official.core import exp_factory
from official.projects.edgetpu.vision.serving import tflite_imagenet_evaluator
from official.projects.edgetpu.vision.tasks import image_classification
flags.DEFINE_string('tflite_model_path', None,
'Path to the tflite file to be evaluated.')
flags.DEFINE_integer('num_threads', 16, 'Number of local threads.')
flags.DEFINE_integer('batch_size', 256, 'Batch size per thread.')
flags.DEFINE_string(
'model_name', 'mobilenet_edgetpu_v2_xs',
    'Model name identifying a registered experiment config; its data pipeline '
    'setup is used to build the validation dataset.')
FLAGS = flags.FLAGS
def main(argv: Sequence[str]):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
with tf.io.gfile.GFile(FLAGS.tflite_model_path, 'rb') as f:
model_content = f.read()
config = exp_factory.get_exp_config(FLAGS.model_name)
global_batch_size = FLAGS.num_threads * FLAGS.batch_size
config.task.validation_data.global_batch_size = global_batch_size
config.task.validation_data.dtype = 'float32'
task = image_classification.EdgeTPUTask(config.task)
dataset = task.build_inputs(config.task.validation_data)
evaluator = tflite_imagenet_evaluator.AccuracyEvaluator(
model_content=model_content,
dataset=dataset,
num_threads=FLAGS.num_threads)
evals, corrects = evaluator.evaluate_all()
accuracy = 100.0 * corrects / evals if evals > 0 else 0
print('Final accuracy: {}, Evaluated: {}, Correct: {} '.format(
accuracy, evals, corrects))
if __name__ == '__main__':
flags.mark_flag_as_required('tflite_model_path')
app.run(main)
| 2,504 | 33.791667 | 78 | py |
models | models-master/official/projects/edgetpu/vision/serving/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/edgetpu/vision/serving/export_tflite.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Export model (float or quantized tflite, and saved model) from a trained checkpoint.
Example:
To export dummy quantized model:
export_tflite --model_name=mobilenet_edgetpu_v2_s --output_dir=/tmp --quantize
Using a training checkpoint:
export_tflite --model_name=mobilenet_edgetpu_v2_s \
--ckpt_path=/path/to/training/checkpoint \
--dataset_dir=/path/to/your/dataset --output_dir=/tmp --quantize
Exporting w/o final squeeze layer:
export_tflite --model_name=mobilenet_edgetpu_v2_xs \
--output_layer=probs \
--dataset_dir=/path/to/your/dataset --output_dir=/tmp --quantize
"""
# pylint: enable=line-too-long
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.serving import export_util
flags.DEFINE_string('model_name', None,
'Used to build model using experiment config factory.')
flags.DEFINE_string(
'ckpt_path', None, 'Path to the checkpoint. '
'If not provided tflite with random parameters is exported.')
flags.DEFINE_enum(
'ckpt_format', 'tf_checkpoint',
['tf_checkpoint', 'keras_checkpoint'],
    'tf_checkpoint is for ckpt files from the tf.train.Checkpoint.save() '
    'method; keras_checkpoint is for ckpt files from the '
    'keras.Model.save_weights() method.')
flags.DEFINE_bool(
'export_keras_model', False,
    'Export SavedModel format: if False, export TF SavedModel with the '
    'tf.saved_model API; if True, export Keras SavedModel with the '
    'tf.keras.Model API.')
flags.DEFINE_string('output_dir', None, 'Directory to output exported files.')
flags.DEFINE_integer(
'image_size', 224,
'Size of the input image. Ideally should be the same as the image_size used '
'in training config.')
flags.DEFINE_bool(
'fix_batch_size', True, 'Whether to export model with fixed batch size.')
flags.DEFINE_string(
'output_layer', None,
'Layer name to take the output from. Can be used to take the output from '
'an intermediate layer. None means use the original model output.')
flags.DEFINE_string(
'finalize_method', 'none',
'Additional layers to be added to customize serving output.\n'
'Supported are (none|(argmax|resize<?>)[,...]).\n'
'- none: do not add extra serving layers.\n'
'- argmax: adds argmax.\n'
'- squeeze: removes dimensions of size 1 from the shape of a tensor.\n'
'- resize<?> (for example resize512): adds resize bilinear|nn to <?> size.'
'For example: --finalize_method=resize128,argmax,resize512,squeeze\n'
'Will do resize bilinear to 128x128, then argmax then resize nn to 512x512')
# Quantization related parameters
flags.DEFINE_bool(
'quantize', False,
'Quantize model before exporting tflite. Note that only the exported '
'TFLite is quantized not the SavedModel.')
flags.DEFINE_bool('use_experimental_quantizer', True, 'Enables experimental '
'quantizer of TFLiteConverter 2.0.')
flags.DEFINE_bool(
'quantize_less_restrictive', False,
'Allows non int8 based intermediate types, automatic model output type.')
flags.DEFINE_integer(
'num_calibration_steps', 100,
'Number of post-training quantization calibration steps to run.')
flags.DEFINE_string('dataset_name', 'imagenet2012',
'Name of the dataset to use for quantization calibration.')
flags.DEFINE_string('dataset_dir', None, 'Dataset location.')
flags.DEFINE_string(
'dataset_split', 'train',
'The dataset split (train, validation etc.) to use for calibration.')
FLAGS = flags.FLAGS
def get_export_config_from_flags():
"""Creates ExportConfig from cmd line flags."""
quantization_config = export_util.QuantizationConfig(
quantize=FLAGS.quantize,
quantize_less_restrictive=FLAGS.quantize_less_restrictive,
use_experimental_quantizer=FLAGS.use_experimental_quantizer,
num_calibration_steps=FLAGS.num_calibration_steps,
dataset_name=FLAGS.dataset_name,
dataset_dir=FLAGS.dataset_dir,
dataset_split=FLAGS.dataset_split)
export_config = export_util.ExportConfig(
model_name=FLAGS.model_name,
output_layer=FLAGS.output_layer,
ckpt_path=FLAGS.ckpt_path,
ckpt_format=FLAGS.ckpt_format,
output_dir=FLAGS.output_dir,
image_size=FLAGS.image_size,
finalize_method=FLAGS.finalize_method.lower().split(','),
quantization_config=quantization_config)
return export_config
def run_export():
"""Exports TFLite with PTQ."""
export_config = get_export_config_from_flags()
model = export_util.build_experiment_model(
experiment_type=export_config.model_name)
if export_config.ckpt_path:
logging.info('Loading checkpoint from %s', FLAGS.ckpt_path)
common_modules.load_weights(
model,
export_config.ckpt_path,
checkpoint_format=export_config.ckpt_format)
else:
logging.info('No checkpoint provided. Using randomly initialized weights.')
if export_config.output_layer is not None:
all_layer_names = {l.name for l in model.layers}
if export_config.output_layer not in all_layer_names:
model.summary()
logging.info(
'Cannot find the layer %s in the model. See the above summary to '
          'choose an output layer.', export_config.output_layer)
return
output_layer = model.get_layer(export_config.output_layer)
model = tf.keras.Model(model.input, output_layer.output)
batch_size = 1 if FLAGS.fix_batch_size else None
model_input = tf.keras.Input(
shape=(export_config.image_size, export_config.image_size, 3),
batch_size=batch_size)
model_output = export_util.finalize_serving(model(model_input), export_config)
model_for_inference = tf.keras.Model(model_input, model_output)
# Convert to tflite. Quantize if quantization parameters are specified.
converter = tf.lite.TFLiteConverter.from_keras_model(model_for_inference)
export_util.configure_tflite_converter(export_config, converter)
tflite_buffer = converter.convert()
  # Make sure the output directory exists and write tflite.
  tf.io.gfile.makedirs(export_config.output_dir)
tflite_path = os.path.join(export_config.output_dir,
f'{export_config.model_name}.tflite')
  with tf.io.gfile.GFile(tflite_path, 'wb') as tflite_file:
    tflite_file.write(tflite_buffer)
print('TfLite model exported to {}'.format(tflite_path))
# Export saved model.
saved_model_path = os.path.join(export_config.output_dir,
export_config.model_name)
if FLAGS.export_keras_model:
model_for_inference.save(saved_model_path)
else:
tf.saved_model.save(model_for_inference, saved_model_path)
print('SavedModel exported to {}'.format(saved_model_path))
def main(_):
run_export()
if __name__ == '__main__':
flags.mark_flag_as_required('model_name')
app.run(main)
| 7,532 | 39.069149 | 88 | py |
models | models-master/official/projects/edgetpu/vision/configs/mobilenet_edgetpu_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
# type: ignore
"""Configuration definitions for MobilenetEdgeTPU losses, learning rates, optimizers, and training."""
import dataclasses
import os
from typing import Any, Mapping, Optional
# Import libraries
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.configs import common
from official.vision.configs import image_classification as base_config
@dataclasses.dataclass
class MobilenetEdgeTPUModelConfig(base_config.ImageClassificationModel):
"""Configuration for the MobilenetEdgeTPU model.
Attributes:
name: The name of the model. Defaults to 'MobilenetEdgeTPU'.
    model_params: A dictionary that represents the parameters of the
      MobilenetEdgeTPU model. These will be passed in to the "from_name"
      function.
"""
model_params: Mapping[str, Any] = dataclasses.field(
default_factory=lambda: { # pylint: disable=g-long-lambda
'model_name': 'mobilenet_edgetpu_v2_xs',
'model_weights_path': '',
'checkpoint_format': 'tf_checkpoint',
'overrides': {
'batch_norm': 'tpu',
'num_classes': 1001,
'rescale_input': False,
'dtype': 'bfloat16'
}
})
@dataclasses.dataclass
class MobilenetEdgeTPUTaskConfig(base_config.ImageClassificationTask):
"""Task defination for MobileNetEdgeTPU.
Attributes:
model: A `ModelConfig` instance.
saved_model_path: Instead of initializing a model from the model config,
the model can be loaded from a file path.
"""
model: MobilenetEdgeTPUModelConfig = dataclasses.field(
default_factory=MobilenetEdgeTPUModelConfig
)
saved_model_path: Optional[str] = None
IMAGENET_TRAIN_EXAMPLES = 1281167
IMAGENET_VAL_EXAMPLES = 50000
IMAGENET_INPUT_PATH_BASE = 'imagenet-2012-tfrecord'
def mobilenet_edgetpu_base_experiment_config(
model_name: str) -> cfg.ExperimentConfig:
"""Image classification on imagenet with mobilenet_edgetpu.
Experiment config common across all mobilenet_edgetpu variants.
Args:
model_name: Name of the mobilenet_edgetpu model variant
Returns:
ExperimentConfig
"""
train_batch_size = 4096
eval_batch_size = 4096
steps_per_epoch = IMAGENET_TRAIN_EXAMPLES // train_batch_size
mobilenet_edgetpu_config = MobilenetEdgeTPUModelConfig(
num_classes=1001, input_size=[224, 224, 3])
mobilenet_edgetpu_config.model_params.model_name = model_name
config = cfg.ExperimentConfig(
task=MobilenetEdgeTPUTaskConfig(
model=mobilenet_edgetpu_config,
losses=base_config.Losses(label_smoothing=0.1),
train_data=base_config.DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='bfloat16',
aug_type=common.Augmentation(type='autoaug')),
validation_data=base_config.DataConfig(
input_path=os.path.join(IMAGENET_INPUT_PATH_BASE, 'valid*'),
is_training=False,
dtype='bfloat16',
drop_remainder=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch * 5,
max_to_keep=10,
train_steps=550 * steps_per_epoch,
validation_steps=IMAGENET_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'rmsprop',
'rmsprop': {
'rho': 0.9,
'momentum': 0.9,
'epsilon': 0.001,
}
},
'ema': {
'average_decay': 0.99,
'trainable_weights_only': False,
},
'learning_rate': {
'type': 'exponential',
'exponential': {
'initial_learning_rate':
0.008 * (train_batch_size // 128),
'decay_steps':
int(2.4 * steps_per_epoch),
'decay_rate':
0.97,
'staircase':
True
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
},
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
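# Illustrative sketch (not called anywhere): the factories registered below
# make these configs retrievable by name; the batch size override is only an
# example of tweaking the returned config.
def example_get_experiment_config() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('mobilenet_edgetpu_v2_s')
  config.task.validation_data.global_batch_size = 512
  return config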
# Registration for MobileNet-EdgeTPU-Search models.
# When this config is used, users need to specify the saved model path via
# --params_override=task.saved_model_path='your/saved_model/path/'.
@exp_factory.register_config_factory('mobilenet_edgetpu_search')
def mobilenet_edgetpu_search() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_search')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_tiny')
def mobilenet_edgetpu_v2_tiny() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_tiny')
# Registration for MobileNet-EdgeTPU-V2 models.
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_xs')
def mobilenet_edgetpu_v2_xs() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_xs')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_s')
def mobilenet_edgetpu_v2_s() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_s')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_m')
def mobilenet_edgetpu_v2_m() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_m')
@exp_factory.register_config_factory('mobilenet_edgetpu_v2_l')
def mobilenet_edgetpu_v2_l() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_v2_l')
# Registration for MobileNet-EdgeTPU-V1 models.
@exp_factory.register_config_factory('mobilenet_edgetpu')
def mobilenet_edgetpu() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu')
# Registration for MobileNet-EdgeTPU-V1 models.
# We use 'depth_multiplier' to scale the models.
# E.g. dm0p75 implies depth multiplier of 0.75x
@exp_factory.register_config_factory('mobilenet_edgetpu_dm0p75')
def mobilenet_edgetpu_dm0p75() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm0p75')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p25')
def mobilenet_edgetpu_dm1p25() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p25')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p5')
def mobilenet_edgetpu_dm1p5() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p5')
@exp_factory.register_config_factory('mobilenet_edgetpu_dm1p75')
def mobilenet_edgetpu_dm1p75() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('mobilenet_edgetpu_dm1p75')
# Registration for AutoSeg-EdgeTPU backbones
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_xs')
def autoseg_edgetpu_backbone_xs() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_xs')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_s')
def autoseg_edgetpu_backbone_s() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_s')
@exp_factory.register_config_factory('autoseg_edgetpu_backbone_m')
def autoseg_edgetpu_backbone_m() -> cfg.ExperimentConfig:
return mobilenet_edgetpu_base_experiment_config('autoseg_edgetpu_backbone_m')
| 8,875 | 37.094421 | 102 | py |
models | models-master/official/projects/edgetpu/vision/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/edgetpu/vision/configs/semantic_segmentation_searched_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
# type: ignore
"""Semantic segmentation configuration definition for AutoML built models."""
import dataclasses
import os
from typing import Any, List, Mapping, Optional
# Import libraries
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import backbones
from official.vision.configs import semantic_segmentation as base_cfg
# ADE 20K Dataset
ADE20K_TRAIN_EXAMPLES = 20210
ADE20K_VAL_EXAMPLES = 2000
ADE20K_INPUT_PATH_BASE = 'gs://**/ADE20K'
PRETRAINED_CKPT_PATH_BASE = 'gs://**/placeholder_for_edgetpu_models/pretrained_checkpoints'
BACKBONE_PRETRAINED_CHECKPOINT = {
'autoseg_edgetpu_backbone_xs':
PRETRAINED_CKPT_PATH_BASE +
'/autoseg_edgetpu_backbone_xs/ckpt-171600',
'autoseg_edgetpu_backbone_s':
PRETRAINED_CKPT_PATH_BASE +
'/autoseg_edgetpu_backbone_s/ckpt-171600',
'autoseg_edgetpu_backbone_m':
PRETRAINED_CKPT_PATH_BASE +
'/autoseg_edgetpu_backbone_m/ckpt-171600',
}
@dataclasses.dataclass
class BiFPNHeadConfig(hyperparams.Config):
"""BiFPN-based segmentation head config."""
min_level: int = 3
max_level: int = 8
fpn_num_filters: int = 96
@dataclasses.dataclass
class Losses(base_cfg.Losses):
label_smoothing: float = 0.0
ignore_label: int = 255
class_weights: List[float] = dataclasses.field(default_factory=list)
l2_weight_decay: float = 0.0
use_groundtruth_dimension: bool = True
top_k_percent_pixels: float = 1.0
@dataclasses.dataclass
class AutosegEdgeTPUModelConfig(hyperparams.Config):
"""Autoseg-EdgeTPU segmentation model config."""
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
backbone: backbones.Backbone = dataclasses.field(
default_factory=backbones.Backbone
)
head: BiFPNHeadConfig = dataclasses.field(default_factory=BiFPNHeadConfig)
model_params: Mapping[str, Any] = dataclasses.field(
default_factory=lambda: { # pylint: disable=g-long-lambda
'model_name': 'autoseg_edgetpu_backbone_s',
'checkpoint_format': 'tf_checkpoint',
'overrides': {
'batch_norm': 'tpu',
'rescale_input': False,
'backbone_only': True,
'resolution': 512
}
})
@dataclasses.dataclass
class AutosegEdgeTPUTaskConfig(base_cfg.SemanticSegmentationTask):
"""The task config inherited from the base segmentation task."""
model: AutosegEdgeTPUModelConfig = dataclasses.field(
default_factory=AutosegEdgeTPUModelConfig
)
train_data: base_cfg.DataConfig = dataclasses.field(
default_factory=lambda: base_cfg.DataConfig(is_training=True)
)
validation_data: base_cfg.DataConfig = dataclasses.field(
default_factory=lambda: base_cfg.DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: str = 'backbone' # all or backbone
model_output_keys: Optional[List[int]] = dataclasses.field(
default_factory=list)
def autoseg_edgetpu_experiment_config(backbone_name: str,
init_backbone: bool = True
) -> cfg.ExperimentConfig:
"""Experiment using the semantic segmenatation searched model.
Args:
backbone_name: Name of the backbone used for this model
init_backbone: Whether to initialize backbone from a pretrained checkpoint
Returns:
ExperimentConfig
"""
epochs = 300
train_batch_size = 64
eval_batch_size = 32
image_size = 512
steps_per_epoch = ADE20K_TRAIN_EXAMPLES // train_batch_size
train_steps = epochs * steps_per_epoch
model_config = AutosegEdgeTPUModelConfig(
num_classes=32, input_size=[image_size, image_size, 3])
model_config.model_params.model_name = backbone_name
if init_backbone:
model_config.model_params.model_weights_path = (
BACKBONE_PRETRAINED_CHECKPOINT[backbone_name])
model_config.model_params.overrides.resolution = image_size
config = cfg.ExperimentConfig(
task=AutosegEdgeTPUTaskConfig(
model=model_config,
train_data=base_cfg.DataConfig(
input_path=os.path.join(ADE20K_INPUT_PATH_BASE, 'train-*'),
output_size=[image_size, image_size],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=base_cfg.DataConfig(
input_path=os.path.join(ADE20K_INPUT_PATH_BASE, 'val-*'),
output_size=[image_size, image_size],
is_training=False,
resize_eval_groundtruth=True,
drop_remainder=True,
global_batch_size=eval_batch_size),
evaluation=base_cfg.Evaluation(report_train_mean_iou=False)),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch * 5,
max_to_keep=10,
train_steps=train_steps,
validation_steps=ADE20K_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'nesterov': True,
'momentum': 0.9,
}
},
'ema': {
'average_decay': 0.9998,
'trainable_weights_only': False,
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.12,
'decay_steps': train_steps
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
},
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
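# Illustrative sketch (unused): equivalent to the registered factories below,
# with the backbone left randomly initialized to avoid the placeholder
# checkpoint paths above.
def example_autoseg_config() -> cfg.ExperimentConfig:
  return autoseg_edgetpu_experiment_config(
      'autoseg_edgetpu_backbone_xs', init_backbone=False)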
# Registration for searched segmentation model.
@exp_factory.register_config_factory('autoseg_edgetpu_xs')
def autoseg_edgetpu_xs() -> cfg.ExperimentConfig:
return autoseg_edgetpu_experiment_config('autoseg_edgetpu_backbone_xs')
# Registration for searched segmentation model.
@exp_factory.register_config_factory('autoseg_edgetpu_s')
def autoseg_edgetpu_s() -> cfg.ExperimentConfig:
return autoseg_edgetpu_experiment_config('autoseg_edgetpu_backbone_s')
# Registration for searched segmentation model.
@exp_factory.register_config_factory('autoseg_edgetpu_m')
def autoseg_edgetpu_m() -> cfg.ExperimentConfig:
return autoseg_edgetpu_experiment_config('autoseg_edgetpu_backbone_m')
| 7,670 | 35.183962 | 91 | py |
models | models-master/official/projects/edgetpu/vision/configs/semantic_segmentation_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation configuration definition.
The segmentation model is built using the MobileNet-EdgeTPU-V2 backbone and a
DeepLabV3+ segmentation head.
"""
import dataclasses
import os
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import backbones
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import semantic_segmentation as base_cfg
@dataclasses.dataclass
class MobileNetEdgeTPU(hyperparams.Config):
"""MobileNetEdgeTPU config."""
model_id: str = 'mobilenet_edgetpu_v2_s'
freeze_large_filters: Optional[int] = None
pretrained_checkpoint_path: Optional[str] = None
@dataclasses.dataclass
class Backbone(backbones.Backbone):
"""Configuration for backbones.
  Attributes:
    type: 'str', type of backbone to be used, one of the fields below.
    mobilenet_edgetpu: mobilenet_edgetpu backbone config.
"""
type: Optional[str] = None
mobilenet_edgetpu: MobileNetEdgeTPU = dataclasses.field(
default_factory=MobileNetEdgeTPU
)
@dataclasses.dataclass
class CustomSemanticSegmentationTaskConfig(base_cfg.SemanticSegmentationTask):
"""Same config for custom taks."""
model: Optional[base_cfg.SemanticSegmentationModel] = None
train_data: base_cfg.DataConfig = dataclasses.field(
default_factory=lambda: base_cfg.DataConfig(is_training=True)
)
validation_data: base_cfg.DataConfig = dataclasses.field(
default_factory=lambda: base_cfg.DataConfig(is_training=False)
)
evaluation: base_cfg.Evaluation = dataclasses.field(
default_factory=base_cfg.Evaluation
)
# ADE 20K Dataset
ADE20K_TRAIN_EXAMPLES = 20210
ADE20K_VAL_EXAMPLES = 2000
ADE20K_INPUT_PATH_BASE = 'gs://**/ADE20K'
PRETRAINED_CKPT_PATH_BASE = 'gs://**/placeholder_for_edgetpu_models'
BACKBONE_PRETRAINED_CHECKPOINT = {
'mobilenet_edgetpu_v2_l':
PRETRAINED_CKPT_PATH_BASE +
'/pretrained_checkpoints/mobilenet_edgetpu_v2_l/ckpt-171600',
'mobilenet_edgetpu_v2_m':
PRETRAINED_CKPT_PATH_BASE +
'/pretrained_checkpoints/mobilenet_edgetpu_v2_m/ckpt-171600',
'mobilenet_edgetpu_v2_s':
PRETRAINED_CKPT_PATH_BASE +
'/pretrained_checkpoints/mobilenet_edgetpu_v2_s/ckpt-171600',
'mobilenet_edgetpu_v2_xs':
PRETRAINED_CKPT_PATH_BASE +
'/pretrained_checkpoints/mobilenet_edgetpu_v2_xs/ckpt-171600',
}
BACKBONE_HEADPOINT = {
'mobilenet_edgetpu_v2_l': 4,
'mobilenet_edgetpu_v2_m': 4,
'mobilenet_edgetpu_v2_s': 4,
'mobilenet_edgetpu_v2_xs': 4,
}
BACKBONE_LOWER_FEATURES = {
'mobilenet_edgetpu_v2_l': 3,
'mobilenet_edgetpu_v2_m': 3,
'mobilenet_edgetpu_v2_s': 3,
'mobilenet_edgetpu_v2_xs': 3,
}
def seg_deeplabv3plus_ade20k_32(backbone: str,
init_backbone: bool = True
) -> cfg.ExperimentConfig:
"""Semantic segmentation on ADE20K dataset with deeplabv3+."""
epochs = 200
train_batch_size = 128
eval_batch_size = 32
image_size = 512
steps_per_epoch = ADE20K_TRAIN_EXAMPLES // train_batch_size
aspp_dilation_rates = [5, 10, 15]
pretrained_checkpoint_path = BACKBONE_PRETRAINED_CHECKPOINT[
backbone] if init_backbone else None
config = cfg.ExperimentConfig(
task=CustomSemanticSegmentationTaskConfig(
model=base_cfg.SemanticSegmentationModel(
# ADE20K uses only 32 semantic classes for train/evaluation.
# The void (background) class is ignored in train and evaluation.
num_classes=32,
input_size=[None, None, 3],
backbone=Backbone(
type='mobilenet_edgetpu',
mobilenet_edgetpu=MobileNetEdgeTPU(
model_id=backbone,
pretrained_checkpoint_path=pretrained_checkpoint_path,
freeze_large_filters=500,
)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=BACKBONE_HEADPOINT[backbone],
use_depthwise_convolution=True,
dilation_rates=aspp_dilation_rates,
pool_kernel_size=[256, 256],
num_filters=128,
dropout_rate=0.3,
)),
head=base_cfg.SegmentationHead(
level=BACKBONE_HEADPOINT[backbone],
num_convs=2,
num_filters=256,
use_depthwise_convolution=True,
feature_fusion='deeplabv3plus',
low_level=BACKBONE_LOWER_FEATURES[backbone],
low_level_num_filters=48),
norm_activation=common.NormActivation(
activation='relu',
norm_momentum=0.99,
norm_epsilon=2e-3,
use_sync_bn=False)),
train_data=base_cfg.DataConfig(
input_path=os.path.join(ADE20K_INPUT_PATH_BASE, 'train-*'),
output_size=[image_size, image_size],
is_training=True,
global_batch_size=train_batch_size),
validation_data=base_cfg.DataConfig(
input_path=os.path.join(ADE20K_INPUT_PATH_BASE, 'val-*'),
output_size=[image_size, image_size],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=True,
drop_remainder=False),
evaluation=base_cfg.Evaluation(report_train_mean_iou=False),
),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=epochs * steps_per_epoch,
validation_steps=ADE20K_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.0001,
'decay_steps': epochs * steps_per_epoch,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 4 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
def seg_deeplabv3plus_ade20k(backbone: str):
config = seg_deeplabv3plus_ade20k_32(backbone)
config.task.model.num_classes = 151
config.trainer.optimizer_config.learning_rate.polynomial.power = 1.1
config.task.model.decoder.aspp.num_filters = 160
config.task.model.head.low_level_num_filters = 64
return config
# Experiment configs for 32 output classes
@exp_factory.register_config_factory(
'deeplabv3plus_mobilenet_edgetpuv2_m_ade20k_32')
def deeplabv3plus_mobilenet_edgetpuv2_m_ade20k_32() -> cfg.ExperimentConfig:
return seg_deeplabv3plus_ade20k_32('mobilenet_edgetpu_v2_m')
@exp_factory.register_config_factory(
'deeplabv3plus_mobilenet_edgetpuv2_s_ade20k_32')
def deeplabv3plus_mobilenet_edgetpuv2_s_ade20k_32() -> cfg.ExperimentConfig:
return seg_deeplabv3plus_ade20k_32('mobilenet_edgetpu_v2_s')
@exp_factory.register_config_factory(
'deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32')
def deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32() -> cfg.ExperimentConfig:
return seg_deeplabv3plus_ade20k_32('mobilenet_edgetpu_v2_xs')
# Experiment configs for 151 output classes
@exp_factory.register_config_factory(
'deeplabv3plus_mobilenet_edgetpuv2_m_ade20k')
def deeplabv3plus_mobilenet_edgetpuv2_m_ade20k() -> cfg.ExperimentConfig:
config = seg_deeplabv3plus_ade20k('mobilenet_edgetpu_v2_m')
return config
@exp_factory.register_config_factory(
'deeplabv3plus_mobilenet_edgetpuv2_s_ade20k')
def deeplabv3plus_mobilenet_edgetpuv2_s_ade20k() -> cfg.ExperimentConfig:
config = seg_deeplabv3plus_ade20k('mobilenet_edgetpu_v2_s')
return config
@exp_factory.register_config_factory(
'deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k')
def deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k() -> cfg.ExperimentConfig:
config = seg_deeplabv3plus_ade20k('mobilenet_edgetpu_v2_xs')
return config
| 9,396 | 35.85098 | 79 | py |
models | models-master/official/projects/edgetpu/vision/dataloaders/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/edgetpu/vision/dataloaders/classification_input_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests classification_input.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.edgetpu.vision.dataloaders import classification_input
from official.vision.configs import common
from official.vision.dataloaders import tfexample_utils
IMAGE_FIELD_KEY = 'image/encoded'
LABEL_FIELD_KEY = 'image/class/label'
class DecoderTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(100, 100, 0),
(100, 100, 1),
(100, 100, 2),
)
def test_decoder(self, image_height, image_width, num_instances):
decoder = classification_input.Decoder(
image_field_key=IMAGE_FIELD_KEY, label_field_key=LABEL_FIELD_KEY)
serialized_example = tfexample_utils.create_classification_example(
image_height, image_width)
decoded_tensors = decoder.decode(tf.convert_to_tensor(serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertCountEqual([IMAGE_FIELD_KEY, LABEL_FIELD_KEY], results.keys())
self.assertEqual(0, results[LABEL_FIELD_KEY])
class ParserTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([224, 224, 3], 'float32', True, 'autoaug', False, True, 'JPEG'),
([224, 224, 3], 'float16', True, 'randaug', False, False, 'PNG'),
([224, 224, 3], 'float32', False, None, False, True, 'JPEG'),
([224, 224, 3], 'float16', False, None, False, False, 'PNG'),
([512, 640, 3], 'float32', True, 'randaug', False, False, 'JPEG'),
([512, 640, 3], 'float16', True, 'autoaug', False, False, 'PNG'),
([512, 640, 3], 'float32', False, None, False, True, 'JPEG'),
([512, 640, 3], 'float16', False, None, False, False, 'PNG'),
([640, 640, 3], 'float32', True, None, False, False, 'JPEG'),
([640, 640, 3], 'bfloat16', True, None, False, False, 'PNG'),
([640, 640, 3], 'float32', False, None, False, False, 'JPEG'),
([640, 640, 3], 'bfloat16', False, None, False, False, 'PNG'),
([224, 224, 3], 'float32', True, 'autoaug', True, True, 'JPEG'),
([224, 224, 3], 'float16', True, 'randaug', True, False, 'PNG'),
)
def test_parser(self, output_size, dtype, is_training, aug_name,
is_multilabel, decode_jpeg_only, image_format):
serialized_example = tfexample_utils.create_classification_example(
output_size[0], output_size[1], image_format, is_multilabel)
if aug_name == 'randaug':
aug_type = common.Augmentation(
type=aug_name, randaug=common.RandAugment(magnitude=10))
elif aug_name == 'autoaug':
aug_type = common.Augmentation(
type=aug_name, autoaug=common.AutoAugment(augmentation_name='test'))
else:
aug_type = None
decoder = classification_input.Decoder(
image_field_key=IMAGE_FIELD_KEY, label_field_key=LABEL_FIELD_KEY,
is_multilabel=is_multilabel)
parser = classification_input.Parser(
output_size=output_size[:2],
num_classes=10,
image_field_key=IMAGE_FIELD_KEY,
label_field_key=LABEL_FIELD_KEY,
is_multilabel=is_multilabel,
decode_jpeg_only=decode_jpeg_only,
aug_rand_hflip=False,
aug_type=aug_type,
dtype=dtype)
decoded_tensors = decoder.decode(serialized_example)
image, label = parser.parse_fn(is_training)(decoded_tensors)
self.assertAllEqual(image.numpy().shape, output_size)
if not is_multilabel:
self.assertAllEqual(label, 0)
else:
self.assertAllEqual(label.numpy().shape, [10])
if dtype == 'float32':
self.assertAllEqual(image.dtype, tf.float32)
elif dtype == 'float16':
self.assertAllEqual(image.dtype, tf.float16)
elif dtype == 'bfloat16':
self.assertAllEqual(image.dtype, tf.bfloat16)
if __name__ == '__main__':
tf.test.main()
| 4,455 | 38.087719 | 78 | py |
models | models-master/official/projects/edgetpu/vision/dataloaders/classification_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification decoder and parser."""
# Import libraries
import tensorflow as tf
from official.vision.dataloaders import classification_input
from official.vision.ops import preprocess_ops
MEAN_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)
STDDEV_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)
def random_crop_image(image,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100):
"""Randomly crop an arbitrary shaped slice from the input image.
Args:
image: a Tensor of shape [height, width, 3] representing the input image.
aspect_ratio_range: a list of floats. The cropped area of the image must
have an aspect ratio = width / height within this range.
    area_range: a list of floats. The cropped area of the image must contain
a fraction of the input image within this range.
max_attempts: the number of attempts at generating a cropped region of the
image of the specified constraints. After max_attempts failures, return
the entire image.
Returns:
cropped_image: a Tensor representing the random cropped image. Can be the
original image if max_attempts is exhausted.
"""
with tf.name_scope('random_crop_image'):
crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),
min_object_covered=0.1,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
cropped_image = tf.slice(image, crop_offset, crop_size)
return cropped_image
def random_crop_image_v2(image_bytes,
image_shape,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100):
"""Randomly crop an arbitrary shaped slice from the input image.
This is a faster version of `random_crop_image` which takes the original
  image bytes and image size as the inputs, and partially decodes the JPEG
bytes according to the generated crop.
Args:
image_bytes: a Tensor of type string representing the raw image bytes.
image_shape: a Tensor specifying the shape of the raw image.
aspect_ratio_range: a list of floats. The cropped area of the image must
have an aspect ratio = width / height within this range.
    area_range: a list of floats. The cropped area of the image must contain
a fraction of the input image within this range.
max_attempts: the number of attempts at generating a cropped region of the
image of the specified constraints. After max_attempts failures, return
the entire image.
Returns:
cropped_image: a Tensor representing the random cropped image. Can be the
original image if max_attempts is exhausted.
"""
with tf.name_scope('random_crop_image_v2'):
crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box(
image_shape,
tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),
min_object_covered=0.1,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
offset_y, offset_x, _ = tf.unstack(crop_offset)
crop_height, crop_width, _ = tf.unstack(crop_size)
crop_window = tf.stack([offset_y, offset_x, crop_height, crop_width])
cropped_image = tf.image.decode_and_crop_jpeg(
image_bytes, crop_window, channels=3)
return cropped_image
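# Sketch (unused helper): a typical caller holding raw JPEG bytes lets libjpeg
# decode only the sampled crop window instead of the full image.
def _example_crop_from_bytes(image_bytes):
  image_shape = tf.image.extract_jpeg_shape(image_bytes)
  return random_crop_image_v2(image_bytes, image_shape)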
class Decoder(classification_input.Decoder):
"""A tf.Example decoder for classification task."""
pass
class Parser(classification_input.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def _parse_train_image(self, decoded_tensors):
"""Parses image data for training."""
image_bytes = decoded_tensors[self._image_field_key]
if self._decode_jpeg_only:
image_shape = tf.image.extract_jpeg_shape(image_bytes)
# Crops image.
cropped_image = random_crop_image_v2(
image_bytes, image_shape)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), image_shape)),
lambda: preprocess_ops.center_crop_image_v2(image_bytes, image_shape),
lambda: cropped_image)
else:
# Decodes image.
image = tf.io.decode_image(image_bytes, channels=3)
image.set_shape([None, None, 3])
# Crops image.
cropped_image = random_crop_image(image)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), tf.shape(image))),
lambda: preprocess_ops.center_crop_image(image),
lambda: cropped_image)
if self._aug_rand_hflip:
image = tf.image.random_flip_left_right(image)
# Resizes image.
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
# Apply autoaug or randaug.
if self._augmenter is not None:
image = self._augmenter.distort(image)
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image,
offset=MEAN_RGB,
scale=STDDEV_RGB)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
return image
def _parse_eval_image(self, decoded_tensors):
"""Parses image data for evaluation."""
image_bytes = decoded_tensors[self._image_field_key]
if self._decode_jpeg_only:
image_shape = tf.image.extract_jpeg_shape(image_bytes)
# Center crops.
image = preprocess_ops.center_crop_image_v2(image_bytes, image_shape)
else:
# Decodes image.
image = tf.io.decode_image(image_bytes, channels=3)
image.set_shape([None, None, 3])
# Center crops.
image = preprocess_ops.center_crop_image(image)
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image,
offset=MEAN_RGB,
scale=STDDEV_RGB)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
return image
| 7,102 | 36.983957 | 82 | py |
models | models-master/official/projects/edgetpu/vision/modeling/custom_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for custom_layers."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import custom_layers
GROUPS = [2, 4]
INPUT_CHANNEL = [8, 16]
OUTPUT_CHANNEL = [8, 16]
USE_BATCH_NORM = [True, False]
ACTIVATION = ['relu', 'linear']
BATCH_NORM_LAYER = tf.keras.layers.BatchNormalization
# 2 functionally identical group conv implementations.
GROUP_CONV_IMPL = {
'layer': custom_layers.GroupConv2D,
'model': custom_layers.GroupConv2DKerasModel
}
def _get_random_inputs(input_shape):
return tf.random.uniform(shape=input_shape)
class GroupConv2DTest(tf.test.TestCase, parameterized.TestCase):
# Test for combinations of groups, input_channel, output_channel, and
# whether to use batch_norm
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL, USE_BATCH_NORM))
def test_construction(self, groups, input_channel, output_channel,
use_batch_norm):
batch_norm_layer = BATCH_NORM_LAYER if use_batch_norm else None
l = custom_layers.GroupConv2D(
output_channel,
3,
groups=groups,
use_bias=True,
batch_norm_layer=batch_norm_layer)
inputs = _get_random_inputs(input_shape=(1, 4, 4, output_channel))
_ = l(inputs)
# kernel and bias for each group. When using batch norm, 2 additional
# trainable weights per group for batchnorm layers: gamma and beta.
expected_num_trainable_weights = groups * (2 + 2 * use_batch_norm)
self.assertLen(l.trainable_weights, expected_num_trainable_weights)
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL))
def test_kernel_shapes(self, groups, input_channel, output_channel):
l = custom_layers.GroupConv2D(
output_channel, 3, groups=groups, use_bias=False)
_ = l(_get_random_inputs(input_shape=(1, 32, 32, input_channel)))
expected_kernel_shapes = [(3, 3, int(input_channel / groups),
int(output_channel / groups))
for _ in range(groups)]
kernel_shapes = [
l.trainable_weights[i].get_shape()
for i in range(len(l.trainable_weights))
]
self.assertListEqual(kernel_shapes, expected_kernel_shapes)
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL))
def test_output_shapes(self, groups, input_channel, output_channel):
l = custom_layers.GroupConv2D(
output_channel, 3, groups=groups, use_bias=False, padding='same')
outputs = l(_get_random_inputs(input_shape=[2, 32, 32, input_channel]))
self.assertListEqual(outputs.get_shape().as_list(),
[2, 32, 32, output_channel])
@parameterized.parameters(
itertools.product(GROUPS, USE_BATCH_NORM, ACTIVATION))
def test_serialization_deserialization(self, groups, use_batch_norm,
activation):
batch_norm_layer = BATCH_NORM_LAYER if use_batch_norm else None
l = custom_layers.GroupConv2D(
filters=8,
kernel_size=1,
groups=groups,
use_bias=False,
padding='same',
batch_norm_layer=batch_norm_layer,
activation=activation)
config = l.get_config()
# New layer from config
new_l = custom_layers.GroupConv2D.from_config(config)
# Copy the weights too.
l.build(input_shape=(1, 1, 4))
new_l.build(input_shape=(1, 1, 4))
new_l.set_weights(l.get_weights())
inputs = _get_random_inputs((1, 1, 1, 4))
self.assertNotEqual(l, new_l)
self.assertAllEqual(l(inputs), new_l(inputs))
@parameterized.parameters(
itertools.product(GROUPS, INPUT_CHANNEL, OUTPUT_CHANNEL, USE_BATCH_NORM,
ACTIVATION))
def test_equivalence(self, groups, input_channel, output_channel,
use_batch_norm, activation):
batch_norm_layer = BATCH_NORM_LAYER if use_batch_norm else None
kwargs = dict(
filters=output_channel,
groups=groups,
kernel_size=1,
use_bias=False,
batch_norm_layer=batch_norm_layer,
activation=activation)
gc_layer = tf.keras.Sequential([custom_layers.GroupConv2D(**kwargs)])
gc_model = custom_layers.GroupConv2DKerasModel(**kwargs)
gc_layer.build(input_shape=(None, 3, 3, input_channel))
gc_model.build(input_shape=(None, 3, 3, input_channel))
inputs = _get_random_inputs((2, 3, 3, input_channel))
gc_layer.set_weights(gc_model.get_weights())
self.assertAllEqual(gc_layer(inputs), gc_model(inputs))
@parameterized.parameters(('layer', 1, 4), ('layer', 4, 4), ('model', 1, 4),
('model', 4, 4))
def test_invalid_groups_raises_value_error(self, gc_type, groups,
output_channel):
with self.assertRaisesRegex(ValueError, r'^(Number of groups)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=output_channel, groups=groups, kernel_size=3)
@parameterized.parameters(('layer', 3, 4), ('layer', 4, 6), ('model', 3, 4),
('model', 4, 6))
def test_non_group_divisible_raises_value_error(self, gc_type, groups,
input_channel):
with self.assertRaisesRegex(ValueError, r'^(Number of input channels)'):
l = GROUP_CONV_IMPL[gc_type](
filters=groups * 4, groups=groups, kernel_size=3)
l.build(input_shape=(4, 4, input_channel))
@parameterized.parameters(('layer'), ('model'))
def test_non_supported_data_format_raises_value_error(self, gc_type):
with self.assertRaisesRegex(ValueError, r'^(.*(channels_last).*)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=4, groups=2, kernel_size=1, data_format='channels_first')
@parameterized.parameters(('layer'), ('model'))
def test_invalid_batch_norm_raises_value_error(self, gc_type):
def my_batch_norm(x):
return x**2
with self.assertRaisesRegex(ValueError, r'^(.*(not a class).*)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=4, groups=2, kernel_size=1, batch_norm_layer=my_batch_norm)
@parameterized.parameters(('layer'), ('model'))
def test_invalid_padding_raises_value_error(self, gc_type):
with self.assertRaisesRegex(ValueError, r'^(.*(same, or valid).*)'):
_ = GROUP_CONV_IMPL[gc_type](
filters=4, groups=2, kernel_size=1, padding='causal')
class ArgmaxTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(([16, 32, 64], tf.dtypes.float32, tf.dtypes.int32),
([255, 19], tf.dtypes.int32, tf.dtypes.int64))
def test_reference_match(self, shape, input_type, output_type):
random_inputs = tf.random.uniform(shape=shape, maxval=10, dtype=input_type)
for axis in range(-len(shape) + 1, len(shape)):
control_output = tf.math.argmax(
random_inputs, axis=axis, output_type=output_type)
test_output = custom_layers.argmax(
random_inputs, axis=axis, output_type=output_type)
self.assertAllEqual(control_output, test_output)
| 7,740 | 40.395722 | 79 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPUV2 model's building blocks."""
import dataclasses
import math
from typing import Any, Dict, List, Optional, Tuple, Union
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.modeling.hyperparams import oneof
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import custom_layers
InitializerType = Optional[Union[str, tf.keras.initializers.Initializer]]
@dataclasses.dataclass
class BlockType(oneof.OneOfConfig):
"""Block OP types representing IBN version."""
type: str = 'ibn_dw'
skip: str = 'skip'
ibn_dw: str = 'ibn_dw'
ibn_fused: str = 'ibn_fused'
ibn_grouped: str = 'ibn_grouped'
ibn_fused_grouped: str = 'ibn_fused_grouped'
@dataclasses.dataclass
class BlockSearchConfig(base_config.Config):
"""Config for searchable BlockConfig parameters."""
op_type: BlockType = dataclasses.field(default_factory=BlockType)
kernel_size: Optional[int] = None
expand_ratio: Optional[int] = None
stride: Optional[int] = None
group_size: Optional[int] = None
@dataclasses.dataclass
class BlockConfig(base_config.Config):
"""Full config for a single MB Conv Block."""
input_filters: int = 0
output_filters: int = 0
kernel_size: int = 3
num_repeat: int = 1
expand_ratio: int = 1
strides: Tuple[int, int] = (1, 1)
se_ratio: Optional[float] = None
id_skip: bool = True
fused_expand: bool = False
fused_project: bool = False
conv_type: str = 'depthwise'
group_size: Optional[int] = None
@classmethod
def from_search_config(cls,
input_filters: int,
output_filters: int,
block_search_config: BlockSearchConfig,
num_repeat: int = 1,
se_ratio: Optional[float] = None,
id_skip: bool = True) -> 'BlockConfig':
"""Creates BlockConfig from the given parameters."""
block_op_type = block_search_config.op_type
if block_op_type.type == BlockType.skip:
raise ValueError('Received skip type within block creation.')
elif block_op_type.type == BlockType.ibn_dw:
fused_expand = False
fused_project = False
conv_type = 'depthwise'
elif block_op_type.type == BlockType.ibn_fused:
fused_expand = True
fused_project = False
conv_type = 'full'
elif block_op_type.type == BlockType.ibn_fused_grouped:
fused_expand = True
fused_project = False
conv_type = 'group'
elif block_op_type.type == BlockType.ibn_grouped:
fused_expand = False
fused_project = False
conv_type = 'group'
else:
raise NotImplementedError(f'Unsupported IBN type {block_op_type.type}.')
return cls.from_args(
input_filters=input_filters,
output_filters=output_filters,
kernel_size=block_search_config.kernel_size,
num_repeat=num_repeat,
expand_ratio=block_search_config.expand_ratio,
strides=(block_search_config.stride, block_search_config.stride),
se_ratio=se_ratio,
id_skip=id_skip,
fused_expand=fused_expand,
fused_project=fused_project,
conv_type=conv_type,
group_size=block_search_config.group_size)
@dataclasses.dataclass
class BlockGroupConfig(base_config.Config):
"""Config for group of blocks that share the same filter size."""
blocks: List[BlockSearchConfig] = dataclasses.field(default_factory=list)
filters: int = 64
def _default_mobilenet_edgetpu_v2_topology():
return [
# Block Group 0
BlockGroupConfig(
blocks=[
# BlockSearchConfig: op_type, kernel_size, expand_ratio, stride
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 1, 1),
],
filters=24),
# Block Group 1
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused_grouped'), 3, 4, 1),
],
filters=48),
# Block Group 2
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused_grouped'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused_grouped'), 3, 4, 1),
],
filters=64),
# Block Group 3
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_fused'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
],
filters=128),
# Block Group 4
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 8, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
],
filters=160),
# Block Group 5
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 8, 2),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 4, 1),
],
filters=192),
# Block Group 6
BlockGroupConfig(
blocks=[
BlockSearchConfig.from_args(
BlockType.from_args('ibn_dw'), 3, 8, 1),
],
filters=256),
]
@dataclasses.dataclass
class TopologyConfig(base_config.Config):
"""Config for model topology as a collection of BlockGroupConfigs."""
block_groups: List[BlockGroupConfig] = dataclasses.field(
default_factory=_default_mobilenet_edgetpu_v2_topology)
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""Default Config for MobilenetEdgeTPUV2."""
width_coefficient: float = 1.0
depth_coefficient: float = 1.0
resolution: Union[int, Tuple[int, int]] = 224
dropout_rate: float = 0.1
stem_base_filters: int = 64
stem_kernel_size: int = 5
top_base_filters: int = 1280
conv_kernel_initializer: InitializerType = None
dense_kernel_initializer: InitializerType = None
blocks: Tuple[BlockConfig, ...] = (
      # (input_filters, output_filters, kernel_size, num_repeat, expand_ratio,
      #  strides, se_ratio, id_skip, fused_expand, fused_project, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(
stem_base_filters, 24, 3, 1, 1, (1, 1), conv_type='full'),
BlockConfig.from_args(
24, 48, 3, 1, 8, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(
48, 48, 3, 1, 4, (1, 1), fused_expand=True, conv_type='group'),
BlockConfig.from_args(
48, 64, 3, 1, 8, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(
64, 64, 3, 1, 4, (1, 1), fused_expand=True, conv_type='group'),
BlockConfig.from_args(
64, 64, 3, 1, 4, (1, 1), fused_expand=True, conv_type='full'),
BlockConfig.from_args(
64, 64, 3, 1, 4, (1, 1), fused_expand=True, conv_type='group'),
BlockConfig.from_args(
64, 128, 3, 1, 8, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(128, 128, 3, 3, 4, (1, 1)),
BlockConfig.from_args(128, 160, 3, 1, 8, (1, 1)),
BlockConfig.from_args(160, 160, 3, 3, 4, (1, 1)),
BlockConfig.from_args(160, 192, 5, 1, 8, (2, 2)),
BlockConfig.from_args(192, 192, 5, 3, 4, (1, 1)),
BlockConfig.from_args(192, 256, 5, 1, 8, (1, 1)),
# pylint: enable=bad-whitespace
)
activation: str = 'relu'
batch_norm: str = 'default'
bn_momentum: float = 0.99
bn_epsilon: float = 1e-3
# While the original implementation used a weight decay of 1e-5,
# tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras
weight_decay: float = 5e-6
drop_connect_rate: float = 0.1
depth_divisor: int = 8
min_depth: Optional[int] = None
# No Squeeze/Excite for MobilenetEdgeTPUV2
use_se: bool = False
input_channels: int = 3
num_classes: int = 1001
model_name: str = 'mobilenet_edgetpu_v2'
rescale_input: bool = False
data_format: str = 'channels_last'
dtype: str = 'float32'
# The number of filters in each group. HW arch dependent.
group_base_size: int = 64
backbone_only: bool = False
features_as_dict: bool = False
def mobilenet_edgetpu_v2_base(
width_coefficient: float = 1.0,
depth_coefficient: float = 1.0,
stem_base_filters: int = 64,
stem_kernel_size: int = 5,
top_base_filters: int = 1280,
group_base_size: int = 64,
dropout_rate: float = 0.2,
drop_connect_rate: float = 0.1,
filter_size_overrides: Optional[Dict[int, int]] = None,
block_op_overrides: Optional[Dict[int, Dict[int, Dict[str, Any]]]] = None,
block_group_overrides: Optional[Dict[int, Dict[str, Any]]] = None,
topology: Optional[TopologyConfig] = None):
"""Creates MobilenetEdgeTPUV2 ModelConfig based on tuning parameters."""
config = ModelConfig()
param_overrides = {
'width_coefficient': width_coefficient,
'depth_coefficient': depth_coefficient,
'stem_base_filters': stem_base_filters,
'stem_kernel_size': stem_kernel_size,
'top_base_filters': top_base_filters,
'group_base_size': group_base_size,
'dropout_rate': dropout_rate,
'drop_connect_rate': drop_connect_rate
}
config = config.replace(**param_overrides)
topology_config = TopologyConfig() if topology is None else topology
if filter_size_overrides:
for group_id in filter_size_overrides:
topology_config.block_groups[group_id].filters = filter_size_overrides[
group_id]
if block_op_overrides:
for group_id in block_op_overrides:
for block_id in block_op_overrides[group_id]:
replaced_block = topology_config.block_groups[group_id].blocks[
block_id].replace(**block_op_overrides[group_id][block_id])
topology_config.block_groups[group_id].blocks[block_id] = replaced_block
if block_group_overrides:
for group_id in block_group_overrides:
replaced_group = topology_config.block_groups[group_id].replace(
**block_group_overrides[group_id])
topology_config.block_groups[group_id] = replaced_group
blocks = ()
input_filters = stem_base_filters
for group in topology_config.block_groups:
for block_search in group.blocks:
if block_search.op_type != BlockType.skip:
block = BlockConfig.from_search_config(
input_filters=input_filters,
output_filters=group.filters,
block_search_config=block_search)
blocks += (block,)
# Set input filters for the next block
input_filters = group.filters
config = config.replace(blocks=blocks)
return config
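# A minimal sketch (illustrative, not part of the original module) of deriving
# a variant config from the base builder; the override values are arbitrary.
def _example_custom_v2_config() -> ModelConfig:
  """Illustrative only: widens block group 1 to 56 filters."""
  config = mobilenet_edgetpu_v2_base(
      filter_size_overrides={1: 56}, dropout_rate=0.1)
  # Group 0 contributes blocks[0]; group 1 contributes blocks[1] and blocks[2].
  assert config.blocks[1].output_filters == 56
  return config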
def autoseg_edgetpu_backbone_base(
width_coefficient: float = 1.0,
depth_coefficient: float = 1.0,
stem_base_filters: int = 64,
stem_kernel_size: int = 5,
top_base_filters: int = 1280,
group_base_size: int = 64,
dropout_rate: float = 0.2,
drop_connect_rate: float = 0.1,
blocks_overrides: Optional[Tuple[BlockConfig, ...]] = None):
"""Creates a edgetpu ModelConfig based on search on segmentation."""
config = ModelConfig()
config.depth_divisor = 4
param_overrides = {
'width_coefficient': width_coefficient,
'depth_coefficient': depth_coefficient,
'stem_base_filters': stem_base_filters,
'stem_kernel_size': stem_kernel_size,
'top_base_filters': top_base_filters,
'group_base_size': group_base_size,
'dropout_rate': dropout_rate,
'drop_connect_rate': drop_connect_rate,
}
if blocks_overrides:
param_overrides['blocks'] = blocks_overrides
config = config.replace(**param_overrides)
return config
def autoseg_edgetpu_backbone_s() -> ModelConfig:
"""AutoML searched model with 2.5ms target simulated latency."""
stem_base_filters = 32
stem_kernel_size = 3
top_base_filters = 1280
blocks = (
      # (input_filters, output_filters, kernel_size, num_repeat, expand_ratio,
      #  strides, se_ratio, id_skip, fused_expand, fused_project, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(
stem_base_filters,
12,
3,
1,
1, (1, 1),
fused_expand=True,
conv_type='full'),
BlockConfig.from_args(
12, 36, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(36, 18, 5, 1, 3, (1, 1)),
BlockConfig.from_args(
18, 60, 5, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(60, 60, 3, 1, 3, (1, 1)),
BlockConfig.from_args(60, 120, 5, 1, 6, (2, 2)),
BlockConfig.from_args(120, 120, 3, 1, 3, (1, 1)),
BlockConfig.from_args(120, 120, 5, 1, 6, (1, 1)),
BlockConfig.from_args(120, 112, 3, 1, 6, (1, 1)),
BlockConfig.from_args(112, 112, 5, 2, 6, (1, 1)),
BlockConfig.from_args(112, 112, 5, 1, 1, (2, 2), id_skip=False),
BlockConfig.from_args(
112, 192, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 3, (1, 1)),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 192, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 160, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
# pylint: enable=bad-whitespace
)
return autoseg_edgetpu_backbone_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
blocks_overrides=blocks,
dropout_rate=0.2,
drop_connect_rate=0.2)
def autoseg_edgetpu_backbone_xs() -> ModelConfig:
"""AutoML searched model with 2ms target simulated latency."""
stem_base_filters = 32
stem_kernel_size = 3
top_base_filters = 1280
blocks = (
      # (input_filters, output_filters, kernel_size, num_repeat, expand_ratio,
      #  strides, se_ratio, id_skip, fused_expand, fused_project, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(
stem_base_filters,
12,
3,
1,
1, (1, 1),
fused_expand=True,
conv_type='full'),
BlockConfig.from_args(
12, 24, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(24, 24, 3, 1, 3, (1, 1)),
BlockConfig.from_args(
24, 60, 3, 1, 3, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(60, 40, 3, 1, 6, (1, 1)),
BlockConfig.from_args(40, 40, 5, 1, 3, (2, 2)),
BlockConfig.from_args(40, 40, 3, 1, 6, (1, 1)),
BlockConfig.from_args(
40, 120, 3, 1, 6, (1, 1), fused_expand=True, conv_type='full'),
BlockConfig.from_args(120, 168, 3, 1, 6, (1, 1)),
BlockConfig.from_args(168, 84, 5, 1, 6, (1, 1)),
BlockConfig.from_args(84, 84, 5, 1, 3, (1, 1)),
BlockConfig.from_args(84, 84, 5, 1, 1, (2, 2), id_skip=False),
BlockConfig.from_args(
84, 288, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(288, 288, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
288, 96, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 480, 5, 1, 3, (1, 1)),
# pylint: enable=bad-whitespace
)
return autoseg_edgetpu_backbone_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
blocks_overrides=blocks,
dropout_rate=0.2,
drop_connect_rate=0.2)
def autoseg_edgetpu_backbone_m() -> ModelConfig:
"""AutoML searched model with 3ms target simulated latency."""
stem_base_filters = 32
stem_kernel_size = 3
top_base_filters = 1280
blocks = (
      # (input_filters, output_filters, kernel_size, num_repeat, expand_ratio,
      #  strides, se_ratio, id_skip, fused_expand, fused_project, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(stem_base_filters, 16, 5, 1, 1, (1, 1)),
BlockConfig.from_args(
16, 36, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(36, 36, 3, 1, 3, (1, 1)),
BlockConfig.from_args(
36, 60, 3, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(60, 60, 3, 1, 6, (1, 1)),
BlockConfig.from_args(
60, 120, 5, 1, 6, (2, 2), fused_expand=True, conv_type='full'),
BlockConfig.from_args(120, 120, 5, 1, 6, (1, 1)),
BlockConfig.from_args(
120, 80, 3, 1, 6, (1, 1), fused_expand=True, conv_type='full'),
BlockConfig.from_args(80, 168, 3, 1, 6, (1, 1)),
BlockConfig.from_args(168, 168, 5, 1, 6, (1, 1)),
BlockConfig.from_args(168, 168, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
168, 168, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(168, 168, 3, 1, 1, (2, 2), id_skip=False),
BlockConfig.from_args(
168, 192, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 288, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(288, 288, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
288, 96, 1, 1, 6, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(96, 96, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
96, 192, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
BlockConfig.from_args(192, 192, 5, 1, 1, (1, 1), id_skip=False),
BlockConfig.from_args(
192, 320, 1, 1, 3, (1, 1), fused_expand=True, id_skip=False),
# pylint: enable=bad-whitespace
)
return autoseg_edgetpu_backbone_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
blocks_overrides=blocks,
dropout_rate=0.3,
drop_connect_rate=0.3)
def mobilenet_edgetpu_v2_tiny() -> ModelConfig:
"""MobilenetEdgeTPUV2 tiny model config."""
stem_base_filters = 32
stem_kernel_size = 5
top_base_filters = 1280
filter_sizes = [16, 32, 48, 80, 112, 160, 192]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
block_op_overrides = {
2: {
0: {'op_type': BlockType.from_args('ibn_fused_grouped')},
2: {'op_type': BlockType.from_args('ibn_fused_grouped')},
},
3: {
0: {'op_type': BlockType.from_args('ibn_fused_grouped')},
}
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides,
block_op_overrides=block_op_overrides,
dropout_rate=0.05,
drop_connect_rate=0.05)
def mobilenet_edgetpu_v2_xs() -> ModelConfig:
"""MobilenetEdgeTPUV2 extra small model config."""
stem_base_filters = 32
stem_kernel_size = 5
top_base_filters = 1280
filter_sizes = [16, 32, 48, 96, 144, 160, 192]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides,
dropout_rate=0.05,
drop_connect_rate=0.05)
def mobilenet_edgetpu_v2_s():
"""MobilenetEdgeTPUV2 small model config."""
stem_base_filters = 64
stem_kernel_size = 5
top_base_filters = 1280
filter_sizes = [24, 48, 64, 128, 160, 192, 256]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides)
def mobilenet_edgetpu_v2_m():
"""MobilenetEdgeTPUV2 medium model config."""
stem_base_filters = 64
stem_kernel_size = 5
top_base_filters = 1344
filter_sizes = [32, 64, 80, 160, 192, 240, 320]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
filter_size_overrides=filter_size_overrides)
def mobilenet_edgetpu_v2_l():
"""MobilenetEdgeTPUV2 large model config."""
stem_base_filters = 64
stem_kernel_size = 7
top_base_filters = 1408
filter_sizes = [32, 64, 96, 192, 240, 256, 384]
filter_size_overrides = {
k: v for (k, v) in zip(range(len(filter_sizes)), filter_sizes)
}
group_base_size = 128
return mobilenet_edgetpu_v2_base(
stem_base_filters=stem_base_filters,
stem_kernel_size=stem_kernel_size,
top_base_filters=top_base_filters,
group_base_size=group_base_size,
filter_size_overrides=filter_size_overrides)
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# Note: this is a truncated normal distribution
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def round_filters(filters: int,
config: ModelConfig) -> int:
"""Round number of filters based on width coefficient."""
width_coefficient = config.width_coefficient
min_depth = config.min_depth
divisor = config.depth_divisor
orig_filters = filters
if not width_coefficient:
return filters
filters *= width_coefficient
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
  # Make sure that rounding down does not reduce the filters by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
logging.info('round_filter input=%s output=%s', orig_filters, new_filters)
return int(new_filters)
def round_repeats(repeats: int, depth_coefficient: float) -> int:
"""Round number of repeats based on depth coefficient."""
return int(math.ceil(depth_coefficient * repeats))
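# Worked example (illustrative, not part of the original module): with
# width_coefficient=1.25 and the default divisor of 8, 64 filters become
# 64 * 1.25 = 80, already a multiple of 8, so no further rounding is applied.
def _example_rounding() -> Tuple[int, int]:
  """Illustrative only."""
  config = ModelConfig().replace(width_coefficient=1.25)
  return round_filters(64, config), round_repeats(3, 2.0)  # -> (80, 6)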
def groupconv2d_block(conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
group_size: Optional[int] = None,
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
name: Optional[str] = None) -> tf.keras.layers.Layer:
"""2D group convolution with batchnorm and activation."""
batch_norm = common_modules.get_batch_norm(config.batch_norm)
bn_momentum = config.bn_momentum
bn_epsilon = config.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
if group_size is None:
group_size = config.group_base_size
name = name or ''
# Compute the # of groups
if conv_filters % group_size != 0:
raise ValueError(f'Number of filters: {conv_filters} is not divisible by '
f'size of the groups: {group_size}')
groups = int(conv_filters / group_size)
# Collect args based on what kind of groupconv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_groupconv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
'filters': conv_filters,
'groups': groups,
'batch_norm_layer': batch_norm if use_batch_norm else None,
'bn_epsilon': bn_epsilon,
'bn_momentum': bn_momentum,
'activation': activation,
'data_format': data_format,
}
return custom_layers.GroupConv2D(**init_kwargs)
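# A minimal sketch (illustrative, not part of the original module): with the
# default group_base_size of 64, a 128-filter block is split into 2 groups.
def _example_groupconv_block() -> tf.keras.layers.Layer:
  """Illustrative only; the layer name is arbitrary."""
  return groupconv2d_block(
      conv_filters=128, config=ModelConfig(), kernel_size=(3, 3),
      activation='relu', name='example')  # 128 / 64 -> groups == 2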
def conv2d_block_as_layers(
conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
kernel_initializer: InitializerType = None,
name: Optional[str] = None) -> List[tf.keras.layers.Layer]:
"""A conv2d followed by batch norm and an activation."""
batch_norm = common_modules.get_batch_norm(config.batch_norm)
bn_momentum = config.bn_momentum
bn_epsilon = config.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
name = name or ''
# Collect args based on what kind of conv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_conv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
}
sequential_layers: List[tf.keras.layers.Layer] = []
if depthwise:
conv2d = tf.keras.layers.DepthwiseConv2D
init_kwargs.update({'depthwise_initializer': kernel_initializer})
else:
conv2d = tf.keras.layers.Conv2D
init_kwargs.update({
'filters': conv_filters,
'kernel_initializer': kernel_initializer
})
sequential_layers.append(conv2d(**init_kwargs))
if use_batch_norm:
bn_axis = 1 if data_format == 'channels_first' else -1
sequential_layers.append(
batch_norm(
axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + '_bn'))
if activation is not None:
sequential_layers.append(
tf.keras.layers.Activation(activation, name=name + '_activation'))
return sequential_layers
def conv2d_block(inputs: tf.Tensor,
conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
kernel_initializer: Optional[InitializerType] = None,
name: Optional[str] = None) -> tf.Tensor:
"""Compatibility with third_party/car/deep_nets."""
x = inputs
for layer in conv2d_block_as_layers(
conv_filters=conv_filters,
config=config,
kernel_size=kernel_size,
strides=strides,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
activation=activation,
depthwise=depthwise,
kernel_initializer=kernel_initializer,
name=name):
x = layer(x)
return x
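# Illustrative usage (not part of the original module): a stem-like 3x3
# stride-2 conv + BN + ReLU applied to a dummy image batch.
def _example_conv2d_block() -> tf.Tensor:
  """Illustrative only; the input shape is arbitrary."""
  images = tf.zeros([1, 64, 64, 3])
  return conv2d_block(
      inputs=images, conv_filters=32, config=ModelConfig(),
      kernel_size=(3, 3), strides=(2, 2), activation='relu', name='example')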
# Do not inherit from tf.keras.layers.Layer; doing so breaks weights loading.
class _MbConvBlock:
"""Mobile Inverted Residual Bottleneck composite layer."""
def __call__(self, inputs: tf.Tensor, training=False):
x = inputs
for layer in self.expand_block:
x = layer(x)
if self.squeeze_excitation:
se = x
for layer in self.squeeze_excitation:
se = layer(se)
x = tf.keras.layers.multiply([x, se], name=self.name + 'se_excite')
for layer in self.project_block:
x = layer(x)
if self.has_skip_add:
x = tf.keras.layers.add([x, inputs], name=self.name + 'add')
return x
def __init__(self,
block: BlockConfig,
config: ModelConfig,
prefix: Optional[str] = None):
"""Mobile Inverted Residual Bottleneck.
Args:
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
"""
use_se = config.use_se
activation = tf_utils.get_activation(config.activation)
drop_connect_rate = config.drop_connect_rate
data_format = tf.keras.backend.image_data_format()
use_depthwise = block.conv_type == 'depthwise'
use_groupconv = block.conv_type == 'group'
prefix = prefix or ''
self.name = prefix
conv_kernel_initializer = (
config.conv_kernel_initializer if config.conv_kernel_initializer
is not None else CONV_KERNEL_INITIALIZER)
filters = block.input_filters * block.expand_ratio
self.expand_block: List[tf.keras.layers.Layer] = []
self.squeeze_excitation: List[tf.keras.layers.Layer] = []
self.project_block: List[tf.keras.layers.Layer] = []
if block.fused_project:
raise NotImplementedError('Fused projection is not supported.')
if block.fused_expand and block.expand_ratio != 1:
# If we use fused mbconv, fuse expansion with the main kernel.
# If conv_type is depthwise we still fuse it to a full conv.
if use_groupconv:
self.expand_block.append(groupconv2d_block(
filters,
config,
kernel_size=block.kernel_size,
strides=block.strides,
group_size=block.group_size,
activation=activation,
name=prefix + 'fused'))
else:
self.expand_block.extend(
conv2d_block_as_layers(
conv_filters=filters,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'fused'))
else:
if block.expand_ratio != 1:
# Expansion phase with a pointwise conv
self.expand_block.extend(
conv2d_block_as_layers(
conv_filters=filters,
config=config,
kernel_size=(1, 1),
activation=activation,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'expand'))
# Main kernel, after the expansion (if applicable, i.e. not fused).
if use_depthwise:
self.expand_block.extend(conv2d_block_as_layers(
conv_filters=filters,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
kernel_initializer=conv_kernel_initializer,
depthwise=True,
name=prefix + 'depthwise'))
elif use_groupconv:
self.expand_block.append(groupconv2d_block(
conv_filters=filters,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
group_size=block.group_size,
activation=activation,
name=prefix + 'group'))
# Squeeze and Excitation phase
if use_se:
assert block.se_ratio is not None
assert 0 < block.se_ratio <= 1
num_reduced_filters = max(1, int(
block.input_filters * block.se_ratio
))
if data_format == 'channels_first':
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
self.squeeze_excitation.append(
tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze'))
self.squeeze_excitation.append(
tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape'))
self.squeeze_excitation.extend(
conv2d_block_as_layers(
conv_filters=num_reduced_filters,
config=config,
use_bias=True,
use_batch_norm=False,
activation=activation,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'se_reduce'))
self.squeeze_excitation.extend(
conv2d_block_as_layers(
conv_filters=filters,
config=config,
use_bias=True,
use_batch_norm=False,
activation='sigmoid',
kernel_initializer=conv_kernel_initializer,
name=prefix + 'se_expand'))
# Output phase
self.project_block.extend(
conv2d_block_as_layers(
conv_filters=block.output_filters,
config=config,
activation=None,
kernel_initializer=conv_kernel_initializer,
name=prefix + 'project'))
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
self.project_block.append(
tf.keras.layers.Activation('linear', name=prefix + 'id'))
self.has_skip_add = False
if (block.id_skip
and all(s == 1 for s in block.strides)
and block.input_filters == block.output_filters):
self.has_skip_add = True
if drop_connect_rate and drop_connect_rate > 0:
# Apply dropconnect
# The only difference between dropout and dropconnect in TF is scaling
# by drop_connect_rate during training. See:
# https://github.com/keras-team/keras/pull/9898#issuecomment-380577612
self.project_block.append(
tf.keras.layers.Dropout(
drop_connect_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop'))
def mb_conv_block(inputs: tf.Tensor,
block: BlockConfig,
config: ModelConfig,
prefix: Optional[str] = None) -> tf.Tensor:
"""Mobile Inverted Residual Bottleneck.
Args:
inputs: the Keras input to the block
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
Returns:
the output of the block
"""
return _MbConvBlock(block, config, prefix)(inputs)
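# A minimal sketch (illustrative, not part of the original module): one
# depthwise IBN block on a dummy feature map; filter counts are arbitrary.
def _example_mb_conv_block() -> tf.Tensor:
  """Illustrative only."""
  block = BlockConfig.from_args(32, 16, 3, 1, 4, (1, 1))
  features = tf.zeros([1, 28, 28, 32])
  return mb_conv_block(features, block, ModelConfig(), prefix='example/')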
def mobilenet_edgetpu_v2(image_input: tf.keras.layers.Input,
config: ModelConfig): # pytype: disable=invalid-annotation # typed-keras
"""Creates a MobilenetEdgeTPUV2 graph given the model parameters.
This function is wrapped by the `MobilenetEdgeTPUV2` class to make a
tf.keras.Model.
Args:
image_input: the input batch of images
config: the model config
Returns:
    The output of the classification model or, if `config.backbone_only` is
    set, a list of backbone feature level tensors.
"""
depth_coefficient = config.depth_coefficient
blocks = config.blocks
stem_base_filters = config.stem_base_filters
stem_kernel_size = config.stem_kernel_size
top_base_filters = config.top_base_filters
activation = tf_utils.get_activation(config.activation)
dropout_rate = config.dropout_rate
drop_connect_rate = config.drop_connect_rate
conv_kernel_initializer = (
config.conv_kernel_initializer if config.conv_kernel_initializer
is not None else CONV_KERNEL_INITIALIZER)
dense_kernel_initializer = (
config.dense_kernel_initializer if config.dense_kernel_initializer
is not None else DENSE_KERNEL_INITIALIZER)
num_classes = config.num_classes
input_channels = config.input_channels
rescale_input = config.rescale_input
data_format = tf.keras.backend.image_data_format()
dtype = config.dtype
weight_decay = config.weight_decay
x = image_input
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
x = common_modules.normalize_images(
x, num_channels=input_channels, dtype=dtype, data_format=data_format)
# Build stem
x = conv2d_block(
inputs=x,
conv_filters=round_filters(stem_base_filters, config),
config=config,
kernel_size=[stem_kernel_size, stem_kernel_size],
strides=[2, 2],
activation=activation,
kernel_initializer=conv_kernel_initializer,
name='stem')
# Build blocks
num_blocks_total = sum(block.num_repeat for block in blocks)
block_num = 0
backbone_levels = []
for stack_idx, block in enumerate(blocks):
is_reduction = False
assert block.num_repeat > 0
# Update block input and output filters based on depth multiplier
block = block.replace(
input_filters=round_filters(block.input_filters, config),
output_filters=round_filters(block.output_filters, config),
num_repeat=round_repeats(block.num_repeat, depth_coefficient))
if stack_idx == 0:
backbone_levels.append(x)
elif (stack_idx == len(blocks) - 1) or (blocks[stack_idx + 1].strides
== (2, 2)):
is_reduction = True
# The first block needs to take care of stride and filter size increase
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = _MbConvBlock(block, config, block_prefix)(x)
block_num += 1
if block.num_repeat > 1:
block = block.replace(
input_filters=block.output_filters,
strides=[1, 1]
)
for block_idx in range(block.num_repeat - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = _MbConvBlock(block, config, prefix=block_prefix)(x)
block_num += 1
if is_reduction:
backbone_levels.append(x)
if config.backbone_only:
return backbone_levels
# Build top
x = conv2d_block(
inputs=x,
conv_filters=round_filters(top_base_filters, config),
config=config,
activation=activation,
kernel_initializer=conv_kernel_initializer,
name='top')
# Build classifier
pool_size = (x.shape.as_list()[1], x.shape.as_list()[2])
x = tf.keras.layers.AveragePooling2D(pool_size, name='top_pool')(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Conv2D(
num_classes,
1,
kernel_initializer=dense_kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
name='logits')(
x)
x = tf.keras.layers.Activation('softmax', name='probs')(x)
x = tf.squeeze(x, axis=[1, 2])
return x
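# Illustrative end-to-end usage (not part of the original module), mirroring
# how the accompanying tests build a classifier from this graph function.
def _example_build_model() -> tf.keras.Model:
  """Illustrative only."""
  config = mobilenet_edgetpu_v2_s()
  image_input = tf.keras.layers.Input(shape=(224, 224, 3))
  output = mobilenet_edgetpu_v2(image_input, config)
  return tf.keras.Model(inputs=image_input, outputs=output)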
| 40,716 | 35.225089 | 99 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilenet_edgetpu_v2_model_blocks."""
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import custom_layers
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model_blocks
class MobilenetEdgetpuV2ModelBlocksTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.model_config = mobilenet_edgetpu_v2_model_blocks.ModelConfig()
  def test_model_creation(self):
model_input = tf.keras.layers.Input(shape=(224, 224, 1))
model_output = mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2(
image_input=model_input,
config=self.model_config)
test_model = tf.keras.Model(inputs=model_input, outputs=model_output)
self.assertIsInstance(test_model, tf.keras.Model)
self.assertEqual(test_model.input.shape, (None, 224, 224, 1))
self.assertEqual(test_model.output.shape, (None, 1001))
def test_model_with_customized_kernel_initializer(self):
self.model_config.conv_kernel_initializer = 'he_uniform'
self.model_config.dense_kernel_initializer = 'glorot_normal'
model_input = tf.keras.layers.Input(shape=(224, 224, 1))
model_output = mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2(
image_input=model_input,
config=self.model_config)
test_model = tf.keras.Model(inputs=model_input, outputs=model_output)
conv_layer_stack = []
for layer in test_model.layers:
if (isinstance(layer, tf.keras.layers.Conv2D) or
isinstance(layer, tf.keras.layers.DepthwiseConv2D) or
isinstance(layer, custom_layers.GroupConv2D)):
conv_layer_stack.append(layer)
self.assertGreater(len(conv_layer_stack), 2)
# The last Conv layer is used as a Dense layer.
for layer in conv_layer_stack[:-1]:
if isinstance(layer, custom_layers.GroupConv2D):
self.assertIsInstance(layer.kernel_initializer,
tf.keras.initializers.GlorotUniform)
elif isinstance(layer, tf.keras.layers.Conv2D):
self.assertIsInstance(layer.kernel_initializer,
tf.keras.initializers.HeUniform)
elif isinstance(layer, tf.keras.layers.DepthwiseConv2D):
self.assertIsInstance(layer.depthwise_initializer,
tf.keras.initializers.HeUniform)
self.assertIsInstance(conv_layer_stack[-1].kernel_initializer,
tf.keras.initializers.GlorotNormal)
if __name__ == '__main__':
tf.test.main()
| 3,102 | 41.506849 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilenet_edgetpu model."""
import os
from absl.testing import parameterized
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model
class MobilenetEdgeTPUV2BuildTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
    super().setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def test_create_mobilenet_edgetpu(self):
model = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2()
self.assertEqual(common_modules.count_params(model), 6069657)
def test_export_tflite(self):
model = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2()
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tmp_dir = self.create_tempdir()
output_tflite = os.path.join(tmp_dir, 'model_quant.tflite')
tflite_buffer = converter.convert()
tf.io.gfile.GFile(output_tflite, 'wb').write(tflite_buffer)
self.assertTrue(tf.io.gfile.exists(output_tflite))
def test_model_save_load(self):
"""Serializes and de-serializeds the model."""
model_builder = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2
model = model_builder.from_name(model_name='mobilenet_edgetpu_v2')
    # The model always has a conv2d layer right after the input layer, and we
    # compare this layer's weight parameters between the original model and
    # the saved-then-loaded model.
first_conv_layer = model.get_layer('stem_conv2d')
kernel_tensor = first_conv_layer.trainable_weights[0].numpy()
model.save('/tmp/test_model')
loaded_model = tf.keras.models.load_model('/tmp/test_model')
loaded_first_conv_layer = loaded_model.get_layer('stem_conv2d')
loaded_kernel_tensor = loaded_first_conv_layer.trainable_weights[0].numpy()
self.assertAllClose(kernel_tensor, loaded_kernel_tensor)
def test_model_initialization_failure(self):
"""Tests model can only be initialized with predefined model name."""
model_builder = mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2
with self.assertRaises(ValueError):
_ = model_builder.from_name(model_name='undefined_model_name')
if __name__ == '__main__':
tf.test.main()
| 2,909 | 39.416667 | 80 | py |
models | models-master/official/projects/edgetpu/vision/modeling/optimized_multiheadattention_layer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MultiHeadAttention layer optimized for EdgeTPU.
Compared to tf.keras.layers.MultiHeadAttention, this layer performs query-key
multiplication instead of key-query multiplication to remove an unnecessary
transpose.
"""
import math
import string
from typing import Optional, Tuple
import numpy as np
import tensorflow as tf
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(
rank: int, attn_axes: Tuple[int, ...]) -> Tuple[str, str, int]:
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch
dims>, <query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`, that attention will be
applied to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = "%s,%s->%s" % (
target_notation,
source_notation,
product_notation,
)
attn_scores_rank = len(product_notation)
combine_equation = "%s,%s->%s" % (
product_notation,
source_notation,
target_notation,
)
return dot_product_equation, combine_equation, attn_scores_rank
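# Worked example (illustrative): for self-attention inputs of shape
# (batch, seq, heads, channels), i.e. rank=4 and attn_axes=(1,), this helper
# returns:
#   dot_product_equation == 'abcd,aecd->acbe'  # (B,T,N,H),(B,S,N,H)->(B,N,T,S)
#   combine_equation == 'acbe,aecd->abcd'      # (B,N,T,S),(B,S,N,H)->(B,T,N,H)
#   attn_scores_rank == 4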
class OptimizedMultiHeadAttention(tf.keras.layers.MultiHeadAttention):
"""MultiHeadAttention with query-key multiplication.
Currently, this layer only works for self-attention but not for
cross-attention. TODO(b/243166060).
"""
def _build_attention(self, rank: int) -> None:
"""Builds multi-head dot-product attention computations.
This function builds attributes necessary for `_compute_attention` to
customize attention computation to replace the default dot-product
attention.
Args:
rank: the rank of query, key, value tensors.
"""
if self._attention_axes is None:
self._attention_axes = tuple(range(1, rank - 2))
else:
self._attention_axes = tuple(self._attention_axes)
(
self._dot_product_equation,
self._combine_equation,
attn_scores_rank,
) = _build_attention_equation(
rank, attn_axes=self._attention_axes)
norm_axes = tuple(
range(attn_scores_rank - len(self._attention_axes), attn_scores_rank))
self._softmax = tf.keras.layers.Softmax(axis=norm_axes)
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
def _compute_attention(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for
customized attention implementation.
Args:
query: Projected query `Tensor` of shape `(B, T, N, key_dim)`.
key: Projected key `Tensor` of shape `(B, S, N, key_dim)`.
value: Projected value `Tensor` of shape `(B, S, N, value_dim)`.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions. It is generally not needed if the
`query` and `value` (and/or `key`) are masked.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum(self._dot_product_equation, query, key)
attention_scores = self._masked_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_scores_dropout = self._dropout_layer(
attention_scores, training=training)
# `context_layer` = [B, T, N, H]
attention_output = tf.einsum(self._combine_equation,
attention_scores_dropout, value)
return attention_output, attention_scores
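# Illustrative usage (not part of the original module): self-attention over a
# dummy sequence; all shapes are arbitrary.
def _example_self_attention() -> tf.Tensor:
  """Illustrative only."""
  layer = OptimizedMultiHeadAttention(num_heads=4, key_dim=16)
  x = tf.zeros([2, 8, 64])  # (batch, seq_len, features)
  return layer(query=x, value=x)  # key defaults to value for self-attention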
| 6,277 | 37.048485 | 78 | py |
models | models-master/official/projects/edgetpu/vision/modeling/common_modules.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common modeling utilities."""
from typing import Optional, Tuple
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from tensorflow.python.tpu import tpu_function # pylint: disable=g-direct-tensorflow-import
MEAN_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)
STDDEV_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)
@tf.keras.utils.register_keras_serializable(package='Vision')
class TpuBatchNormalization(tf.keras.layers.BatchNormalization):
"""Cross replica batch normalization."""
def __init__(self, fused: Optional[bool] = False, **kwargs):
if fused in (True, None):
raise ValueError('TpuBatchNormalization does not support fused=True.')
super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)
def _cross_replica_average(self, t: tf.Tensor, num_shards_per_group: int):
"""Calculates the average value of input tensor across TPU replicas."""
num_shards = tpu_function.get_tpu_context().number_of_shards
group_assignment = None
if num_shards_per_group > 1:
if num_shards % num_shards_per_group != 0:
raise ValueError(
'num_shards: %d mod shards_per_group: %d, should be 0' %
(num_shards, num_shards_per_group))
num_groups = num_shards // num_shards_per_group
group_assignment = [[
x for x in range(num_shards) if x // num_shards_per_group == y
] for y in range(num_groups)]
return tf1.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
num_shards_per_group, t.dtype)
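  # Worked example (illustrative): with num_shards=16 and
  # num_shards_per_group=8, group_assignment becomes
  # [[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]], so statistics
  # are averaged within each group of 8 replicas rather than across all 16.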
def _moments(self,
inputs: tf.Tensor,
               reduction_axes,
               keep_dims: bool,
mask: Optional[tf.Tensor] = None):
"""Compute the mean and variance: it overrides the original _moments."""
shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
inputs, reduction_axes, keep_dims=keep_dims, mask=mask)
num_shards = tpu_function.get_tpu_context().number_of_shards or 1
if num_shards <= 8: # Skip cross_replica for 2x2 or smaller slices.
num_shards_per_group = 1
else:
num_shards_per_group = max(8, num_shards // 8)
if num_shards_per_group > 1:
# Compute variance using: Var[X]= E[X^2] - E[X]^2.
shard_square_of_mean = tf.math.square(shard_mean)
shard_mean_of_square = shard_variance + shard_square_of_mean
group_mean = self._cross_replica_average(shard_mean, num_shards_per_group)
group_mean_of_square = self._cross_replica_average(
shard_mean_of_square, num_shards_per_group)
group_variance = group_mean_of_square - tf.math.square(group_mean)
return (group_mean, group_variance)
else:
return (shard_mean, shard_variance)
def get_batch_norm(batch_norm_type: str) -> tf.keras.layers.BatchNormalization:
"""A helper to create a batch normalization getter.
Args:
batch_norm_type: The type of batch normalization layer implementation. `tpu`
will use `TpuBatchNormalization`.
Returns:
    The batch normalization class to use: `TpuBatchNormalization` when
    `batch_norm_type` is `'tpu'`, `tf.keras.layers.BatchNormalization`
    otherwise.
"""
if batch_norm_type == 'tpu':
return TpuBatchNormalization
return tf.keras.layers.BatchNormalization # pytype: disable=bad-return-type # typed-keras
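# Illustrative usage (not part of the original module):
#   batch_norm_cls = get_batch_norm('tpu')  # -> TpuBatchNormalization
#   bn = batch_norm_cls(momentum=0.99, epsilon=1e-3)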
def count_params(model, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return model.count_params()
else:
return int(np.sum([tf.keras.backend.count_params(p)
for p in model.trainable_weights]))
def load_weights(model: tf.keras.Model,
model_weights_path: str,
checkpoint_format: str = 'tf_checkpoint'):
"""Load model weights from the given file path.
Args:
model: the model to load weights into
model_weights_path: the path of the model weights
    checkpoint_format: The source of the checkpoint files. By default, we
      assume the checkpoint was saved by tf.train.Checkpoint().save(). For
      legacy reasons, we can also restore a checkpoint saved by the Keras
      model.save_weights() method by setting checkpoint_format =
      'keras_checkpoint'.
"""
if checkpoint_format == 'tf_checkpoint':
checkpoint_dict = {'model': model}
checkpoint = tf.train.Checkpoint(**checkpoint_dict)
checkpoint.restore(model_weights_path).assert_existing_objects_matched()
elif checkpoint_format == 'keras_checkpoint':
    # The assert makes sure the load is successful.
model.load_weights(model_weights_path).assert_existing_objects_matched()
else:
raise ValueError(f'Unsupported checkpoint format {checkpoint_format}.')
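# Illustrative usage (not part of the original module); the checkpoint path is
# hypothetical:
#   load_weights(model, '/tmp/ckpt/model-1', checkpoint_format='tf_checkpoint')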
def normalize_images(
features: tf.Tensor,
num_channels: int = 3,
dtype: str = 'float32',
data_format: str = 'channels_last',
mean_rgb: Tuple[float, ...] = MEAN_RGB,
stddev_rgb: Tuple[float, ...] = STDDEV_RGB,
) -> tf.Tensor:
"""Normalizes the input image channels with the given mean and stddev.
Args:
features: `Tensor` representing decoded images in float format.
num_channels: the number of channels in the input image tensor.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
data_format: the format of the input image tensor ['channels_first',
'channels_last'].
mean_rgb: the mean of the channels to subtract.
stddev_rgb: the stddev of the channels to divide.
Returns:
A normalized image `Tensor`.
"""
if data_format == 'channels_first':
stats_shape = [num_channels, 1, 1]
else:
stats_shape = [1, 1, num_channels]
if dtype is not None:
if dtype == 'bfloat16':
features = tf.image.convert_image_dtype(features, dtype=tf.bfloat16)
if mean_rgb is not None:
mean_rgb = tf.constant(mean_rgb, shape=stats_shape, dtype=features.dtype)
mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features))
features = features - mean_rgb
if stddev_rgb is not None:
stddev_rgb = tf.constant(
stddev_rgb, shape=stats_shape, dtype=features.dtype)
stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features))
features = features / stddev_rgb
return features
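# Illustrative usage (not part of the original module): with the module
# defaults (mean and stddev 127.5 per channel), a [0, 255] image batch is
# mapped to roughly [-1, 1].
def _example_normalize() -> tf.Tensor:
  """Illustrative only."""
  images = tf.random.uniform([2, 224, 224, 3], maxval=255.0)
  return normalize_images(images)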
| 6,720 | 37.849711 | 93 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPU image classification models."""
import dataclasses
import math
from typing import Any, Optional, Tuple, Union
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.projects.edgetpu.vision.modeling import common_modules
@dataclasses.dataclass
class BlockConfig(base_config.Config):
"""Config for a single MB Conv Block."""
input_filters: int = 0
output_filters: int = 0
kernel_size: int = 3
num_repeat: int = 1
expand_ratio: int = 1
strides: Tuple[int, int] = (1, 1)
se_ratio: Optional[float] = None
id_skip: bool = True
fused_conv: bool = False
conv_type: str = 'depthwise'
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""Default Config for MobilenetEdgeTPU."""
width_coefficient: float = 1.0
depth_coefficient: float = 1.0
resolution: Union[int, Tuple[int, int]] = 224
dropout_rate: float = 0.1
blocks: Tuple[BlockConfig, ...] = (
# (input_filters, output_filters, kernel_size, num_repeat,
# expand_ratio, strides, se_ratio, id_skip, fused_conv, conv_type)
# pylint: disable=bad-whitespace
BlockConfig.from_args(32, 16, 3, 1, 1, (1, 1), conv_type='no_depthwise'),
BlockConfig.from_args(16, 32, 3, 1, 8, (2, 2), fused_conv=True),
BlockConfig.from_args(32, 32, 3, 3, 4, (1, 1), conv_type='no_depthwise'),
BlockConfig.from_args(32, 48, 3, 1, 8, (2, 2), fused_conv=True),
BlockConfig.from_args(48, 48, 3, 3, 4, (1, 1), conv_type='no_depthwise'),
BlockConfig.from_args(48, 96, 3, 1, 8, (2, 2)),
BlockConfig.from_args(96, 96, 3, 3, 4, (1, 1)),
BlockConfig.from_args(96, 96, 3, 1, 8, (1, 1), id_skip=False),
BlockConfig.from_args(96, 96, 3, 3, 4, (1, 1)),
BlockConfig.from_args(96, 160, 5, 1, 8, (2, 2)),
BlockConfig.from_args(160, 160, 5, 3, 4, (1, 1)),
BlockConfig.from_args(160, 192, 3, 1, 8, (1, 1)),
# pylint: enable=bad-whitespace
)
stem_base_filters: int = 32
top_base_filters: int = 1280
activation: str = 'relu'
batch_norm: str = 'default'
bn_momentum: float = 0.99
bn_epsilon: float = 1e-3
# While the original implementation used a weight decay of 1e-5,
# tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras
weight_decay: float = 5e-6
drop_connect_rate: float = 0.1
depth_divisor: int = 8
min_depth: Optional[int] = None
# No Squeeze/Excite for MobilenetEdgeTPU
use_se: bool = False
input_channels: int = 3
num_classes: int = 1001
model_name: str = 'mobilenet_edgetpu'
rescale_input: bool = False
data_format: str = 'channels_last'
dtype: str = 'float32'
backbone_only: bool = False
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# Note: this is a truncated normal distribution
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
# TODO(longy): Reuse the utility functions for V1/V2 models.
def round_filters(filters: int,
config: ModelConfig) -> int:
"""Round number of filters based on width coefficient."""
width_coefficient = config.width_coefficient
min_depth = config.min_depth
divisor = config.depth_divisor
orig_filters = filters
if not width_coefficient:
return filters
filters *= width_coefficient
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
  # Make sure that rounding down does not reduce the filter count by more
  # than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
logging.info('round_filter input=%s output=%s', orig_filters, new_filters)
return int(new_filters)
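# Worked example (illustrative numbers, using the 1.25 width multiplier from
# the dm1p25 config): with width_coefficient=1.25 and depth_divisor=8,
# round_filters(32, config) computes 32 * 1.25 = 40, already a multiple of 8,
# so 40 is returned. round_filters(48, config) computes 48 * 1.25 = 60, which
# rounds to 64; since 64 >= 0.9 * 60, no divisor correction is applied.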
def round_repeats(repeats: int, depth_coefficient: float) -> int:
"""Round number of repeats based on depth coefficient."""
return int(math.ceil(depth_coefficient * repeats))
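# For example, round_repeats(3, depth_coefficient=1.2) returns
# math.ceil(1.2 * 3) = 4; scaling always rounds the repeat count up.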
def conv2d_block(inputs: tf.Tensor,
conv_filters: Optional[int],
config: ModelConfig,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
name: Optional[str] = None):
"""A conv2d followed by batch norm and an activation."""
batch_norm = common_modules.get_batch_norm(config.batch_norm)
bn_momentum = config.bn_momentum
bn_epsilon = config.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
name = name or ''
# Collect args based on what kind of conv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_conv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
}
if depthwise:
conv2d = tf.keras.layers.DepthwiseConv2D
init_kwargs.update({'depthwise_initializer': CONV_KERNEL_INITIALIZER})
else:
conv2d = tf.keras.layers.Conv2D
init_kwargs.update({'filters': conv_filters,
'kernel_initializer': CONV_KERNEL_INITIALIZER})
x = conv2d(**init_kwargs)(inputs)
if use_batch_norm:
bn_axis = 1 if data_format == 'channels_first' else -1
x = batch_norm(axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + '_bn')(x)
if activation is not None:
x = tf.keras.layers.Activation(activation,
name=name + '_activation')(x)
return x
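# Example usage (a minimal sketch; the shapes and config below are
# illustrative, not taken from a real training setup):
#
#   config = ModelConfig()
#   images = tf.ones([1, 224, 224, 3])
#   x = conv2d_block(images, conv_filters=32, config=config,
#                    kernel_size=(3, 3), strides=(2, 2), activation='relu',
#                    name='stem')  # -> shape [1, 112, 112, 32]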
def mb_conv_block(inputs: tf.Tensor,
block: BlockConfig,
config: ModelConfig,
prefix: Optional[str] = None):
"""Mobile Inverted Residual Bottleneck.
Args:
inputs: the Keras input to the block
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
Returns:
the output of the block
"""
use_se = config.use_se
activation = tf_utils.get_activation(config.activation)
drop_connect_rate = config.drop_connect_rate
data_format = tf.keras.backend.image_data_format()
use_depthwise = block.conv_type == 'depthwise'
prefix = prefix or ''
filters = block.input_filters * block.expand_ratio
x = inputs
if block.fused_conv:
# If we use fused mbconv, skip expansion and use regular conv.
x = conv2d_block(x,
filters,
config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
name=prefix + 'fused')
else:
if block.expand_ratio != 1:
# Expansion phase
kernel_size = (1, 1) if use_depthwise else (3, 3)
x = conv2d_block(x,
filters,
config,
kernel_size=kernel_size,
activation=activation,
name=prefix + 'expand')
# Depthwise Convolution
if use_depthwise:
x = conv2d_block(x,
conv_filters=None,
config=config,
kernel_size=block.kernel_size,
strides=block.strides,
activation=activation,
depthwise=True,
name=prefix + 'depthwise')
# Squeeze and Excitation phase
if use_se:
assert block.se_ratio is not None
assert 0 < block.se_ratio <= 1
num_reduced_filters = max(1, int(
block.input_filters * block.se_ratio
))
if data_format == 'channels_first':
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)
se = tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape')(se)
se = conv2d_block(se,
num_reduced_filters,
config,
use_bias=True,
use_batch_norm=False,
activation=activation,
name=prefix + 'se_reduce')
se = conv2d_block(se,
filters,
config,
use_bias=True,
use_batch_norm=False,
activation='sigmoid',
name=prefix + 'se_expand')
x = tf.keras.layers.multiply([x, se], name=prefix + 'se_excite')
# Output phase
x = conv2d_block(x,
block.output_filters,
config,
activation=None,
name=prefix + 'project')
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
x = tf.keras.layers.Activation('linear', name=prefix + 'id')(x)
if (block.id_skip
and all(s == 1 for s in block.strides)
and block.input_filters == block.output_filters):
if drop_connect_rate and drop_connect_rate > 0:
# Apply dropconnect
# The only difference between dropout and dropconnect in TF is scaling by
# drop_connect_rate during training. See:
# https://github.com/keras-team/keras/pull/9898#issuecomment-380577612
x = tf.keras.layers.Dropout(drop_connect_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop')(x)
x = tf.keras.layers.add([x, inputs], name=prefix + 'add')
return x
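# Example usage (a minimal sketch with illustrative values): an expand_ratio
# of 1 skips the expansion conv, and since input_filters != output_filters no
# residual connection is added:
#
#   block = BlockConfig.from_args(32, 16, 3, 1, 1, (1, 1))
#   y = mb_conv_block(tf.ones([1, 112, 112, 32]), block, ModelConfig(),
#                     prefix='stack_0/block_0/')  # -> shape [1, 112, 112, 16]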
def mobilenet_edgetpu(image_input: tf.keras.layers.Input, config: ModelConfig): # pytype: disable=invalid-annotation # typed-keras
"""Creates a MobilenetEdgeTPU graph given the model parameters.
This function is wrapped by the `MobilenetEdgeTPU` class to make a
tf.keras.Model.
Args:
image_input: the input batch of images
config: the model config
Returns:
    The output of the classification model or, if only the backbone is needed,
    a dictionary with backbone feature levels.
"""
depth_coefficient = config.depth_coefficient
blocks = config.blocks
stem_base_filters = config.stem_base_filters
top_base_filters = config.top_base_filters
activation = tf_utils.get_activation(config.activation)
dropout_rate = config.dropout_rate
drop_connect_rate = config.drop_connect_rate
num_classes = config.num_classes
input_channels = config.input_channels
rescale_input = config.rescale_input
data_format = tf.keras.backend.image_data_format()
dtype = config.dtype
weight_decay = config.weight_decay
x = image_input
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
x = common_modules.normalize_images(
x, num_channels=input_channels, dtype=dtype, data_format=data_format)
# Build stem
x = conv2d_block(x,
round_filters(stem_base_filters, config),
config,
kernel_size=[3, 3],
strides=[2, 2],
activation=activation,
name='stem')
# Build blocks
num_blocks_total = sum(block.num_repeat for block in blocks)
block_num = 0
backbone_levels = {}
for stack_idx, block in enumerate(blocks):
assert block.num_repeat > 0
# Update block input and output filters based on depth multiplier
block = block.replace(
input_filters=round_filters(block.input_filters, config),
output_filters=round_filters(block.output_filters, config),
num_repeat=round_repeats(block.num_repeat, depth_coefficient))
# The first block needs to take care of stride and filter size increase
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = mb_conv_block(x, block, config, block_prefix)
block_num += 1
if block.num_repeat > 1:
block = block.replace(
input_filters=block.output_filters,
strides=[1, 1]
)
for block_idx in range(block.num_repeat - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config = config.replace(drop_connect_rate=drop_rate)
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = mb_conv_block(x, block, config, prefix=block_prefix)
block_num += 1
backbone_levels[str(stack_idx)] = x
if config.backbone_only:
return backbone_levels
# Build top
x = conv2d_block(x,
round_filters(top_base_filters, config),
config,
activation=activation,
name='top')
# Build classifier
pool_size = (x.shape.as_list()[1], x.shape.as_list()[2])
x = tf.keras.layers.AveragePooling2D(pool_size, name='top_pool')(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Conv2D(
num_classes,
1,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
      name='logits')(x)
x = tf.keras.layers.Activation('softmax', name='probs')(x)
x = tf.squeeze(x, axis=[1, 2])
return x
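# Example usage (a minimal sketch; the `MobilenetEdgeTPU` wrapper class is the
# intended entry point, but the graph function can be called directly):
#
#   config = ModelConfig()
#   image_input = tf.keras.layers.Input(shape=(224, 224, 3))
#   probs = mobilenet_edgetpu(image_input, config)  # shape (None, 1001)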
| 14,355 | 33.676329 | 132 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPU image classification models."""
from typing import Any, Dict, Optional, Text
# Import libraries
from absl import logging
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model_blocks
ModelConfig = mobilenet_edgetpu_v1_model_blocks.ModelConfig
MODEL_CONFIGS = {
# (width, depth, resolution, dropout)
'mobilenet_edgetpu': ModelConfig.from_args(1.0, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm0p75': ModelConfig.from_args(0.75, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm1p25': ModelConfig.from_args(1.25, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm1p5': ModelConfig.from_args(1.5, 1.0, 224, 0.1),
'mobilenet_edgetpu_dm1p75': ModelConfig.from_args(1.75, 1.0, 224, 0.1)
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobilenetEdgeTPU(tf.keras.Model):
"""Wrapper class for a MobilenetEdgeTPU Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
config: Optional[ModelConfig] = None,
overrides: Optional[Dict[Text, Any]] = None):
"""Create a MobilenetEdgeTPU model.
Args:
config: (optional) the main model parameters to create the model
overrides: (optional) a dict containing keys that can override config
"""
overrides = overrides or {}
config = config or ModelConfig()
self.config = config.replace(**overrides)
input_channels = self.config.input_channels
model_name = self.config.model_name
if isinstance(self.config.resolution, tuple):
input_shape = (self.config.resolution[0], self.config.resolution[1],
input_channels)
else:
input_shape = (self.config.resolution, self.config.resolution,
input_channels)
image_input = tf.keras.layers.Input(shape=input_shape)
output = mobilenet_edgetpu_v1_model_blocks.mobilenet_edgetpu(
image_input, self.config)
if not isinstance(output, dict):
# Cast to float32 in case we have a different model dtype
output = tf.cast(output, tf.float32)
self._output_specs = output.get_shape()
else:
self._output_specs = {
feature: output[feature].get_shape() for feature in output
}
logging.info('Building model %s with params %s',
model_name,
self.config)
super(MobilenetEdgeTPU, self).__init__(
inputs=image_input, outputs=output, name=model_name)
@classmethod
def from_name(cls,
model_name: str,
model_weights_path: Optional[str] = None,
checkpoint_format: Optional[str] = 'tf_checkpoint',
overrides: Optional[Dict[str, Any]] = None):
"""Construct an MobilenetEdgeTPU model from a predefined model name.
E.g., `MobilenetEdgeTPU.from_name('mobilenet_edgetpu')`.
Args:
model_name: the predefined model name
model_weights_path: the path to the weights (h5 file or saved model dir)
checkpoint_format: the model weights format. One of 'tf_checkpoint' or
'keras_checkpoint'.
overrides: (optional) a dict containing keys that can override config
Returns:
      A constructed MobilenetEdgeTPU instance.
"""
model_configs = dict(MODEL_CONFIGS)
overrides = dict(overrides) if overrides else {}
# One can define their own custom models if necessary
model_configs.update(overrides.pop('model_config', {}))
if model_name not in model_configs:
raise ValueError('Unknown model name {}'.format(model_name))
config = model_configs[model_name]
model = cls(config=config, overrides=overrides)
if model_weights_path:
common_modules.load_weights(model,
model_weights_path,
checkpoint_format=checkpoint_format)
return model
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
| 4,742 | 35.206107 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/custom_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized keras layers used in the EdgeTPU models."""
from collections.abc import MutableMapping
import inspect
from typing import Any, Optional, Union
import tensorflow as tf
from official.modeling import tf_utils
class GroupConv2D(tf.keras.layers.Conv2D):
"""2D group convolution as a Keras Layer."""
def __init__(self,
filters: int,
kernel_size: Union[int, tuple[int, int]],
groups: int,
strides: tuple[int, int] = (1, 1),
padding: str = 'valid',
data_format: str = 'channels_last',
dilation_rate: tuple[int, int] = (1, 1),
activation: Any = None,
use_bias: bool = True,
kernel_initializer: Any = 'glorot_uniform',
bias_initializer: Any = 'zeros',
kernel_regularizer: Any = None,
bias_regularizer: Any = None,
activity_regularizer: Any = None,
kernel_constraint: Any = None,
bias_constraint: Any = None,
batch_norm_layer: Optional[tf.keras.layers.Layer] = None,
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
**kwargs: Any) -> tf.keras.layers.Layer:
"""Creates a 2D group convolution keras layer.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height
and width of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
groups: The number of input/output channel groups.
strides: An integer or tuple/list of n integers, specifying the stride
length of the convolution. Specifying any stride value != 1 is
incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: The ordering of the dimensions in the inputs. `channels_last`
corresponds to inputs with shape `(batch_size, height, width, channels)`
dilation_rate: an integer or tuple/list of 2 integers, specifying the
dilation rate to use for dilated convolution. Can be a single integer to
specify the same value for all spatial dimensions. Currently, specifying
any `dilation_rate` value != 1 is incompatible with specifying any
stride value != 1.
activation: Activation function to use. If you don't specify anything, no
activation is applied ( see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix ( see
`keras.initializers`).
bias_initializer: Initializer for the bias vector ( see
`keras.initializers`).
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector ( see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation") ( see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix ( see
`keras.constraints`).
bias_constraint: Constraint function applied to the bias vector ( see
`keras.constraints`).
batch_norm_layer: The batch normalization layer to use. This is typically
tf.keras.layer.BatchNormalization or a derived class.
bn_epsilon: Batch normalization epsilon.
bn_momentum: Momentum used for moving average in batch normalization.
**kwargs: Additional keyword arguments.
Input shape:
4D tensor with shape: `(batch_size, rows, cols, channels)`
Output shape:
4D tensor with shape: `(batch_size, new_rows, new_cols, filters)` `rows`
and `cols` values might have changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(GroupConv2D(inputs, kernel) + bias)`.
Raises:
ValueError: if groups < 1 or groups > filters
ValueError: if data_format is not "channels_last".
ValueError: if `padding` is not `same` or `valid`.
      ValueError: if `batch_norm_layer` is not a class when provided.
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
if groups <= 1 or groups > filters:
      raise ValueError(f'Number of groups {groups} should be greater than 1 '
                       f'and less than or equal to the output filters '
                       f'{filters}.')
self._groups = groups
if data_format != 'channels_last':
raise ValueError(
'GroupConv2D expects input to be in channels_last format.')
if padding.lower() not in ('same', 'valid'):
raise ValueError('Valid padding options are : same, or valid.')
self.use_batch_norm = False
if batch_norm_layer is not None:
if not inspect.isclass(batch_norm_layer):
raise ValueError('batch_norm_layer is not a class.')
self.use_batch_norm = True
self.bn_epsilon = bn_epsilon
self.bn_momentum = bn_momentum
self.batch_norm_layer = []
if self.use_batch_norm:
self.batch_norm_layer = [
batch_norm_layer(
axis=-1, momentum=self.bn_momentum, epsilon=self.bn_epsilon)
for i in range(self._groups)
]
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
groups=1,
**kwargs) # pytype: disable=bad-return-type # typed-keras
def build(self, input_shape: tuple[int, ...]) -> None:
"""Builds GroupConv2D layer as a collection of smaller Conv2D layers."""
input_shape = tf.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
if input_channel % self._groups != 0:
raise ValueError(
f'Number of input channels: {input_channel} are not divisible '
f'by number of groups: {self._groups}.')
self.group_input_channel = int(input_channel / self._groups)
self.group_output_channel = int(self.filters / self._groups)
self.group_kernel_shape = self.kernel_size + (self.group_input_channel,
self.group_output_channel)
self.kernel = []
self.bias = []
for g in range(self._groups):
self.kernel.append(
self.add_weight(
name='kernel_{}'.format(g),
shape=self.group_kernel_shape,
initializer=tf_utils.clone_initializer(self.kernel_initializer),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype))
if self.use_bias:
self.bias.append(
self.add_weight(
name='bias_{}'.format(g),
shape=(self.group_output_channel,),
initializer=tf_utils.clone_initializer(self.bias_initializer),
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype))
channel_axis = self._get_channel_axis()
self.input_spec = tf.keras.layers.InputSpec(
ndim=self.rank + 2, axes={channel_axis: input_channel})
self._build_conv_op_data_shape = input_shape[-(self.rank + 1):]
self._build_input_channel = input_channel
self._padding_op = self._get_padding_op()
# channels_last corresponds to 'NHWC' data format.
self._conv_op_data_format = 'NHWC'
self.bn_layers = []
if self.use_batch_norm:
for group_index in range(self._groups):
self.bn_layers.append(self.batch_norm_layer[group_index])
self.built = True
def call(self, inputs: Any, training: Optional[bool] = None) -> Any:
"""Performs the GroupConv2D operation on the inputs."""
input_slices = tf.split(inputs, num_or_size_splits=self._groups, axis=-1)
output_slices = []
for i in range(self._groups):
# Apply conv2d to each slice
output_slice = tf.nn.conv2d(
input_slices[i],
self.kernel[i],
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format,
dilations=self.dilation_rate)
if self.use_bias:
output_slice = tf.nn.bias_add(
output_slice, self.bias[i], data_format='NHWC')
# Apply batch norm after bias addition.
if self.use_batch_norm:
output_slice = self.bn_layers[i](output_slice, training=training)
if self.activation is not None:
output_slice = self.activation(output_slice)
output_slices.append(output_slice)
# Concat the outputs back along the channel dimension
outputs = tf.concat(output_slices, axis=-1)
return outputs
def get_config(self) -> MutableMapping[str, Any]:
"""Enables serialization for the group convolution layer."""
config = super().get_config()
config['groups'] = self._groups
config['batch_norm_layer'] = self.batch_norm_layer
config['bn_epsilon'] = self.bn_epsilon
config['bn_momentum'] = self.bn_momentum
return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same layer from the config dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
    Also, `get_config` returns a config in which `batch_norm_layer` is stored
    as a list, so we need to convert it back to either None or the batch-norm
    class.
Arguments:
config: A Python dictionary, typically the output of get_config.
Returns:
A layer instance.
"""
if not config['batch_norm_layer']:
config['batch_norm_layer'] = None
else:
config['batch_norm_layer'] = type(config['batch_norm_layer'][0])
return cls(**config)
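# Example usage (a minimal sketch; shapes are illustrative). The input channel
# count must be divisible by `groups`, and each group gets its own kernel,
# bias, and (optionally) batch norm:
#
#   layer = GroupConv2D(filters=16, kernel_size=3, groups=4, padding='same',
#                       batch_norm_layer=tf.keras.layers.BatchNormalization)
#   y = layer(tf.ones([2, 32, 32, 8]))  # -> shape [2, 32, 32, 16]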
class GroupConv2DKerasModel(tf.keras.Model):
"""2D group convolution as a keras model."""
def __init__(self,
filters: int,
kernel_size: tuple[int, int],
groups: int,
batch_norm_layer: Optional[tf.keras.layers.Layer] = None,
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
data_format: str = 'channels_last',
padding: str = 'valid',
**kwargs: Any) -> tf.keras.Model:
"""Creates a 2D group convolution layer as a keras model.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height
and width of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
groups: The number of input/output channel groups.
batch_norm_layer: The batch normalization layer to use. This is typically
tf.keras.layer.BatchNormalization or a derived class.
bn_epsilon: Batch normalization epsilon.
bn_momentum: Momentum used for moving average in batch normalization.
data_format: The ordering of the dimensions in the inputs. `channels_last`
corresponds to inputs with shape `(batch_size, height, width, channels)`
padding: one of `"valid"` or `"same"` (case-insensitive).
**kwargs: Additional keyword arguments passed to the underlying conv
layers.
Raises:
ValueError: if groups < 1 or groups > filters
      ValueError: if `batch_norm_layer` is not a class when provided.
ValueError: if `data_format` is not channels_last
ValueError: if `padding` is not `same` or `valid`.
"""
super().__init__()
self.conv_layers = []
self.bn_layers = []
per_conv_filter_size = filters / groups
if groups <= 1 or groups >= filters:
raise ValueError('Number of groups should be greater than 1 and less '
'than the output filters.')
self.batch_norm_layer = batch_norm_layer
self.use_batch_norm = False
if self.batch_norm_layer is not None:
if not inspect.isclass(self.batch_norm_layer): # pytype: disable=not-supported-yet
raise ValueError('batch_norm_layer is not a class.')
self.use_batch_norm = True
if 'activation' in kwargs.keys():
self.activation = tf.keras.activations.get(kwargs['activation'])
kwargs.pop('activation')
else:
self.activation = None
if data_format != 'channels_last':
raise ValueError(
'GroupConv2D expects input to be in channels_last format.')
if padding.lower() not in ('same', 'valid'):
raise ValueError('Valid padding options are : same, or valid.')
self._groups = groups
for _ in range(self._groups):
# Override the activation so that batchnorm can be applied after the conv.
self.conv_layers.append(
tf.keras.layers.Conv2D(per_conv_filter_size, kernel_size, **kwargs))
if self.use_batch_norm:
for _ in range(self._groups):
self.bn_layers.append(
self.batch_norm_layer(
axis=-1, momentum=bn_momentum, epsilon=bn_epsilon)) # pytype: disable=bad-return-type # typed-keras
def call(self, inputs: Any) -> Any: # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Applies 2d group convolution on the inputs."""
input_shape = inputs.get_shape().as_list()
if input_shape[-1] % self._groups != 0:
raise ValueError(
f'Number of input channels: {input_shape[-1]} are not divisible '
f'by number of groups: {self._groups}.')
input_slices = tf.split(inputs, num_or_size_splits=self._groups, axis=-1)
output_slices = []
for g in range(self._groups):
output_slice = self.conv_layers[g](input_slices[g])
if self.use_batch_norm:
output_slice = self.bn_layers[g](output_slice)
      if self.activation is not None:
        output_slice = self.activation(output_slice)
output_slices.append(output_slice)
outputs = tf.concat(output_slices, axis=-1)
return outputs
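# Example usage (illustrative): unlike GroupConv2D above, this variant
# requires groups to be strictly less than filters and takes the activation
# through **kwargs:
#
#   model = GroupConv2DKerasModel(filters=16, kernel_size=(3, 3), groups=4,
#                                 padding='same', activation='relu')
#   y = model(tf.ones([2, 32, 32, 8]))  # -> shape [2, 32, 32, 16]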
def _nnapi_scalar(value, dtype):
# Resolves "Scalar operand should be constant" at cost of broadcasting
return tf.constant(value, dtype=dtype, shape=(1,))
def _fqop(x, min_val=-128, max_val=127):
"""Wraps an op x with fake quant op and given min/max."""
return tf.quantization.fake_quant_with_min_max_args(
x, min=min_val, max=max_val)
def argmax(input_tensor,
axis=-1,
output_type: tf.DType = tf.dtypes.float32,
name: Optional[str] = None,
keepdims: bool = False,
epsilon: Optional[float] = None):
"""Returns the index with the largest value across axes of a tensor.
  Approximates tf.compat.v1.argmax, but is not exactly equivalent. If
  arithmetic allows a value to be anomalously close to the maximum, but not
  equal to it, the behavior is undefined.
Args:
input_tensor: A Tensor.
    axis: An integer. Must be in the range [-rank(input), rank(input)).
      Describes
which axis of the input Tensor to reduce across. For vectors, use axis =
0.
output_type: An optional tf.DType. Note that default is different from
tflite (int64) to make default behavior compatible with darwinn.
name: Optional name for operations.
keepdims: If true, retains reduced dimensions with length 1.
epsilon: Optional small number which is intended to be always below
quantization threshold, used to distinguish equal and not equal numbers.
Returns:
A Tensor of type output_type.
"""
fqop = _fqop if output_type.is_floating else tf.identity
safe_axis = axis
if safe_axis < 0:
safe_axis = len(input_tensor.shape) + safe_axis
reduction_size = input_tensor.shape[axis]
axis_max = tf.math.reduce_max(input_tensor, axis=axis, keepdims=True)
zero_if_max = tf.subtract(axis_max, input_tensor)
eps = epsilon if epsilon else 1e-6
if input_tensor.dtype.is_floating:
zero_if_max_else_eps = tf.math.minimum(
_nnapi_scalar(eps, input_tensor.dtype), zero_if_max)
zero_if_max_else_one = zero_if_max_else_eps * _nnapi_scalar(
1 / eps, input_tensor.dtype)
elif input_tensor.dtype.is_integer:
zero_if_max_else_one = tf.math.minimum(
_nnapi_scalar(1, input_tensor.dtype), zero_if_max)
else:
raise ValueError('Please specify epsilon for unknown input data type')
# Input type ends here, output type starts here
zero_if_max_else_one = tf.cast(zero_if_max_else_one, dtype=output_type)
zero_if_max_else_one = fqop(zero_if_max_else_one)
one_if_max_else_zero = fqop(
tf.math.subtract(
fqop(_nnapi_scalar(1, output_type)), zero_if_max_else_one))
rev_index = tf.range(reduction_size, 0, -1, dtype=output_type)
for index in range(safe_axis + 1, len(input_tensor.shape)):
rev_index = tf.expand_dims(rev_index, axis=index - safe_axis)
rev_index = fqop(rev_index)
rev_index_if_max_else_zero = fqop(
tf.math.multiply(one_if_max_else_zero, rev_index))
reverse_argmax = fqop(
tf.math.reduce_max(
rev_index_if_max_else_zero, axis=axis, keepdims=keepdims, name=name))
  # The final operation takes the name of the argmax layer, if provided.
return fqop(
tf.math.subtract(
fqop(_nnapi_scalar(reduction_size, output_type)),
reverse_argmax,
name=name))
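# Worked example (illustrative): for input_tensor = tf.constant([2., 5., 5.])
# with axis=-1, axis_max is 5., zero_if_max is [3., 0., 0.],
# one_if_max_else_zero is [0., 1., 1.] and rev_index is [3., 2., 1.], so
# rev_index_if_max_else_zero is [0., 2., 1.], reverse_argmax is 2., and the
# result is 3 - 2 = 1 -- the index of the first maximum, matching tf.argmax.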
class ArgmaxKerasLayer(tf.keras.layers.Layer):
"""Implements argmax as a keras model."""
def __init__(self,
axis=-1,
name=None,
output_type=tf.dtypes.int32,
**kwargs: Any) -> tf.keras.Model:
"""Implements argmax as a keras model.
Args:
      axis: An integer. Must be in the range [-rank(input), rank(input)).
        Describes
which axis of the input Tensor to reduce across. For vectors, use axis =
0.
name: Optional name for operations.
output_type: An optional tf.DType.
      **kwargs: Other arguments passed to the layer constructor.
    Returns:
      An ArgmaxKerasLayer instance.
"""
super().__init__(name=name, **kwargs)
self.axis = axis
self.output_type = output_type # pytype: disable=bad-return-type # typed-keras
def call(self, inputs: Any) -> Any:
"""Applies argmax on the inputs."""
return argmax(
input_tensor=inputs,
axis=self.axis,
output_type=self.output_type,
name=self.name)
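# Example usage (a minimal sketch): a drop-in replacement for tf.argmax built
# from EdgeTPU-friendly ops:
#
#   layer = ArgmaxKerasLayer(axis=-1, output_type=tf.dtypes.int32)
#   indices = layer(tf.constant([[2., 5., 5.]]))  # -> [1], as with tf.argmax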
| 19,671 | 39.898129 | 117 | py |
models | models-master/official/projects/edgetpu/vision/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/edgetpu/vision/modeling/optimized_multiheadattention_layer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for optimized_multiheadattention_layer."""
import numpy as np
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import optimized_multiheadattention_layer
_BATCH_SIZE = 32
_SEQ_LEN = 4
_EMBEDDING_SIZE = 8
_NUM_HEADS = 2
_KEY_DIM = 2
class OptimizedMultiheadattentionLayerTest(tf.test.TestCase):
def test_same_output(self):
"""Tests that OptimizedMultiHeadAttention returns the expected outputs."""
input_tensor_1 = tf.random.uniform((_BATCH_SIZE, _SEQ_LEN, _EMBEDDING_SIZE))
input_tensor_2 = tf.random.uniform((_BATCH_SIZE, _SEQ_LEN, _EMBEDDING_SIZE))
# Instantiate layer and call with inputs to build.
orig_layer = tf.keras.layers.MultiHeadAttention(
num_heads=_NUM_HEADS, key_dim=_KEY_DIM)
_ = orig_layer(input_tensor_1, input_tensor_2)
opt_layer = optimized_multiheadattention_layer.OptimizedMultiHeadAttention(
num_heads=_NUM_HEADS, key_dim=_KEY_DIM)
_ = opt_layer(input_tensor_1, input_tensor_2)
# Set the weights of the two layers to be the same.
query_dense_weights = np.random.uniform(
size=(_EMBEDDING_SIZE, _NUM_HEADS, _KEY_DIM))
query_dense_bias = np.random.uniform(size=(_NUM_HEADS, _KEY_DIM))
key_dense_weights = np.random.uniform(
size=(_EMBEDDING_SIZE, _NUM_HEADS, _KEY_DIM))
key_dense_bias = np.random.uniform(size=(_NUM_HEADS, _KEY_DIM))
value_dense_weights = np.random.uniform(
size=(_EMBEDDING_SIZE, _NUM_HEADS, _KEY_DIM))
value_dense_bias = np.random.uniform(size=(_NUM_HEADS, _KEY_DIM))
attention_output_dense_weights = np.random.uniform(
size=(_NUM_HEADS, _KEY_DIM, _EMBEDDING_SIZE))
attention_output_dense_bias = np.random.uniform(size=(_EMBEDDING_SIZE,))
orig_layer._query_dense.set_weights([query_dense_weights, query_dense_bias])
orig_layer._key_dense.set_weights([key_dense_weights, key_dense_bias])
orig_layer._value_dense.set_weights([value_dense_weights, value_dense_bias])
orig_layer._output_dense.set_weights(
[attention_output_dense_weights, attention_output_dense_bias])
opt_layer._query_dense.set_weights([query_dense_weights, query_dense_bias])
opt_layer._key_dense.set_weights([key_dense_weights, key_dense_bias])
opt_layer._value_dense.set_weights([value_dense_weights, value_dense_bias])
opt_layer._output_dense.set_weights(
[attention_output_dense_weights, attention_output_dense_bias])
# Calculate two sets of attention outputs and scores and compare.
orig_attn_output, orig_attn_score = orig_layer(
input_tensor_1, input_tensor_2, return_attention_scores=True)
opt_attn_output, opt_attn_score = opt_layer(
input_tensor_1, input_tensor_2, return_attention_scores=True)
self.assertAllClose(orig_attn_output, opt_attn_output)
self.assertAllClose(orig_attn_score, opt_attn_score)
if __name__ == '__main__':
tf.test.main()
| 3,516 | 41.890244 | 88 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for MobilenetEdgeTPUV2 image classification models."""
from typing import Any, Mapping, Optional
from absl import logging
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model_blocks
ModelConfig = mobilenet_edgetpu_v2_model_blocks.ModelConfig
MODEL_CONFIGS = {
'mobilenet_edgetpu_v2':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_s(),
'mobilenet_edgetpu_v2_tiny':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_tiny(),
'mobilenet_edgetpu_v2_xs':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_xs(),
'mobilenet_edgetpu_v2_s':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_s(),
'mobilenet_edgetpu_v2_m':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_m(),
'mobilenet_edgetpu_v2_l':
mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2_l(),
'autoseg_edgetpu_backbone_xs':
mobilenet_edgetpu_v2_model_blocks.autoseg_edgetpu_backbone_xs(),
'autoseg_edgetpu_backbone_s':
mobilenet_edgetpu_v2_model_blocks.autoseg_edgetpu_backbone_s(),
'autoseg_edgetpu_backbone_m':
mobilenet_edgetpu_v2_model_blocks.autoseg_edgetpu_backbone_m(),
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobilenetEdgeTPUV2(tf.keras.Model):
"""Wrapper class for a MobilenetEdgeTPUV2 Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
model_config_name: Optional[str] = None,
overrides: Optional[Mapping[str, Any]] = None,
**kwargs):
"""Creates a MobilenetEdgeTPUV2 model.
Args:
model_config_name: (optional) the model parameters to create the model.
overrides: (optional) a dict containing keys that can override config.
**kwargs: All the rest model arguments in a dictionary.
"""
self.model_config_name = model_config_name
self._self_setattr_tracking = False
self.overrides = overrides or {}
if model_config_name is None:
model_config = ModelConfig()
else:
if model_config_name not in MODEL_CONFIGS:
supported_model_list = list(MODEL_CONFIGS.keys())
        raise ValueError(f'Unknown model name {model_config_name}. Only '
                         f'supports model configs in {supported_model_list}.')
model_config = MODEL_CONFIGS[model_config_name]
self.config = model_config.replace(**self.overrides)
input_channels = self.config.input_channels
model_name = self.config.model_name
if isinstance(self.config.resolution, tuple):
input_shape = (self.config.resolution[0], self.config.resolution[1],
input_channels)
else:
input_shape = (self.config.resolution, self.config.resolution,
input_channels)
image_input = tf.keras.layers.Input(shape=input_shape)
output = mobilenet_edgetpu_v2_model_blocks.mobilenet_edgetpu_v2(
image_input, self.config)
if not isinstance(output, list):
# Cast to float32 in case we have a different model dtype
output = tf.cast(output, tf.float32)
self._output_specs = output.get_shape()
else:
if self.config.features_as_dict:
# Dict output is required for the decoder ASPP module.
self._output_specs = {
str(i): output[i].get_shape() for i in range(len(output))
}
output = {str(i): output[i] for i in range(len(output))}
else:
# edgetpu/tasks/segmentation assumes features as list.
self._output_specs = [feat.get_shape() for feat in output]
logging.info('Building model %s with params %s',
model_name,
self.config)
super(MobilenetEdgeTPUV2, self).__init__(
inputs=image_input, outputs=output, **kwargs)
self._self_setattr_tracking = True
@classmethod
def from_name(cls,
model_name: str,
model_weights_path: Optional[str] = None,
checkpoint_format: Optional[str] = 'tf_checkpoint',
overrides: Optional[Mapping[str, Any]] = None):
"""Constructs an MobilenetEdgeTPUV2 model from a predefined model name.
E.g., `MobilenetEdgeTPUV2.from_name('mobilenet_edgetpu_v2_s')`.
Args:
model_name: the predefined model name
model_weights_path: the path to the weights (h5 file or saved model dir)
checkpoint_format: the model weights format. One of 'tf_checkpoint' or
'keras_checkpoint'.
overrides: (optional) a dict containing keys that can override config
Returns:
      A constructed MobilenetEdgeTPUV2 instance.
"""
overrides = dict(overrides) if overrides else {}
# One can define their own custom models if necessary
MODEL_CONFIGS.update(overrides.pop('model_config', {}))
model = cls(model_config_name=model_name, overrides=overrides)
if model_weights_path:
common_modules.load_weights(model,
model_weights_path,
checkpoint_format=checkpoint_format)
return model
def get_config(self):
config = {'model_config_name': self.model_config_name,
'overrides': self.overrides}
keras_model_config = super().get_config()
return dict(list(config.items()) + list(keras_model_config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(model_config_name=config['model_config_name'],
overrides=config['overrides'])
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
| 6,403 | 37.812121 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mobilenet_edgetpu model."""
import os
import tensorflow as tf
from official.legacy.image_classification import preprocessing
from official.projects.edgetpu.vision.modeling import common_modules
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model_blocks
# TODO(b/151324383): Enable once training is supported for mobilenet-edgetpu
EXAMPLE_IMAGE = ('third_party/tensorflow_models/official/vision/'
'image_classification/testdata/panda.jpg')
CKPTS = 'gs://**/efficientnets'
def _copy_recursively(src: str, dst: str) -> None:
"""Recursively copy directory."""
for src_dir, _, src_files in tf.io.gfile.walk(src):
dst_dir = os.path.join(dst, os.path.relpath(src_dir, src))
if not tf.io.gfile.exists(dst_dir):
tf.io.gfile.makedirs(dst_dir)
for src_file in src_files:
tf.io.gfile.copy(
os.path.join(src_dir, src_file),
os.path.join(dst_dir, src_file),
overwrite=True)
class MobilenetEdgeTPUBlocksTest(tf.test.TestCase):
def setUp(self):
    super().setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def test_bottleneck_block(self):
"""Test for creating a model with bottleneck block arguments."""
images = tf.zeros((4, 224, 224, 3), dtype=tf.float32)
tf.keras.backend.set_image_data_format('channels_last')
blocks = [
mobilenet_edgetpu_v1_model_blocks.BlockConfig.from_args(
input_filters=3,
output_filters=6,
kernel_size=3,
num_repeat=3,
expand_ratio=6,
strides=(2, 2),
fused_conv=False,
)
]
config = mobilenet_edgetpu_v1_model.ModelConfig.from_args(
blocks=blocks,
num_classes=10,
use_se=False,
)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU(config)
outputs = model(images, training=True)
self.assertEqual((4, 10), outputs.shape)
ref_var_names = set([
'stem_conv2d/kernel:0',
'stem_bn/gamma:0',
'stem_bn/beta:0',
'stack_0/block_0/expand_conv2d/kernel:0',
'stack_0/block_0/expand_bn/gamma:0',
'stack_0/block_0/expand_bn/beta:0',
'stack_0/block_0/depthwise_conv2d/depthwise_kernel:0',
'stack_0/block_0/depthwise_bn/gamma:0',
'stack_0/block_0/depthwise_bn/beta:0',
'stack_0/block_0/project_conv2d/kernel:0',
'stack_0/block_0/project_bn/gamma:0',
'stack_0/block_0/project_bn/beta:0',
'stack_0/block_1/expand_conv2d/kernel:0',
'stack_0/block_1/expand_bn/gamma:0',
'stack_0/block_1/expand_bn/beta:0',
'stack_0/block_1/depthwise_conv2d/depthwise_kernel:0',
'stack_0/block_1/depthwise_bn/gamma:0',
'stack_0/block_1/depthwise_bn/beta:0',
'stack_0/block_1/project_conv2d/kernel:0',
'stack_0/block_1/project_bn/gamma:0',
'stack_0/block_1/project_bn/beta:0',
'stack_0/block_2/expand_conv2d/kernel:0',
'stack_0/block_2/expand_bn/gamma:0',
'stack_0/block_2/expand_bn/beta:0',
'stack_0/block_2/depthwise_conv2d/depthwise_kernel:0',
'stack_0/block_2/depthwise_bn/gamma:0',
'stack_0/block_2/depthwise_bn/beta:0',
'stack_0/block_2/project_conv2d/kernel:0',
'stack_0/block_2/project_bn/gamma:0',
'stack_0/block_2/project_bn/beta:0',
'top_conv2d/kernel:0',
'top_bn/gamma:0',
'top_bn/beta:0',
'logits/kernel:0',
'logits/bias:0'
])
var_names = set([var.name for var in model.trainable_variables])
self.assertEqual(var_names, ref_var_names)
def test_fused_bottleneck_block(self):
"""Test for creating a model with fused bottleneck block arguments."""
images = tf.zeros((4, 224, 224, 3), dtype=tf.float32)
tf.keras.backend.set_image_data_format('channels_last')
blocks = [
mobilenet_edgetpu_v1_model_blocks.BlockConfig.from_args(
input_filters=3,
output_filters=6,
kernel_size=3,
num_repeat=3,
expand_ratio=6,
strides=(2, 2),
fused_conv=True,
)
]
config = mobilenet_edgetpu_v1_model.ModelConfig.from_args(
blocks=blocks,
num_classes=10,
use_se=False,
)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU(config)
outputs = model(images, training=True)
self.assertEqual((4, 10), outputs.shape)
var_names = {var.name for var in model.trainable_variables}
ref_var_names = [
'stack_0/block_0/fused_conv2d/kernel:0',
'stack_0/block_1/fused_conv2d/kernel:0',
'stack_0/block_2/fused_conv2d/kernel:0',
]
for ref_var_name in ref_var_names:
self.assertIn(ref_var_name, var_names)
def test_variables(self):
"""Test for variables in blocks to be included in `model.variables`."""
images = tf.zeros((4, 224, 224, 3), dtype=tf.float32)
tf.keras.backend.set_image_data_format('channels_last')
blocks = [
mobilenet_edgetpu_v1_model_blocks.BlockConfig.from_args(
input_filters=3,
output_filters=6,
kernel_size=3,
num_repeat=3,
expand_ratio=6,
id_skip=False,
strides=(2, 2),
se_ratio=0.8,
fused_conv=False,
)
]
config = mobilenet_edgetpu_v1_model.ModelConfig.from_args(
blocks=blocks,
num_classes=10,
use_se=True,
)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU(config)
_ = model(images, training=True)
var_names = {var.name for var in model.variables}
self.assertIn('stack_0/block_0/depthwise_conv2d/depthwise_kernel:0',
var_names)
class MobilenetEdgeTPUBuildTest(tf.test.TestCase):
def setUp(self):
    super().setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def test_create_mobilenet_edgetpu(self):
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU()
self.assertEqual(common_modules.count_params(model), 4092713)
class MobilenetEdgeTPUPredictTest(tf.test.TestCase):
def setUp(self):
    super().setUp()
# Ensure no model duplicates
tf.keras.backend.clear_session()
def _copy_saved_model_to_local(self, model_ckpt):
# Copy saved model to local first for speed
tmp_path = '/tmp/saved_model'
_copy_recursively(model_ckpt, tmp_path)
return tmp_path
def _test_prediction(self, model_name, image_size):
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name(model_name)
# Predict image filled with zeros
images = tf.zeros((4, image_size, image_size, 3), dtype=tf.float32)
pred = model(images, training=False)
self.assertEqual(pred.shape, (4, 1000))
# Predict image with loaded weights
images = preprocessing.load_eval_image(EXAMPLE_IMAGE, image_size)
images = tf.expand_dims(images, axis=0)
model_ckpt = os.path.join(CKPTS, model_name)
model_ckpt = self._copy_saved_model_to_local(model_ckpt)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name(
model_name, model_weights_path=model_ckpt)
pred = model(images, training=False)
pred = pred[0].numpy()
pred_idx, pred_prob = pred.argmax(), pred.max()
# 388 is 'giant panda' (see labels_map_file)
self.assertEqual(pred_idx, 388)
self.assertGreater(pred_prob, 0.75)
def test_mobilenet_edgetpu_image_shape(self):
self.skipTest(
'TODO(b/151324383): Enable once training is supported for mobilenet-edgetpu'
)
params = dict(input_channels=5, num_classes=20, rescale_input=False)
model = mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name(
'mobilenet_edgetpu', overrides=params)
images = tf.zeros((6, 100, 38, 5), dtype=tf.float32)
pred = model(images, training=False)
self.assertEqual(pred.shape, (6, 20))
def test_mobilenet_edgetpu_predict(self):
self.skipTest(
'TODO(b/151324383): Enable once training is supported for mobilenet-edgetpu'
)
self._test_prediction('mobilenet_edgetpu', 224)
if __name__ == '__main__':
tf.test.main()
| 8,911 | 32.757576 | 87 | py |
models | models-master/official/projects/edgetpu/vision/modeling/backbones/mobilenet_edgetpu_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MobileNet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.edgetpu.vision.modeling.backbones import mobilenet_edgetpu
class TestInputSpec:
def __init__(self, shape):
self.shape = shape
class TestBackboneConfig:
def __init__(self, model_id):
self.model_id = model_id
self.freeze_large_filters = 99
self.pretrained_checkpoint_path = None
self.type = 'mobilenet_edgetpu'
def get(self):
return self
class MobileNetEdgeTPUTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('mobilenet_edgetpu_v2_s', (1, 512, 512, 3)),
('mobilenet_edgetpu_v2_l', (1, None, None, 3)),
('mobilenet_edgetpu', (1, 512, 512, 3)),
('mobilenet_edgetpu_dm1p25', (1, None, None, 3)),
)
def test_mobilenet_creation(self, model_id, input_shape):
"""Test creation of MobileNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
test_model = mobilenet_edgetpu.build_mobilenet_edgetpu(
input_specs=TestInputSpec(input_shape),
backbone_config=TestBackboneConfig(model_id))
self.assertGreater(len(test_model.outputs), 1)
if __name__ == '__main__':
tf.test.main()
| 1,864 | 28.603175 | 81 | py |
models | models-master/official/projects/edgetpu/vision/modeling/backbones/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/edgetpu/vision/modeling/backbones/mobilenet_edgetpu.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of mobilenet_edgetpu_v2 Networks."""
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.edgetpu.vision.modeling.mobilenet_edgetpu_v1_model import MobilenetEdgeTPU
from official.projects.edgetpu.vision.modeling.mobilenet_edgetpu_v2_model import MobilenetEdgeTPUV2
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
# MobileNet-EdgeTPU-V2 configs.
MOBILENET_EDGETPU_V2_CONFIGS = frozenset([
'mobilenet_edgetpu_v2_tiny',
'mobilenet_edgetpu_v2_xs',
'mobilenet_edgetpu_v2_s',
'mobilenet_edgetpu_v2_m',
'mobilenet_edgetpu_v2_l',
'autoseg_edgetpu_backbone_xs',
'autoseg_edgetpu_backbone_s',
'autoseg_edgetpu_backbone_m',
])
# MobileNet-EdgeTPU-V1 configs.
MOBILENET_EDGETPU_CONFIGS = frozenset([
'mobilenet_edgetpu',
'mobilenet_edgetpu_dm0p75',
'mobilenet_edgetpu_dm1p25',
'mobilenet_edgetpu_dm1p5',
'mobilenet_edgetpu_dm1p75',
])
def freeze_large_filters(model: tf.keras.Model, threshold: int):
"""Freezes layer with large number of filters."""
for layer in model.layers:
if isinstance(layer.output_shape, tuple):
filter_size = layer.output_shape[-1]
if filter_size >= threshold:
logging.info('Freezing layer: %s', layer.name)
layer.trainable = False
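# For example, with threshold=99 a layer whose output_shape is
# (None, 7, 7, 128) has filter_size 128 >= 99 and is frozen
# (layer.trainable = False), while a (None, 56, 56, 24) layer stays trainable.
# Layers whose output_shape is a list (multi-output layers) are skipped.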
@factory.register_backbone_builder('mobilenet_edgetpu')
def build_mobilenet_edgetpu(input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
**unused_kwargs) -> tf.keras.Model:
"""Builds MobileNetEdgeTpu backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'mobilenet_edgetpu', (f'Inconsistent backbone type '
f'{backbone_type}')
if backbone_cfg.model_id in MOBILENET_EDGETPU_V2_CONFIGS:
model = MobilenetEdgeTPUV2.from_name(
model_name=backbone_cfg.model_id,
overrides={
'batch_norm': 'tpu',
'rescale_input': False,
'resolution': input_specs.shape[1:3],
'backbone_only': True,
'features_as_dict': True,
'dtype': 'bfloat16'
},
model_weights_path=backbone_cfg.pretrained_checkpoint_path)
if backbone_cfg.freeze_large_filters:
freeze_large_filters(model, backbone_cfg.freeze_large_filters)
return model
elif backbone_cfg.model_id in MOBILENET_EDGETPU_CONFIGS:
model = MobilenetEdgeTPU.from_name(
model_name=backbone_cfg.model_id,
overrides={
'batch_norm': 'tpu',
'rescale_input': False,
'resolution': input_specs.shape[1:3],
'backbone_only': True,
'dtype': 'bfloat16'
},
model_weights_path=backbone_cfg.pretrained_checkpoint_path)
if backbone_cfg.freeze_large_filters:
freeze_large_filters(model, backbone_cfg.freeze_large_filters)
return model
else:
    raise ValueError(f'Unsupported model id {backbone_cfg.model_id}.')
| 3,755 | 35.823529 | 99 | py |
models | models-master/official/projects/edgetpu/vision/modeling/heads/bifpn_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the definitions of Bi-Directional Feature Pyramid Networks (BiFPN)."""
import functools
import itertools
from typing import Text, Optional
# Import libraries
from absl import logging
import numpy as np
import tensorflow as tf
from official.projects.edgetpu.vision.modeling import common_modules
def activation_fn(features: tf.Tensor, act_type: Text):
"""Customized non-linear activation type."""
if act_type in ('silu', 'swish'):
return tf.nn.swish(features)
elif act_type == 'swish_native':
return features * tf.sigmoid(features)
elif act_type == 'hswish':
return features * tf.nn.relu6(features + 3) / 6
elif act_type == 'relu':
return tf.nn.relu(features)
elif act_type == 'relu6':
return tf.nn.relu6(features)
else:
raise ValueError('Unsupported act_type {}'.format(act_type))
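# For example, activation_fn(tf.constant([-3., 0., 3.]), 'hswish') computes
# x * relu6(x + 3) / 6 and returns [0., 0., 3.].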
def build_batch_norm(is_training_bn: bool,
beta_initializer: Text = 'zeros',
gamma_initializer: Text = 'ones',
data_format: Text = 'channels_last',
momentum: float = 0.99,
epsilon: float = 1e-3,
strategy: Optional[Text] = None,
name: Text = 'tpu_batch_normalization'):
"""Builds a batch normalization layer.
Args:
is_training_bn: `bool` for whether the model is training.
beta_initializer: `str`, beta initializer.
gamma_initializer: `str`, gamma initializer.
    data_format: `str`, either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
    momentum: `float`, momentum of the batch norm.
epsilon: `float`, small value for numerical stability.
strategy: `str`, whether to use tpu, gpus or other version of batch norm.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
axis = 1 if data_format == 'channels_first' else -1
if is_training_bn:
batch_norm_class = common_modules.get_batch_norm(strategy)
else:
batch_norm_class = tf.keras.layers.BatchNormalization
bn_layer = batch_norm_class(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
name=name)
return bn_layer
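# Example usage (a minimal sketch, assuming a TPU training setup; the layer
# name is a hypothetical choice):
#
#   bn = build_batch_norm(is_training_bn=True, strategy='tpu', name='node_bn')
#   features = bn(features, training=True)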
def bifpn_config(min_level, max_level):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = {}
  # Node id starts from the input features and monotonically increases whenever
# a new node is added. Here is an example for level P3 - P7:
# P7 (4) P7" (12)
# P6 (3) P6' (5) P6" (11)
# P5 (2) P5' (6) P5" (10)
# P4 (1) P4' (7) P4" (9)
# P3 (0) P3" (8)
# So output would be like:
# [
# {'feat_level': 6, 'inputs_offsets': [3, 4]}, # for P6'
# {'feat_level': 5, 'inputs_offsets': [2, 5]}, # for P5'
# {'feat_level': 4, 'inputs_offsets': [1, 6]}, # for P4'
# {'feat_level': 3, 'inputs_offsets': [0, 7]}, # for P3"
# {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}, # for P4"
# {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}, # for P5"
# {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}, # for P6"
# {'feat_level': 7, 'inputs_offsets': [4, 11]}, # for P7"
# ]
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
id_cnt = itertools.count(num_levels)
p['nodes'] = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p['nodes'].append({
'feat_level': i,
'inputs_offsets': [level_last_id(i),
level_last_id(i + 1)]
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p['nodes'].append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)]
})
node_ids[i].append(next(id_cnt))
return p
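# Illustrative check (not part of the library): for min_level=3, max_level=5
# the config holds two top-down nodes (P4', P3") followed by two bottom-up
# nodes (P4", P5"), mirroring the P3-P7 example in the comment above:
#
#   cfg = bifpn_config(3, 5)
#   [n['feat_level'] for n in cfg['nodes']]   # -> [4, 3, 4, 5]
#   cfg['nodes'][0]['inputs_offsets']         # -> [1, 2]  (P4' <- P4, P5)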
def get_conv_op(conv_type):
"""Gets convlution op."""
kernel_size = int(conv_type.split('_')[-1])
if conv_type.startswith('sep'):
conv_op = functools.partial(
tf.keras.layers.SeparableConv2D,
depth_multiplier=1,
kernel_size=(kernel_size, kernel_size))
elif conv_type.startswith('conv'):
conv_op = functools.partial(
tf.keras.layers.Conv2D, kernel_size=(kernel_size, kernel_size))
else:
raise ValueError('Unknown conv type: {}'.format(conv_type))
return conv_op
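# Usage sketch (illustrative): 'sep_3' yields a 3x3 SeparableConv2D factory;
# 'conv_1' would yield a 1x1 Conv2D factory. Remaining Keras kwargs are
# supplied when the factory is called:
#
#   conv = get_conv_op('sep_3')(filters=64, padding='same')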
def add_n(nodes):
"""A customized add_n to add up a list of tensors."""
# tf.add_n is not supported by EdgeTPU, while tf.reduce_sum is not supported
# by GPU and runs slow on EdgeTPU because of the 5-dimension op.
with tf.name_scope('add_n'):
new_node = nodes[0]
for n in nodes[1:]:
new_node = new_node + n
return new_node
def resize_nearest_neighbor(data, height_scale, width_scale):
"""Nearest neighbor upsampling implementation."""
with tf.name_scope('nearest_upsampling'):
bs, h, w, c = data.get_shape().as_list()
bs = -1 if bs is None else bs
# Use reshape to quickly upsample the input. The nearest pixel is selected
# implicitly via broadcasting.
data = tf.reshape(data, [bs, h, 1, w, 1, c]) * tf.ones(
[1, 1, height_scale, 1, width_scale, 1], dtype=data.dtype)
return tf.reshape(data, [bs, h * height_scale, w * width_scale, c])
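# Sanity sketch (illustrative only): for integer scale factors the
# reshape/broadcast trick above matches nearest-neighbor tf.image.resize:
#
#   x = tf.reshape(tf.range(4, dtype=tf.float32), [1, 2, 2, 1])
#   up = resize_nearest_neighbor(x, 2, 2)  # [1, 4, 4, 1], 2x2 pixel blocks
#   ref = tf.image.resize(x, [4, 4], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
#   # tf.reduce_all(tf.equal(up, ref))  # -> True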
def resize(feat,
target_height,
target_width,
strategy,
training=False,
method='bilinear'):
"""Resizes the spitial dimensions."""
dtype = feat.dtype
feat_shape = feat.get_shape()
if method == 'bilinear':
if strategy == 'tpu' and training:
if dtype == tf.bfloat16:
feat = tf.cast(feat, tf.float32)
feat = tf.image.resize(feat, [target_height, target_width])
feat = tf.cast(feat, dtype)
elif feat_shape.is_fully_defined():
# Batch dimension is known. Mimic resize[h,w] with
# resize[h,1]+resize[1,w] to reduce HBM padding.
b, h, w, c = feat_shape.as_list()
feat = tf.reshape(feat, [b, h, 1, -1])
feat = tf.image.resize(feat, [target_height, 1])
feat = tf.reshape(feat, [-1, 1, w, c])
feat = tf.image.resize(feat, [1, target_width])
feat = tf.reshape(feat, [b, target_height, target_width, c])
else:
feat = tf.image.resize(feat, [target_height, target_width])
else:
feat = tf.image.resize(feat, [target_height, target_width])
elif method == 'nearest':
_, h, w, _ = feat_shape.as_list()
if training and target_height % h == 0 and target_width % w == 0:
feat = resize_nearest_neighbor(feat, target_height // h,
target_width // w)
else:
feat = tf.cast(feat, tf.float32)
feat = tf.image.resize(feat, [target_height, target_width],
tf.image.ResizeMethod.NEAREST_NEIGHBOR)
else:
raise ValueError('Upsampling type {} is not supported.'.format(method))
return tf.cast(feat, dtype)
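# Note on the fully-defined-shape bilinear path above: resizing [h, w] to
# [H, W] in two passes, [h, w] -> [H, w] -> [H, W], is mathematically
# equivalent to a single bilinear resize because bilinear interpolation is
# separable in height and width, while the [*, 1]-shaped intermediates keep
# TPU HBM padding small.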
class ResampleFeatureMap(tf.keras.layers.Layer):
"""Resamples feature map for downsampling or upsampling."""
def __init__(self,
feat_level,
target_num_channels,
apply_bn=False,
is_training_bn=None,
conv_after_downsample=False,
strategy=None,
data_format=None,
pooling_type=None,
upsampling_type=None,
name='resample_p0'):
super().__init__(name=name)
self.apply_bn = apply_bn
self.is_training_bn = is_training_bn
self.data_format = data_format
self.target_num_channels = target_num_channels
self.feat_level = feat_level
self.strategy = strategy
self.conv_after_downsample = conv_after_downsample
self.pooling_type = pooling_type or 'max'
self.upsampling_type = upsampling_type or 'nearest'
def _pool2d(self, inputs, height, width, target_height, target_width):
"""Pools the inputs to target height and width."""
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
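    # Note: (dim - 1) // target + 1 equals ceil(dim / target); with SAME
    # padding and this stride the output size is ceil(dim / stride), which
    # matches the target for the 2x level reductions used in this FPN.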
if self.pooling_type == 'max':
return tf.keras.layers.MaxPooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(
inputs)
if self.pooling_type == 'avg':
return tf.keras.layers.AveragePooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(
inputs)
raise ValueError('Unsupported pooling type {}.'.format(self.pooling_type))
def _upsample2d(self, inputs, target_height, target_width, training):
return resize(inputs, target_height, target_width, self.strategy, training,
self.upsampling_type)
def _maybe_apply_1x1(self, feat, training, num_channels):
"""Applies 1x1 conv to change layer width if necessary."""
target_num_channels = self.target_num_channels
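    # Note: when target_num_channels is None, a width-preserving 1x1 conv is
    # still applied, so resampled features always pass through a conv (and
    # optionally BN).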
if target_num_channels is None or num_channels != target_num_channels:
feat = self.conv2d(feat)
if self.apply_bn:
feat = self.bn(feat, training=training)
return feat
def build(self, feat_shape):
num_channels = self.target_num_channels or feat_shape[-1]
self.conv2d = tf.keras.layers.Conv2D(
num_channels, (1, 1),
padding='same',
data_format=self.data_format,
name='conv2d')
self.bn = build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
strategy=self.strategy,
name='bn')
self.built = True
super().build(feat_shape)
def call(self, feat, training, all_feats):
hwc_idx = (2, 3, 1) if self.data_format == 'channels_first' else (1, 2, 3)
height, width, num_channels = [feat.shape.as_list()[i] for i in hwc_idx]
if all_feats:
target_feat_shape = all_feats[self.feat_level].shape.as_list()
target_height, target_width, _ = [target_feat_shape[i] for i in hwc_idx]
else:
# Default to downsampling if all_feats is empty.
target_height, target_width = (height + 1) // 2, (width + 1) // 2
# If conv_after_downsample is True, when downsampling, apply 1x1 after
# downsampling for efficiency.
if height > target_height and width > target_width:
if not self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
feat = self._pool2d(feat, height, width, target_height, target_width)
if self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
elif height <= target_height and width <= target_width:
feat = self._maybe_apply_1x1(feat, training, num_channels)
if height < target_height or width < target_width:
feat = self._upsample2d(feat, target_height, target_width, training)
else:
raise ValueError(
'Incompatible Resampling : feat shape {}x{} target_shape: {}x{}'
.format(height, width, target_height, target_width))
return feat
class FNode(tf.keras.layers.Layer):
"""A Keras Layer implementing BiFPN Node."""
def __init__(self,
feat_level,
inputs_offsets,
fpn_num_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
conv_type,
act_type,
strategy,
weight_method,
data_format,
pooling_type,
upsampling_type,
name='fnode'):
super().__init__(name=name)
self.feat_level = feat_level
self.inputs_offsets = inputs_offsets
self.fpn_num_filters = fpn_num_filters
self.apply_bn_for_resampling = apply_bn_for_resampling
self.conv_type = conv_type
self.act_type = act_type
self.is_training_bn = is_training_bn
self.conv_after_downsample = conv_after_downsample
self.strategy = strategy
self.data_format = data_format
self.weight_method = weight_method
self.conv_bn_act_pattern = conv_bn_act_pattern
self.pooling_type = pooling_type
self.upsampling_type = upsampling_type
self.resample_layers = []
self.vars = []
def fuse_features(self, nodes):
"""Fuses features from different resolutions and return a weighted sum.
Args:
nodes: a list of tensorflow features at different levels
Returns:
A tensor denoting the fused feature.
"""
dtype = nodes[0].dtype
if self.weight_method == 'attn':
edge_weights = [tf.cast(var, dtype=dtype) for var in self.vars]
normalized_weights = tf.nn.softmax(tf.stack(edge_weights))
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'fastattn':
edge_weights = [
tf.nn.relu(tf.cast(var, dtype=dtype)) for var in self.vars
]
weights_sum = add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = add_n(nodes)
elif self.weight_method == 'channel_attn':
edge_weights = [tf.cast(var, dtype=dtype) for var in self.vars]
normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'channel_fastattn':
edge_weights = [
tf.nn.relu(tf.cast(var, dtype=dtype)) for var in self.vars
]
weights_sum = add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = add_n(nodes)
elif self.weight_method == 'sum':
new_node = add_n(nodes)
else:
raise ValueError('unknown weight_method %s' % self.weight_method)
return new_node
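  # Fusion math sketch (illustrative): with 'fastattn' and inputs x0, x1 the
  # output is (x0 * relu(w0) + x1 * relu(w1)) / (relu(w0) + relu(w1) + 1e-4),
  # a softmax-free normalized weighted sum that is cheaper on EdgeTPU than
  # the softmax-based 'attn' variant.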
def _add_wsm(self, initializer, shape=None):
for i, _ in enumerate(self.inputs_offsets):
name = 'WSM' + ('' if i == 0 else '_' + str(i))
self.vars.append(
self.add_weight(initializer=initializer, name=name, shape=shape))
def build(self, feats_shape):
for i, input_offset in enumerate(self.inputs_offsets):
name = 'resample_{}_{}_{}'.format(i, input_offset, len(feats_shape))
self.resample_layers.append(
ResampleFeatureMap(
self.feat_level,
self.fpn_num_filters,
self.apply_bn_for_resampling,
self.is_training_bn,
self.conv_after_downsample,
strategy=self.strategy,
data_format=self.data_format,
pooling_type=self.pooling_type,
upsampling_type=self.upsampling_type,
name=name))
if self.weight_method == 'attn':
self._add_wsm('ones')
elif self.weight_method == 'fastattn':
self._add_wsm('ones')
elif self.weight_method == 'channel_attn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(tf.ones, num_filters)
elif self.weight_method == 'channel_fastattn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(tf.ones, num_filters)
self.op_after_combine = OpAfterCombine(
self.is_training_bn,
self.conv_bn_act_pattern,
self.conv_type,
self.fpn_num_filters,
self.act_type,
self.data_format,
self.strategy,
name='op_after_combine{}'.format(len(feats_shape)))
self.built = True
super().build(feats_shape)
def call(self, feats, training):
nodes = []
for i, input_offset in enumerate(self.inputs_offsets):
input_node = feats[input_offset]
input_node = self.resample_layers[i](input_node, training, feats)
nodes.append(input_node)
new_node = self.fuse_features(nodes)
new_node = self.op_after_combine(new_node)
return feats + [new_node]
class OpAfterCombine(tf.keras.layers.Layer):
"""Operation after combining input features during feature fusiong."""
def __init__(self,
is_training_bn,
conv_bn_act_pattern,
conv_type,
fpn_num_filters,
act_type,
data_format,
strategy,
name='op_after_combine'):
super().__init__(name=name)
self.conv_bn_act_pattern = conv_bn_act_pattern
self.fpn_num_filters = fpn_num_filters
self.act_type = act_type
self.data_format = data_format
self.strategy = strategy
self.is_training_bn = is_training_bn
self.conv_op = get_conv_op(conv_type)(
filters=fpn_num_filters,
padding='same',
use_bias=not self.conv_bn_act_pattern,
data_format=self.data_format,
name='conv')
self.bn = build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
strategy=self.strategy,
name='bn')
def call(self, new_node, training):
if not self.conv_bn_act_pattern:
new_node = activation_fn(new_node, self.act_type)
new_node = self.conv_op(new_node)
new_node = self.bn(new_node, training=training)
if self.conv_bn_act_pattern:
new_node = activation_fn(new_node, self.act_type)
return new_node
class FPNCells(tf.keras.layers.Layer):
"""FPN cells."""
def __init__(self,
min_level=3,
max_level=8,
fpn_num_filters=96,
apply_bn_for_resampling=True,
is_training_bn=True,
conv_after_downsample=True,
conv_bn_act_pattern=True,
conv_type='sep_3',
act_type='swish',
strategy='tpu',
fpn_weight_method='sum',
data_format='channels_last',
pooling_type='avg',
upsampling_type='bilinear',
fpn_name='bifpn',
fpn_cell_repeats=4,
**kwargs):
    super().__init__(**kwargs)
self.min_level = min_level
self.max_level = max_level
if fpn_name != 'bifpn':
raise ValueError('Only bifpn config is supported.')
self.fpn_config = bifpn_config(min_level, max_level)
self.cells = [
FPNCell( # pylint: disable=g-complex-comprehension
min_level=min_level,
max_level=max_level,
fpn_num_filters=fpn_num_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
conv_type=conv_type,
act_type=act_type,
strategy=strategy,
fpn_weight_method=fpn_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
fpn_name=fpn_name,
name='cell_%d' % rep) for rep in range(fpn_cell_repeats)
]
def call(self, feats, training):
"""Model call function."""
for cell in self.cells:
cell_feats = cell(feats, training)
min_level = self.min_level
max_level = self.max_level
feats = []
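      # Selection sketch: nodes are appended input-first, so scanning the
      # reversed node list picks, for each level, the last (bottom-up) node
      # that produced it, e.g. P3"..P7" in the bifpn_config example above.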
for level in range(min_level, max_level + 1):
for i, fnode in enumerate(reversed(self.fpn_config['nodes'])):
if fnode['feat_level'] == level:
feats.append(cell_feats[-1 - i])
break
return feats
class FPNCell(tf.keras.layers.Layer):
"""A single FPN cell."""
def __init__(self,
min_level=3,
max_level=7,
fpn_num_filters=80,
apply_bn_for_resampling=True,
is_training_bn=True,
conv_after_downsample=True,
conv_bn_act_pattern=True,
conv_type='sep_3',
act_type='swish',
strategy='tpu',
fpn_weight_method='sum',
data_format='channels_last',
pooling_type='avg',
upsampling_type='bilinear',
fpn_name='bifpn',
name='fpn_cell',
**kwargs):
    super().__init__(**kwargs)
if fpn_name != 'bifpn':
raise ValueError('Only bifpn config is supported')
self.fpn_config = bifpn_config(min_level, max_level)
self.fnodes = []
for i, fnode_cfg in enumerate(self.fpn_config['nodes']):
logging.info('fnode %d : %s', i, fnode_cfg)
fnode = FNode(
fnode_cfg['feat_level'] - min_level,
fnode_cfg['inputs_offsets'],
fpn_num_filters=fpn_num_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
conv_type=conv_type,
act_type=act_type,
strategy=strategy,
weight_method=fpn_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
name='fnode%d' % i)
self.fnodes.append(fnode)
def call(self, feats, training):
def _call(feats):
for fnode in self.fnodes:
feats = fnode(feats, training)
return feats
return _call(feats)
class SegClassNet(tf.keras.layers.Layer):
"""Segmentation class prediction network."""
def __init__(self,
min_level=3,
max_level=7,
output_filters=256,
apply_bn_for_resampling=True,
is_training_bn=True,
conv_after_downsample=True,
conv_bn_act_pattern=True,
head_conv_type='sep_3',
act_type='swish',
strategy='tpu',
output_weight_method='attn',
data_format='channels_last',
pooling_type='avg',
upsampling_type='bilinear',
fullres_output=False,
fullres_skip_connections=False,
num_classes=32,
name='seg_class_net'):
"""Initialize the SegClassNet.
Args:
min_level: minimum feature level to use in the head.
max_level: maximum feature level to use in the head.
output_filters: output filter size.
      apply_bn_for_resampling: whether to apply batch normalization for
        resampling.
is_training_bn: is training mode.
conv_after_downsample: whether to apply conv after downsample.
conv_bn_act_pattern: conv batch norm activation pattern.
head_conv_type: head convolution type.
act_type: activation type.
strategy: device strategy, eg. tpu.
output_weight_method: output weight method.
data_format: data format.
pooling_type: pooling type.
      upsampling_type: upsampling type.
fullres_output: full resolution output.
fullres_skip_connections: full resolution skip connection.
num_classes: number of classes.
name: the name of this layer.
"""
super().__init__(name=name)
conv2d_layer = get_conv_op(head_conv_type)
self.min_level = min_level
self.max_level = max_level
self.fullres_output = fullres_output
self.fullres_skip_connections = fullres_skip_connections
self.fnode = FNode(
0, # Always use the first level with highest resolution.
list(range(max_level - min_level + 1)),
output_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
head_conv_type,
act_type,
strategy,
output_weight_method,
data_format,
pooling_type,
upsampling_type,
name='seg_class_fusion')
if fullres_output:
self.fullres_conv_transpose = {}
self.fullres_conv = {}
for i in reversed(range(min_level)):
num_filters = min(num_classes * 2**(i + 1),
output_filters)
self.fullres_conv[str(i)] = conv2d_layer(
filters=num_filters,
data_format=data_format,
kernel_size=3,
strides=1,
padding='same',
activation=act_type,
name='fullres_conv_%d' % i)
self.fullres_conv_transpose[str(i)] = tf.keras.layers.Conv2DTranspose(
filters=num_filters,
data_format=data_format,
kernel_size=3,
strides=2,
padding='same',
activation=act_type,
name='fullres_conv_transpose_%d' % i)
self.classes = conv2d_layer(
num_classes,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding='same',
name='seg-class-predict')
def call(self, inputs, backbone_feats, training):
"""Call SegClassNet."""
seg_output = self.fnode(inputs, training)
net = seg_output[-1]
if self.fullres_output:
for i in reversed(range(self.min_level)):
if self.fullres_skip_connections:
net = tf.keras.layers.Concatenate()([net, backbone_feats[i + 1]])
net = self.fullres_conv[str(i)](net)
net = self.fullres_conv_transpose[str(i)](net)
class_outputs = self.classes(net)
return class_outputs
| 26,355 | 34.809783 | 82 | py |
models | models-master/official/projects/edgetpu/vision/modeling/heads/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/edgetpu/vision/tasks/semantic_segmentation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for semantic segmentation task."""
# pylint: disable=unused-import
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.edgetpu.vision.configs import semantic_segmentation_config as seg_cfg
from official.projects.edgetpu.vision.configs import semantic_segmentation_searched_config as autoseg_cfg
from official.projects.edgetpu.vision.tasks import semantic_segmentation as img_seg_task
# Dummy ADE20K TF dataset.
def dummy_ade20k_dataset(image_width, image_height):
def dummy_data(_):
dummy_image = tf.zeros((1, image_width, image_height, 3), dtype=tf.float32)
dummy_masks = tf.zeros((1, image_width, image_height, 1), dtype=tf.float32)
dummy_valid_masks = tf.cast(dummy_masks, dtype=tf.bool)
dummy_image_info = tf.zeros((1, 4, 2), dtype=tf.float32)
return (dummy_image, {
'masks': dummy_masks,
'valid_masks': dummy_valid_masks,
'image_info': dummy_image_info,
})
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class SemanticSegmentationTaskTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32',),
('deeplabv3plus_mobilenet_edgetpuv2_s_ade20k_32',),
('deeplabv3plus_mobilenet_edgetpuv2_m_ade20k_32',))
def test_task(self, config_name):
config_to_backbone_mapping = {
'deeplabv3plus_mobilenet_edgetpuv2_xs_ade20k_32':
'mobilenet_edgetpu_v2_xs',
'deeplabv3plus_mobilenet_edgetpuv2_s_ade20k_32':
'mobilenet_edgetpu_v2_s',
'deeplabv3plus_mobilenet_edgetpuv2_m_ade20k_32':
'mobilenet_edgetpu_v2_m',
}
config = seg_cfg.seg_deeplabv3plus_ade20k_32(
config_to_backbone_mapping[config_name], init_backbone=False)
config.task.train_data.global_batch_size = 1
config.task.train_data.shuffle_buffer_size = 2
config.task.validation_data.shuffle_buffer_size = 2
config.task.validation_data.global_batch_size = 1
config.task.train_data.output_size = [32, 32]
config.task.validation_data.output_size = [32, 32]
config.task.model.decoder.aspp.pool_kernel_size = None
config.task.model.backbone.dilated_resnet.model_id = 50
config.task.model.backbone.dilated_resnet.output_stride = 16
task = img_seg_task.CustomSemanticSegmentationTask(config.task)
model = task.build_model()
metrics = task.build_metrics()
dataset = dummy_ade20k_dataset(32, 32)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
self.assertIn('loss', logs)
logs = task.validation_step(next(iterator), model,
metrics=task.build_metrics(training=False))
self.assertIn('loss', logs)
class AutosegEdgeTPUTaskTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('autoseg_edgetpu_xs',), ('autoseg_edgetpu_s',), ('autoseg_edgetpu_m',)
)
def test_task(self, config_name):
config_to_backbone_mapping = {
'autoseg_edgetpu_xs': 'autoseg_edgetpu_backbone_xs',
'autoseg_edgetpu_s': 'autoseg_edgetpu_backbone_s',
'autoseg_edgetpu_m': 'autoseg_edgetpu_backbone_m',
}
config = autoseg_cfg.autoseg_edgetpu_experiment_config(
config_to_backbone_mapping[config_name], init_backbone=False)
config.task.train_data.global_batch_size = 1
config.task.train_data.shuffle_buffer_size = 2
config.task.validation_data.shuffle_buffer_size = 2
config.task.validation_data.global_batch_size = 1
config.task.train_data.output_size = [512, 512]
config.task.validation_data.output_size = [512, 512]
task = img_seg_task.AutosegEdgeTPUTask(config.task)
model = task.build_model()
metrics = task.build_metrics()
dataset = dummy_ade20k_dataset(512, 512)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if isinstance(optimizer, optimization.ExponentialMovingAverage
) and not optimizer.has_shadow_copy:
optimizer.shadow_copy(model)
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
self.assertIn('loss', logs)
logs = task.validation_step(
next(iterator), model, metrics=task.build_metrics(training=False))
self.assertIn('loss', logs)
model.summary()
if __name__ == '__main__':
tf.test.main()
| 5,536 | 40.320896 | 105 | py |
models | models-master/official/projects/edgetpu/vision/tasks/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image segmentation task definition."""
from typing import Any, Mapping, Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import task_factory
from official.projects.edgetpu.vision.configs import semantic_segmentation_config as exp_cfg
from official.projects.edgetpu.vision.configs import semantic_segmentation_searched_config as searched_cfg
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model
from official.projects.edgetpu.vision.modeling.backbones import mobilenet_edgetpu # pylint: disable=unused-import
from official.projects.edgetpu.vision.modeling.heads import bifpn_head
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import segmentation_input
from official.vision.dataloaders import tfds_factory
from official.vision.ops import preprocess_ops
from official.vision.tasks import semantic_segmentation
class ClassMappingParser(segmentation_input.Parser):
"""Same parser but maps classes max_class+1... to class 0."""
max_class = 31
def _prepare_image_and_label(self, data):
"""Prepare normalized image and label."""
image = tf.io.decode_image(data['image/encoded'], channels=3)
label = tf.io.decode_image(data['image/segmentation/class/encoded'],
channels=1)
height = data['image/height']
width = data['image/width']
image = tf.reshape(image, (height, width, 3))
label = tf.reshape(label, (1, height, width))
label = tf.where(
tf.math.greater(label, self.max_class), tf.zeros_like(label), label)
label = tf.where(tf.math.equal(label, 0), tf.ones_like(label)*255, label)
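    # Remap sketch (illustrative, with max_class=31): raw ids [0, 5, 31, 40]
    # become [0, 5, 31, 0] after the clamp above, then [255, 5, 31, 255] once
    # background 0 is mapped to the ignore id 255.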
label = tf.cast(label, tf.float32)
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=[0.5, 0.5, 0.5], scale=[0.5, 0.5, 0.5])
return image, label
@task_factory.register_task_cls(exp_cfg.CustomSemanticSegmentationTaskConfig)
class CustomSemanticSegmentationTask(
semantic_segmentation.SemanticSegmentationTask):
"""A task for semantic segmentation."""
def build_inputs(self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds classification input."""
ignore_label = self.task_config.losses.ignore_label
if params.tfds_name:
decoder = tfds_factory.get_segmentation_decoder(params.tfds_name)
else:
decoder = segmentation_input.Decoder()
parser = ClassMappingParser(
output_size=params.output_size,
crop_size=params.crop_size,
ignore_label=ignore_label,
resize_eval_groundtruth=params.resize_eval_groundtruth,
groundtruth_padded_size=params.groundtruth_padded_size,
aug_scale_min=params.aug_scale_min,
aug_scale_max=params.aug_scale_max,
aug_rand_hflip=params.aug_rand_hflip,
dtype=params.dtype)
parser.max_class = self.task_config.model.num_classes-1
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
class AutosegEdgeTPU(tf.keras.Model):
"""Segmentation keras network without pre/post-processing."""
def __init__(self,
model_params,
min_level=3,
max_level=8,
output_filters=96,
model_config=None,
use_original_backbone_features=False,
is_training_bn=True,
strategy='tpu',
data_format='channels_last',
pooling_type='avg',
fpn_num_filters=96,
apply_bn_for_resampling=True,
conv_after_downsample=True,
upsampling_type='bilinear',
conv_bn_act_pattern=True,
conv_type='sep_3',
head_conv_type='sep_3',
act_type='relu6',
fpn_weight_method='sum',
output_weight_method='sum',
fullres_output=False,
num_classes=32,
name='autoseg_edgetpu'):
"""Initialize model."""
super().__init__()
self.min_level = min_level
self.max_level = max_level
self.use_original_backbone_features = use_original_backbone_features
self.strategy = strategy
self.data_format = data_format
model_name = model_params['model_name']
self.backbone = get_models()[model_name](**model_params)
# Feature network.
self.resample_layers = [] # additional resampling layers.
if use_original_backbone_features:
start_level = 6
else:
# Not using original backbone features will (1) Use convolutions to
# process all backbone features before feeding into FPN. (2) Use an extra
      # convolution to get higher level features, while preserving the channel
      # size from the last layer of the backbone.
start_level = min_level
self.downsample_layers = []
for level in range(start_level, max_level + 1):
self.downsample_layers.append(
bifpn_head.ResampleFeatureMap(
feat_level=(level - min_level),
target_num_channels=fpn_num_filters,
is_training_bn=is_training_bn,
strategy=strategy,
data_format=data_format,
pooling_type=pooling_type,
name='downsample_p%d' % level,
))
for level in range(start_level, max_level + 1):
# Adds a coarser level by downsampling the last feature map.
self.resample_layers.append(
bifpn_head.ResampleFeatureMap(
feat_level=(level - min_level),
target_num_channels=fpn_num_filters,
apply_bn=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
strategy=strategy,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
name='resample_p%d' % level,
))
self.fpn_cells = bifpn_head.FPNCells(
min_level=min_level,
max_level=max_level,
fpn_num_filters=fpn_num_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
conv_type=conv_type,
act_type=act_type,
strategy=strategy,
fpn_weight_method=fpn_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
fpn_name='bifpn')
self.seg_class_net = bifpn_head.SegClassNet(
min_level=min_level,
max_level=max_level,
output_filters=output_filters,
apply_bn_for_resampling=apply_bn_for_resampling,
is_training_bn=is_training_bn,
conv_after_downsample=conv_after_downsample,
conv_bn_act_pattern=conv_bn_act_pattern,
head_conv_type=head_conv_type,
act_type=act_type,
strategy=strategy,
output_weight_method=output_weight_method,
data_format=data_format,
pooling_type=pooling_type,
upsampling_type=upsampling_type,
fullres_output=fullres_output,
num_classes=num_classes)
def call(self, inputs, training): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
# call backbone network.
all_feats = self.backbone(inputs, training=training)
if self.use_original_backbone_features:
feats = all_feats[self.min_level:self.max_level + 1]
for resample_layer in self.resample_layers:
feats.append(resample_layer(feats[-1], training, None))
else:
feats = []
for downsample_layer in self.downsample_layers:
all_feats.append(downsample_layer(all_feats[-1], training, None))
for level in range(self.min_level - 1, self.max_level):
feats.append(self.resample_layers[level - self.min_level + 1](
all_feats[level], training, all_feats[self.min_level - 1:]))
# call feature network.
feats = self.fpn_cells(feats, training)
    # call segmentation class output network.
class_outputs = self.seg_class_net(feats, all_feats, training)
return class_outputs
def get_models() -> Mapping[str, tf.keras.Model]:
"""Returns the mapping from model type name to Keras model."""
model_mapping = {}
def add_models(name: str, constructor: Any):
if name in model_mapping:
raise ValueError(f'Model {name} already exists in the mapping.')
model_mapping[name] = constructor
for model in mobilenet_edgetpu_v1_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name)
for model in mobilenet_edgetpu_v2_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2.from_name)
return model_mapping
@task_factory.register_task_cls(searched_cfg.AutosegEdgeTPUTaskConfig)
class AutosegEdgeTPUTask(semantic_segmentation.SemanticSegmentationTask):
"""A task for training the AutosegEdgeTPU models."""
def build_model(self):
"""Builds model for training task."""
model_config = self.task_config.model
model_params = model_config.model_params.as_dict()
model = AutosegEdgeTPU(
model_params,
min_level=model_config.head.min_level,
max_level=model_config.head.max_level,
fpn_num_filters=model_config.head.fpn_num_filters,
num_classes=model_config.num_classes)
logging.info(model_params)
return model
# TODO(suyoggupta): Dedup this function across tasks.
def build_inputs(self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds inputs for the segmentation task."""
ignore_label = self.task_config.losses.ignore_label
if params.tfds_name:
decoder = tfds_factory.get_segmentation_decoder(params.tfds_name)
else:
decoder = segmentation_input.Decoder()
parser = ClassMappingParser(
output_size=params.output_size,
crop_size=params.crop_size,
ignore_label=ignore_label,
resize_eval_groundtruth=params.resize_eval_groundtruth,
groundtruth_padded_size=params.groundtruth_padded_size,
aug_scale_min=params.aug_scale_min,
aug_scale_max=params.aug_scale_max,
aug_rand_hflip=params.aug_rand_hflip,
dtype=params.dtype)
parser.max_class = self.task_config.model.num_classes - 1
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
| 11,868 | 38.171617 | 114 | py |
models | models-master/official/projects/edgetpu/vision/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/edgetpu/vision/tasks/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image classification task."""
# pylint: disable=unused-import
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official.core import exp_factory
from official.modeling import optimization
from official.projects.edgetpu.vision.configs import mobilenet_edgetpu_config
from official.projects.edgetpu.vision.tasks import image_classification
from official.vision import registry_imports
# Dummy ImageNet TF dataset.
def dummy_imagenet_dataset():
def dummy_data(_):
dummy_image = tf.zeros((2, 224, 224, 3), dtype=tf.float32)
dummy_label = tf.zeros((2), dtype=tf.int32)
return (dummy_image, dummy_label)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class ImageClassificationTaskTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('mobilenet_edgetpu_v2_xs'),
('mobilenet_edgetpu_v2_s'),
('mobilenet_edgetpu_v2_m'),
('mobilenet_edgetpu_v2_l'),
('mobilenet_edgetpu'),
('mobilenet_edgetpu_dm0p75'),
('mobilenet_edgetpu_dm1p25'),
('mobilenet_edgetpu_dm1p5'),
('mobilenet_edgetpu_dm1p75'))
def test_task(self, config_name):
config = exp_factory.get_exp_config(config_name)
config.task.train_data.global_batch_size = 2
task = image_classification.EdgeTPUTask(config.task)
model = task.build_model()
metrics = task.build_metrics()
dataset = dummy_imagenet_dataset()
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if isinstance(optimizer, optimization.ExponentialMovingAverage
) and not optimizer.has_shadow_copy:
optimizer.shadow_copy(model)
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
for metric in metrics:
logs[metric.name] = metric.result()
self.assertIn('loss', logs)
self.assertIn('accuracy', logs)
self.assertIn('top_5_accuracy', logs)
logs = task.validation_step(next(iterator), model, metrics=metrics)
for metric in metrics:
logs[metric.name] = metric.result()
self.assertIn('loss', logs)
self.assertIn('accuracy', logs)
self.assertIn('top_5_accuracy', logs)
if __name__ == '__main__':
tf.test.main()
| 3,235 | 36.627907 | 80 | py |
models | models-master/official/projects/edgetpu/vision/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
import os
import tempfile
from typing import Any, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.modeling import tf_utils
from official.projects.edgetpu.vision.configs import mobilenet_edgetpu_config as edgetpu_cfg
from official.projects.edgetpu.vision.dataloaders import classification_input
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v1_model
from official.projects.edgetpu.vision.modeling import mobilenet_edgetpu_v2_model
from official.vision.configs import image_classification as base_cfg
from official.vision.dataloaders import input_reader_factory
def _copy_recursively(src: str, dst: str) -> None:
"""Recursively copy directory."""
for src_dir, _, src_files in tf.io.gfile.walk(src):
dst_dir = os.path.join(dst, os.path.relpath(src_dir, src))
if not tf.io.gfile.exists(dst_dir):
tf.io.gfile.makedirs(dst_dir)
for src_file in src_files:
tf.io.gfile.copy(
os.path.join(src_dir, src_file),
os.path.join(dst_dir, src_file),
overwrite=True)
def get_models() -> Mapping[str, tf.keras.Model]:
"""Returns the mapping from model type name to Keras model."""
model_mapping = {}
def add_models(name: str, constructor: Any):
if name in model_mapping:
raise ValueError(f'Model {name} already exists in the mapping.')
model_mapping[name] = constructor
for model in mobilenet_edgetpu_v1_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v1_model.MobilenetEdgeTPU.from_name)
for model in mobilenet_edgetpu_v2_model.MODEL_CONFIGS.keys():
add_models(model, mobilenet_edgetpu_v2_model.MobilenetEdgeTPUV2.from_name)
return model_mapping
def load_searched_model(saved_model_path: str) -> tf.keras.Model:
"""Loads saved model from file.
Excepting loading MobileNet-EdgeTPU-V1/V2 models, we can also load searched
model directly from saved model path by changing the model path in
mobilenet_edgetpu_search (defined in mobilenet_edgetpu_config.py)
Args:
saved_model_path: Directory path for the saved searched model.
Returns:
Loaded keras model.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
if tf.io.gfile.isdir(saved_model_path):
_copy_recursively(saved_model_path, tmp_dir)
load_path = tmp_dir
else:
raise ValueError('Saved model path is invalid.')
load_options = tf.saved_model.LoadOptions(
experimental_io_device='/job:localhost')
model = tf.keras.models.load_model(load_path, options=load_options)
return model
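# Usage sketch (the path below is hypothetical):
#
#   model = load_searched_model('gs://my-bucket/searched_model')
#   logits = model(tf.zeros([1, 224, 224, 3]), training=False)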
@task_factory.register_task_cls(edgetpu_cfg.MobilenetEdgeTPUTaskConfig)
class EdgeTPUTask(base_task.Task):
"""A task for training MobileNet-EdgeTPU models."""
def build_model(self):
"""Builds model for MobileNet-EdgeTPU Task."""
model_config = self.task_config.model
model_params = model_config.model_params.as_dict()
model_name = model_params['model_name']
registered_models = get_models()
if model_name in registered_models:
logging.info('Load MobileNet-EdgeTPU-V1/V2 model.')
logging.info(model_params)
model = registered_models[model_name](**model_params)
elif model_name == 'mobilenet_edgetpu_search':
if self.task_config.saved_model_path is None:
        raise ValueError('If using MobileNet-EdgeTPU-Search model, please '
                         'specify the saved model path via the '
                         '--params_override flag.')
logging.info('Load saved model (model from search) directly.')
model = load_searched_model(self.task_config.saved_model_path)
else:
      raise ValueError('Model has to be a mobilenet-edgetpu model or a '
                       'searched model with a given saved model path.')
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(
self,
params: base_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Builds classification input."""
num_classes = self.task_config.model.num_classes
input_size = self.task_config.model.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
is_multilabel = self.task_config.train_data.is_multilabel
if params.tfds_name:
raise ValueError('TFDS {} is not supported'.format(params.tfds_name))
else:
decoder = classification_input.Decoder(
image_field_key=image_field_key, label_field_key=label_field_key,
is_multilabel=is_multilabel)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
is_multilabel=is_multilabel,
dtype=params.dtype)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: tf.Tensor,
model_outputs: tf.Tensor,
aux_losses: Optional[Any] = None) -> tf.Tensor:
"""Builds sparse categorical cross entropy loss.
Args:
labels: Input groundtruth labels.
model_outputs: Output logits of the classifier.
      aux_losses: The auxiliary loss tensors, i.e. `losses` in tf.keras.Model.
Returns:
The total loss tensor.
"""
losses_config = self.task_config.losses
is_multilabel = self.task_config.train_data.is_multilabel
if not is_multilabel:
if losses_config.one_hot:
total_loss = tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=False,
label_smoothing=losses_config.label_smoothing)
else:
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=True)
else:
# Multi-label weighted binary cross entropy loss.
total_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=model_outputs)
total_loss = tf.reduce_sum(total_loss, axis=-1)
total_loss = tf_utils.safe_mean(total_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self,
training: bool = True) -> List[tf.keras.metrics.Metric]:
"""Gets streaming metrics for training/validation."""
is_multilabel = self.task_config.train_data.is_multilabel
if not is_multilabel:
k = self.task_config.evaluation.top_k
if self.task_config.losses.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))]
else:
metrics = []
      # These metrics destabilize training if included as training metrics.
      # The jobs fail due to OOM.
# TODO(arashwan): Investigate adding following metric to train.
if not training:
metrics = [
tf.keras.metrics.AUC(
name='globalPR-AUC',
curve='PR',
multi_label=False,
from_logits=True),
tf.keras.metrics.AUC(
name='meanPR-AUC',
curve='PR',
multi_label=True,
num_labels=self.task_config.model.num_classes,
from_logits=True),
]
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
optimizer: The optimizer for this training step.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
is_multilabel = self.task_config.train_data.is_multilabel
if self.task_config.losses.one_hot and not is_multilabel:
labels = tf.one_hot(labels, self.task_config.model.num_classes)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Runs validatation step.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
is_multilabel = self.task_config.train_data.is_multilabel
if self.task_config.losses.one_hot and not is_multilabel:
labels = tf.one_hot(labels, self.task_config.model.num_classes)
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(model_outputs=outputs, labels=labels,
aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model):
"""Performs the forward step."""
return model(inputs, training=False)
| 13,218 | 36.768571 | 92 | py |
models | models-master/official/projects/waste_identification_ml/pre_processing/config/visualization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""To visualize of the category distribution in an annotated JSON file."""
#! /usr/bin/env python3
import json
import numpy as np
import pandas as pd
def data_creation(path: str) -> pd.DataFrame:
"""Create a dataframe with the occurences of images and categories.
Args:
path: path to the annotated JSON file.
Returns:
dataset consisting of the counts of images and categories.
"""
  # Load the annotation file data into a variable.
with open(path) as json_file:
data = json.load(json_file)
# count the occurance of each category and an image in the annotation file
category_names = [i['name'] for i in data['categories']]
category_ids = [i['category_id'] for i in data['annotations']]
image_ids = [i['image_id'] for i in data['annotations']]
# create a dataframe
df = pd.DataFrame(
list(zip(category_ids, image_ids)), columns=['category_ids', 'image_ids'])
df = df.groupby('category_ids').agg(
object_count=('category_ids', 'count'),
image_count=('image_ids', 'nunique'))
df = df.reindex(range(1, len(data['categories']) + 1), fill_value=0)
df.index = category_names
return df
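# Resulting frame sketch (values illustrative): one row per category, indexed
# by category name:
#
#             object_count  image_count
#   Plastics            12            4
#   Glass                0            0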
def visualize_detailed_counts_horizontally(path: str) -> None:
"""Plot a vertical bar graph showing the counts of images & categories.
Args:
path: path to the annotated JSON file.
"""
df = data_creation(path)
ax = df.plot(
kind='bar',
figsize=(40, 10),
xlabel='Categories',
ylabel='Counts',
width=0.8,
linewidth=1,
edgecolor='white') # rot = 0 for horizontal labeling
for p in ax.patches:
ax.annotate(
text=np.round(p.get_height()),
xy=(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center',
va='top',
xytext=(4, 14),
textcoords='offset points')
def visualize_detailed_counts_vertically(path: str) -> None:
"""Plot a horizontal bar graph showing the counts of images & categories.
Args:
path: path to the annotated JSON file.
"""
df = data_creation(path)
ax = df.plot(
kind='barh',
figsize=(15, 40),
xlabel='Categories',
ylabel='Counts',
width=0.6)
for p in ax.patches:
ax.annotate(
str(p.get_width()), (p.get_x() + p.get_width(), p.get_y()),
xytext=(4, 6),
textcoords='offset points')
def visualize_annotation_file(path: str) -> None:
"""Plot a bar graph showing the category distribution.
Args:
path: path to the annotated JSON file.
"""
df = data_creation(path)
df['object_count'].plot.bar(
figsize=(20, 5),
width=0.5,
xlabel='Material types',
ylabel='count of material types')
| 3,268 | 28.718182 | 80 | py |
models | models-master/official/projects/waste_identification_ml/pre_processing/config/categories_list_of_dictionaries.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a list of dictionaries for categories according to the taxonomy.
Example usage-
build_material(MATERIAL_LIST,'material-types')
build_material(MATERIAL_FORM_LIST,'material-form-types')
build_material(MATERIAL_SUBCATEGORY_LIST,'material-subcategory-types')
build_material(MATERIAL_FORM_SUBCATEGORY_LIST,'material-form-subcategory-types')
"""
#! /usr/bin/env python
from typing import List, Dict, Union
MATERIAL_LIST = [
'Inorganic-wastes', 'Textiles', 'Rubber-and-Leather', 'Wood', 'Food',
'Plastics', 'Yard-trimming', 'Fiber', 'Glass', 'Metals'
]
MATERIAL_FORM_LIST = [
'Flexibles', 'Bottle', 'Jar', 'Carton', 'Sachets-&-Pouch', 'Blister-pack',
'Tray', 'Tube', 'Can', 'Tub', 'Cosmetic', 'Box', 'Clothes', 'Bulb',
'Cup-&-glass', 'Book-&-magazine', 'Bag', 'Lid', 'Clamshell', 'Mirror',
'Tangler', 'Cutlery', 'Cassette-&-tape', 'Electronic-devices', 'Battery',
'Pen-&-pencil', 'Paper-products', 'Foot-wear', 'Scissor', 'Toys', 'Brush',
'Pipe', 'Foil', 'Hangers'
]
MATERIAL_SUBCATEGORY_LIST = [
'HDPE_Flexible_Color', 'HDPE_Rigid_Color', 'LDPE_Flexible_Color',
'LDPE_Rigid_Color', 'PP_Flexible_Color', 'PP_Rigid_Color', 'PETE', 'PS',
'PVC', 'Others-MLP', 'Others-Tetrapak', 'Others-HIPC', 'Aluminium',
'Ferrous_Iron', 'Ferrous_Steel', 'Non-ferrous_Lead', 'Non-ferrous_Copper',
'Non-ferrous_Zinc'
]
PLASTICS_SUBCATEGORY_LIST = [
'HDPE', 'PETE', 'LDPE', 'PS', 'PP', 'PVC', 'Others-MLP', 'Others-Tetrapak',
'Others-HIPC'
]
def build_material(category_list: List[str],
supercategory: str) -> List[Dict[str, Union[int, str]]]:
"""Creates a list of dictionaries for the category classes.
Args:
category_list: list of categories from MATERIAL_LIST, MATERIAL_FORM_LIST,
MATERIAL_SUBCATEGORY_LIST, PLASTICS_SUBCATEGORY_LIST
supercategory: supercategory can be 'material-types', 'material-form-types',
'material-subcategory-types', 'material-form-subcategory-types',
'plastic-types'
Returns:
List of dictionaries returning categories with their IDs
"""
list_of_dictionaries = []
for num, m in enumerate(category_list, start=1):
list_of_dictionaries.append({
'id': num,
'name': m,
'supercategory': supercategory
})
return list_of_dictionaries
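# Example (illustrative):
#
#   build_material(['Glass', 'Metals'], 'material-types')
#   # -> [{'id': 1, 'name': 'Glass', 'supercategory': 'material-types'},
#   #     {'id': 2, 'name': 'Metals', 'supercategory': 'material-types'}]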
| 3,536 | 37.868132 | 84 | py |
models | models-master/official/projects/volumetric_models/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
from official.projects.volumetric_models.configs import semantic_segmentation_3d as semantic_segmentation_3d_cfg
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.tasks import semantic_segmentation_3d
from official.vision import registry_imports
| 1,058 | 45.043478 | 112 | py |
models | models-master/official/projects/volumetric_models/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
import gin # pylint: disable=unused-import
from official.common import flags as tfm_flags
from official.projects.volumetric_models import registry_imports # pylint: disable=unused-import
from official.vision import train
def main(_):
train.main(_)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 1,012 | 30.65625 | 97 | py |
models | models-master/official/projects/volumetric_models/train_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train."""
import json
import os
from absl import flags
from absl import logging
from absl.testing import flagsaver
import tensorflow as tf
from official.projects.volumetric_models import train as train_lib
from official.vision.dataloaders import tfexample_utils
FLAGS = flags.FLAGS
class TrainTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
tf.io.gfile.makedirs(self._model_dir)
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
# pylint: disable=g-complex-comprehension
examples = [
tfexample_utils.create_3d_image_test_example(
image_height=32, image_width=32, image_volume=32, image_channel=2)
for _ in range(2)
]
# pylint: enable=g-complex-comprehension
tfexample_utils.dump_to_tfrecord(self._data_path, tf_examples=examples)
def test_run(self):
saved_flag_values = flagsaver.save_flag_values()
train_lib.tfm_flags.define_flags()
FLAGS.mode = 'train'
FLAGS.model_dir = self._model_dir
FLAGS.experiment = 'seg_unet3d_test'
logging.info('Test pipeline correctness.')
params_override = json.dumps({
'runtime': {
'mixed_precision_dtype': 'float32',
},
'trainer': {
'train_steps': 1,
'validation_steps': 1,
},
'task': {
'model': {
'backbone': {
'unet_3d': {
'model_id': 4,
},
},
'decoder': {
'unet_3d_decoder': {
'model_id': 4,
},
},
},
'train_data': {
'input_path': self._data_path,
'file_type': 'tfrecord',
'global_batch_size': 2,
},
'validation_data': {
'input_path': self._data_path,
'file_type': 'tfrecord',
'global_batch_size': 2,
}
}
})
FLAGS.params_override = params_override
train_lib.main('unused_args')
FLAGS.mode = 'eval'
with train_lib.gin.unlock_config():
train_lib.main('unused_args')
flagsaver.restore_flag_values(saved_flag_values)
if __name__ == '__main__':
tf.test.main()
| 3,072 | 28.834951 | 78 | py |
models | models-master/official/projects/volumetric_models/evaluation/segmentation_metrics.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for segmentation."""
from typing import Optional
import tensorflow as tf
from official.projects.volumetric_models.losses import segmentation_losses
class DiceScore:
"""Dice score metric for semantic segmentation.
This class follows the same function interface as tf.keras.metrics.Metric but
does not derive from tf.keras.metrics.Metric or utilize its functions. The
reason is a tf.keras.metrics.Metric object does not run well on CPU while
created on GPU, when running with MirroredStrategy. The same interface allows
for minimal change to the upstream tasks.
Attributes:
name: The name of the metric.
dtype: The dtype of the metric, for example, tf.float32.
"""
def __init__(self,
num_classes: int,
metric_type: Optional[str] = None,
per_class_metric: bool = False,
name: Optional[str] = None,
dtype: Optional[str] = None):
"""Constructs segmentation evaluator class.
Args:
num_classes: The number of classes.
metric_type: An optional `str` of type of dice scores.
per_class_metric: Whether to report per-class metric.
      name: A `str`, name of the metric instance.
dtype: The data type of the metric result.
"""
self._num_classes = num_classes
self._per_class_metric = per_class_metric
self._dice_op_overall = segmentation_losses.SegmentationLossDiceScore(
metric_type=metric_type)
self._dice_scores_overall = tf.Variable(0.0)
self._count = tf.Variable(0.0)
if self._per_class_metric:
# Always use raw dice score for per-class metrics, so metric_type is None
# by default.
self._dice_op_per_class = segmentation_losses.SegmentationLossDiceScore()
self._dice_scores_per_class = [
tf.Variable(0.0) for _ in range(num_classes)
]
self._count_per_class = [tf.Variable(0.0) for _ in range(num_classes)]
self.name = name
self.dtype = dtype
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor):
"""Updates metric state.
Args:
y_true: The true labels of size [batch, width, height, volume,
num_classes].
y_pred: The prediction of size [batch, width, height, volume,
num_classes].
Raises:
ValueError: If number of classes from groundtruth label does not equal to
`num_classes`.
"""
if self._num_classes != y_true.get_shape()[-1]:
raise ValueError(
'The number of classes from groundtruth labels and `num_classes` '
'should equal, but they are {0} and {1}.'.format(
self._num_classes,
y_true.get_shape()[-1]))
# If both y_pred and y_true are all 0s, we skip computing the metrics;
# otherwise the averaged metrics will be erroneously lower.
if tf.reduce_sum(y_true) != 0 or tf.reduce_sum(y_pred) != 0:
self._count.assign_add(1.)
self._dice_scores_overall.assign_add(
1 - self._dice_op_overall(y_pred, y_true))
if self._per_class_metric:
for class_id in range(self._num_classes):
if tf.reduce_sum(y_true[..., class_id]) != 0 or tf.reduce_sum(
y_pred[..., class_id]) != 0:
self._count_per_class[class_id].assign_add(1.)
            self._dice_scores_per_class[class_id].assign_add(
                1 - self._dice_op_per_class(
                    y_pred[..., class_id], y_true[..., class_id]))
def result(self) -> tf.Tensor:
"""Computes and returns the metric.
The first one is `generalized` or `adaptive` overall dice score, depending
on `metric_type`. If `per_class_metric` is True, `num_classes` elements are
also appended to the overall metric, as the per-class raw dice scores.
Returns:
The resulting dice scores.
"""
if self._per_class_metric:
dice_scores = [
tf.math.divide_no_nan(self._dice_scores_overall, self._count)
]
for class_id in range(self._num_classes):
dice_scores.append(
tf.math.divide_no_nan(self._dice_scores_per_class[class_id],
self._count_per_class[class_id]))
return tf.stack(dice_scores)
else:
return tf.math.divide_no_nan(self._dice_scores_overall, self._count)
  def reset_states(self):
    """Resets the metrics to the initial state."""
    self._count.assign(0.0)
    self._dice_scores_overall.assign(0.0)
    if self._per_class_metric:
      for class_id in range(self._num_classes):
        self._dice_scores_per_class[class_id].assign(0.0)
        self._count_per_class[class_id].assign(0.0)
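# A minimal usage sketch (illustrative; shapes and values are made up): the
# metric is updated once per batch with one-hot tensors, then `result()`
# yields the overall score followed by per-class scores when
# `per_class_metric` is True.
def _example_dice_score_usage() -> tf.Tensor:
  """Illustrates the update/result/reset cycle on dummy one-hot tensors."""
  metric = DiceScore(
      num_classes=2, metric_type='generalized', per_class_metric=True)
  y_true = tf.one_hot(tf.zeros([1, 8, 8, 8], dtype=tf.int32), depth=2)
  y_pred = tf.one_hot(tf.zeros([1, 8, 8, 8], dtype=tf.int32), depth=2)
  metric.update_state(y_true=y_true, y_pred=y_pred)
  scores = metric.result()  # [overall, class_0, class_1]
  metric.reset_states()
  return scores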
| 5,352 | 38.072993 | 80 | py |
models | models-master/official/projects/volumetric_models/evaluation/segmentation_metrics_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_losses.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.evaluation import segmentation_metrics
class SegmentationMetricsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((1, 'generalized', 0.5, [0.67, 0.67]),
(1, 'adaptive', 0.5, [0.93, 0.67]),
(2, None, 0.5, [0.67, 0.67, 0.67]),
(3, 'generalized', 0.5, [0.67, 0.67, 0.67, 0.67]))
def test_forward_dice_score(self, num_classes, metric_type, output,
expected_score):
metric = segmentation_metrics.DiceScore(
num_classes=num_classes, metric_type=metric_type, per_class_metric=True)
y_pred = tf.constant(
output, shape=[2, 128, 128, 128, num_classes], dtype=tf.float32)
y_true = tf.ones(shape=[2, 128, 128, 128, num_classes], dtype=tf.float32)
metric.update_state(y_true=y_true, y_pred=y_pred)
actual_score = metric.result().numpy()
self.assertAllClose(
actual_score,
expected_score,
atol=1e-2,
msg='Output metric {} does not match expected metric {}.'.format(
actual_score, expected_score))
def test_num_classes_not_equal(self):
metric = segmentation_metrics.DiceScore(num_classes=4)
y_pred = tf.constant(0.5, shape=[2, 128, 128, 128, 2], dtype=tf.float32)
y_true = tf.ones(shape=[2, 128, 128, 128, 2], dtype=tf.float32)
with self.assertRaisesRegex(
ValueError,
'The number of classes from groundtruth labels and `num_classes` '
'should equal'):
metric.update_state(y_true=y_true, y_pred=y_pred)
if __name__ == '__main__':
tf.test.main()
| 2,368 | 39.844828 | 80 | py |
models | models-master/official/projects/volumetric_models/serving/semantic_segmentation_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D semantic segmentation input and model functions for serving/inference."""
from typing import Mapping
import tensorflow as tf
# pylint: disable=unused-import
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.modeling import factory
from official.vision.serving import export_base
class SegmentationModule(export_base.ExportModule):
"""Segmentation Module."""
def _build_model(self) -> tf.keras.Model:
"""Builds and returns a segmentation model."""
num_channels = self.params.task.model.num_channels
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [num_channels])
return factory.build_segmentation_model_3d(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def serve(
self, images: tf.Tensor) -> Mapping[str, tf.Tensor]:
"""Casts an image tensor to float and runs inference.
Args:
images: A uint8 tf.Tensor of shape [batch_size, None, None, None,
num_channels].
Returns:
A dictionary holding segmentation outputs.
"""
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
outputs = self.inference_step(images)
output_key = 'logits' if self.params.task.model.head.output_logits else 'probs'
return {output_key: outputs['logits']}
| 2,089 | 33.833333 | 83 | py |
models | models-master/official/projects/volumetric_models/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Volumetric model export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE=XX
CHECKPOINT_PATH=XX
EXPORT_DIR_PATH=XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
                   --export_dir=${EXPORT_DIR_PATH}/ \
                   --checkpoint_path=${CHECKPOINT_PATH} \
                   --batch_size=1 \
                   --input_image_size=128,128,128 \
                   --num_channels=1
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.volumetric_models.serving import semantic_segmentation_3d
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'experiment', None, 'experiment type, e.g. seg_unet3d_test')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be '
    'overridden on top of `config_file` template.')
' on top of `config_file` template.')
flags.DEFINE_integer(
'batch_size', None, 'The batch size.')
flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example`.')
flags.DEFINE_list(
'input_image_size', None,
'The comma-separated string of three integers representing the '
'height, width and depth of the input to the model.')
flags.DEFINE_integer('num_channels', 1,
'The number of channels of input image.')
flags.register_validator(
'input_image_size',
lambda value: value is not None and len(value) == 3,
message='--input_image_size must be comma-separated string of three '
'integers representing the height, width and depth of the input to '
'the model.')
def main(_):
flags.mark_flag_as_required('export_dir')
flags.mark_flag_as_required('checkpoint_path')
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
input_image_size = FLAGS.input_image_size
export_module = semantic_segmentation_3d.SegmentationModule(
params=params,
batch_size=1,
input_image_size=input_image_size,
num_channels=FLAGS.num_channels)
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=input_image_size,
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
num_channels=FLAGS.num_channels,
export_module=export_module,
export_checkpoint_subdir='checkpoint',
export_saved_model_subdir='saved_model')
if __name__ == '__main__':
app.run(main)
| 4,426 | 34.134921 | 80 | py |
models | models-master/official/projects/volumetric_models/serving/semantic_segmentation_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for semantic_segmentation_3d export lib."""
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
# pylint: disable=unused-import
from official.core import exp_factory
from official.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.serving import semantic_segmentation_3d
class SemanticSegmentationExportTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._num_channels = 2
self._input_image_size = [32, 32, 32]
self._params = exp_factory.get_exp_config('seg_unet3d_test')
input_shape = self._input_image_size + [self._num_channels]
self._image_array = np.zeros(shape=input_shape, dtype=np.uint8)
def _get_segmentation_module(self):
return semantic_segmentation_3d.SegmentationModule(
self._params,
batch_size=1,
input_image_size=self._input_image_size,
num_channels=self._num_channels)
def _export_from_module(self, module, input_type: str, save_directory: str):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module,
save_directory,
signatures=signatures)
def _get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
image_tensor = tf.convert_to_tensor(self._image_array, dtype=tf.uint8)
return tf.expand_dims(image_tensor, axis=0)
if input_type == 'image_bytes':
      return [self._image_array.tobytes()]
if input_type == 'tf_example':
      encoded_image = self._image_array.tobytes()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_image])),
})).SerializeToString()
return [example]
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'image_bytes'},
{'input_type': 'tf_example'},
)
def test_export(self, input_type: str = 'image_tensor'):
tmp_dir = self.get_temp_dir()
module = self._get_segmentation_module()
self._export_from_module(module, input_type, tmp_dir)
# Check if model is successfully exported.
self.assertTrue(tf.io.gfile.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
tf.io.gfile.exists(
os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
tf.io.gfile.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
# Get inference signature from loaded SavedModel.
imported = tf.saved_model.load(tmp_dir)
segmentation_fn = imported.signatures['serving_default']
images = self._get_dummy_input(input_type)
image_tensor = self._get_dummy_input(input_type='image_tensor')
# Perform inference using loaded SavedModel and model instance and check if
# outputs equal.
expected_output = module.model(image_tensor, training=False)
out = segmentation_fn(tf.constant(images))
self.assertAllClose(out['logits'].numpy(),
expected_output['logits'].numpy())
if __name__ == '__main__':
tf.test.main()
| 4,165 | 35.867257 | 91 | py |
models | models-master/official/projects/volumetric_models/configs/semantic_segmentation_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation configuration definition."""
import dataclasses
from typing import List, Optional, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.projects.volumetric_models.configs import backbones
from official.projects.volumetric_models.configs import decoders
from official.vision.configs import common
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
output_size: List[int] = dataclasses.field(default_factory=list)
input_size: List[int] = dataclasses.field(default_factory=list)
num_classes: int = 0
num_channels: int = 1
input_path: str = ''
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
label_dtype: str = 'float32'
image_field_key: str = 'image/encoded'
label_field_key: str = 'image/class/label'
shuffle_buffer_size: int = 1000
cycle_length: int = 10
drop_remainder: bool = False
file_type: str = 'tfrecord'
@dataclasses.dataclass
class SegmentationHead3D(hyperparams.Config):
"""Segmentation head config."""
num_classes: int = 0
level: int = 1
num_convs: int = 0
num_filters: int = 256
upsample_factor: int = 1
output_logits: bool = True
use_batch_normalization: bool = True
@dataclasses.dataclass
class SemanticSegmentationModel3D(hyperparams.Config):
"""Semantic segmentation model config."""
num_classes: int = 0
num_channels: int = 1
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 6
head: SegmentationHead3D = dataclasses.field(
default_factory=SegmentationHead3D
)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='unet_3d', unet_3d=backbones.UNet3D()
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder( # pylint: disable=g-long-lambda
type='unet_3d_decoder', unet_3d_decoder=decoders.UNet3DDecoder()
)
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
@dataclasses.dataclass
class Losses(hyperparams.Config):
# Supported `loss_type` are `adaptive` and `generalized`.
loss_type: str = 'adaptive'
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
report_per_class_metric: bool = False # Whether to report per-class metrics.
@dataclasses.dataclass
class SemanticSegmentation3DTask(cfg.TaskConfig):
"""The model config."""
model: SemanticSegmentationModel3D = dataclasses.field(
default_factory=SemanticSegmentationModel3D
)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
evaluation: Evaluation = dataclasses.field(default_factory=Evaluation)
train_input_partition_dims: List[int] = dataclasses.field(
default_factory=list)
eval_input_partition_dims: List[int] = dataclasses.field(default_factory=list)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
@exp_factory.register_config_factory('seg_unet3d_test')
def seg_unet3d_test() -> cfg.ExperimentConfig:
"""Image segmentation on a dummy dataset with 3D UNet for testing purpose."""
train_batch_size = 2
eval_batch_size = 2
steps_per_epoch = 10
config = cfg.ExperimentConfig(
task=SemanticSegmentation3DTask(
model=SemanticSegmentationModel3D(
num_classes=2,
input_size=[32, 32, 32],
num_channels=2,
backbone=backbones.Backbone(
type='unet_3d', unet_3d=backbones.UNet3D(model_id=2)),
decoder=decoders.Decoder(
type='unet_3d_decoder',
unet_3d_decoder=decoders.UNet3DDecoder(model_id=2)),
head=SegmentationHead3D(num_convs=0, num_classes=2),
norm_activation=common.NormActivation(
activation='relu', use_sync_bn=False)),
train_data=DataConfig(
input_path='train.tfrecord',
num_classes=2,
input_size=[32, 32, 32],
num_channels=2,
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
input_path='val.tfrecord',
num_classes=2,
input_size=[32, 32, 32],
num_channels=2,
is_training=False,
global_batch_size=eval_batch_size),
losses=Losses(loss_type='adaptive')),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=10,
validation_steps=10,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
},
'learning_rate': {
'type': 'constant',
'constant': {
'learning_rate': 0.000001
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
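# A minimal retrieval sketch (illustrative; `_example_get_config` is not part
# of the original API): once this module is imported, the experiment
# registered above can be looked up by name and overridden in Python.
def _example_get_config() -> cfg.ExperimentConfig:
  """Fetches the registered test experiment and tweaks one field."""
  config = exp_factory.get_exp_config('seg_unet3d_test')
  config.task.train_data.global_batch_size = 4  # Hypothetical override.
  return config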
| 6,315 | 34.284916 | 82 | py |
models | models-master/official/projects/volumetric_models/configs/semantic_segmentation_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for semantic_segmentation."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
class ImageSegmentationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('seg_unet3d_test',),)
def test_semantic_segmentation_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.SemanticSegmentation3DTask)
self.assertIsInstance(config.task.model,
exp_cfg.SemanticSegmentationModel3D)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,642 | 36.340909 | 91 | py |
models | models-master/official/projects/volumetric_models/configs/backbones.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbones configurations."""
import dataclasses
from typing import Optional, Sequence
from official.modeling import hyperparams
@dataclasses.dataclass
class UNet3D(hyperparams.Config):
"""UNet3D config."""
model_id: int = 4
pool_size: Sequence[int] = (2, 2, 2)
kernel_size: Sequence[int] = (3, 3, 3)
base_filters: int = 32
use_batch_normalization: bool = True
@dataclasses.dataclass
class Backbone(hyperparams.OneOfConfig):
"""Configuration for backbones.
Attributes:
    type: 'str', type of backbone to be used, one of the fields below.
unet_3d: UNet3D backbone config.
"""
type: Optional[str] = None
unet_3d: UNet3D = dataclasses.field(default_factory=UNet3D)
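# A minimal configuration sketch (illustrative; `_example_backbone_config` is
# not part of the original API): the one-of config selects the 3D UNet
# backbone by name, and `model_id` controls the encoder depth.
def _example_backbone_config() -> Backbone:
  """Builds a Backbone config that selects a depth-2 UNet3D."""
  return Backbone(type='unet_3d', unet_3d=UNet3D(model_id=2))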
| 1,304 | 30.071429 | 74 | py |
models | models-master/official/projects/volumetric_models/configs/decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders configurations."""
import dataclasses
from typing import Optional, Sequence
from official.modeling import hyperparams
@dataclasses.dataclass
class Identity(hyperparams.Config):
"""Identity config."""
pass
@dataclasses.dataclass
class UNet3DDecoder(hyperparams.Config):
"""UNet3D decoder config."""
model_id: int = 4
pool_size: Sequence[int] = (2, 2, 2)
kernel_size: Sequence[int] = (3, 3, 3)
use_batch_normalization: bool = True
use_deconvolution: bool = True
@dataclasses.dataclass
class Decoder(hyperparams.OneOfConfig):
"""Configuration for decoders.
Attributes:
    type: 'str', type of decoder to be used, one of the fields below.
identity: identity decoder config.
unet_3d_decoder: UNet3D decoder config.
"""
type: Optional[str] = None
identity: Identity = dataclasses.field(default_factory=Identity)
unet_3d_decoder: UNet3DDecoder = dataclasses.field(
default_factory=UNet3DDecoder
)
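# A minimal configuration sketch (illustrative; `_example_decoder_config` is
# not part of the original API): mirroring the backbone config, the one-of
# selects the UNet3D decoder and overrides its depth.
def _example_decoder_config() -> Decoder:
  """Builds a Decoder config that selects a depth-2 UNet3D decoder."""
  return Decoder(
      type='unet_3d_decoder', unet_3d_decoder=UNet3DDecoder(model_id=2))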
| 1,560 | 29.019231 | 74 | py |
models | models-master/official/projects/volumetric_models/dataloaders/segmentation_input_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for 3D segmentation datasets."""
from typing import Any, Dict, Sequence, Tuple
import tensorflow as tf
from official.vision.dataloaders import decoder
from official.vision.dataloaders import parser
class Decoder(decoder.Decoder):
"""A tf.Example decoder for segmentation task."""
def __init__(self,
image_field_key: str = 'image/encoded',
label_field_key: str = 'image/class/label'):
self._keys_to_features = {
image_field_key: tf.io.FixedLenFeature([], tf.string, default_value=''),
label_field_key: tf.io.FixedLenFeature([], tf.string, default_value='')
}
  def decode(self, serialized_example: tf.Tensor) -> Dict[str, tf.Tensor]:
return tf.io.parse_single_example(serialized_example,
self._keys_to_features)
class Parser(parser.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
input_size: Sequence[int],
num_classes: int,
num_channels: int = 3,
image_field_key: str = 'image/encoded',
label_field_key: str = 'image/class/label',
dtype: str = 'float32',
label_dtype: str = 'float32'):
"""Initializes parameters for parsing annotations in the dataset.
Args:
input_size: The input tensor size of [height, width, volume] of input
image.
num_classes: The number of classes to be segmented.
num_channels: The channel of input images.
image_field_key: A `str` of the key name to encoded image in TFExample.
label_field_key: A `str` of the key name to label in TFExample.
dtype: The data type. One of {`bfloat16`, `float32`, `float16`}.
label_dtype: The data type of input label.
"""
self._input_size = input_size
self._num_classes = num_classes
self._num_channels = num_channels
self._image_field_key = image_field_key
self._label_field_key = label_field_key
self._dtype = dtype
self._label_dtype = label_dtype
def _prepare_image_and_label(
self, data: Dict[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]:
"""Prepares normalized image and label."""
    image = tf.io.decode_raw(data[self._image_field_key], tf.float32)
label = tf.io.decode_raw(data[self._label_field_key],
tf.as_dtype(self._label_dtype))
image_size = list(self._input_size) + [self._num_channels]
image = tf.reshape(image, image_size)
label_size = list(self._input_size) + [self._num_classes]
label = tf.reshape(label, label_size)
image = tf.cast(image, dtype=self._dtype)
label = tf.cast(label, dtype=self._dtype)
# TPU doesn't support tf.int64 well, use tf.int32 directly.
if label.dtype == tf.int64:
label = tf.cast(label, dtype=tf.int32)
return image, label
  def _parse_train_data(self, data: Dict[str,
                                         Any]) -> Tuple[tf.Tensor, tf.Tensor]:
    """Parses data for training."""
    # The image is already cast to self._dtype in _prepare_image_and_label.
    image, labels = self._prepare_image_and_label(data)
    return image, labels
  def _parse_eval_data(self, data: Dict[str,
                                        Any]) -> Tuple[tf.Tensor, tf.Tensor]:
    """Parses data for evaluation."""
    # The image is already cast to self._dtype in _prepare_image_and_label.
    image, labels = self._prepare_image_and_label(data)
    return image, labels
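# A minimal decode/parse sketch (illustrative; shapes are made up and
# `_example_parse` is not part of the original API): a serialized tf.Example
# is decoded into raw byte features, then parsed into a fixed-shape
# (image, label) pair ready for batching.
def _example_parse(
    serialized_example: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
  """Runs the decoder and the training parse_fn on one serialized example."""
  example_decoder = Decoder()
  example_parser = Parser(
      input_size=[32, 32, 32], num_classes=2, num_channels=2)
  decoded = example_decoder.decode(serialized_example)
  return example_parser.parse_fn(is_training=True)(decoded)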
| 4,231 | 38.551402 | 82 | py |
models | models-master/official/projects/volumetric_models/dataloaders/segmentation_input_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_input_3d.py."""
import os
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.dataloaders import segmentation_input_3d
from official.vision.dataloaders import tfexample_utils
class InputReaderTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
self._example = tfexample_utils.create_3d_image_test_example(
image_height=32, image_width=32, image_volume=32, image_channel=2)
@parameterized.parameters(
([32, 32, 32], 2, 2, False),
([32, 32, 32], 2, 2, True),
)
def testSegmentationInputReader(self, input_size, num_classes, num_channels,
is_training):
decoder = segmentation_input_3d.Decoder()
parser = segmentation_input_3d.Parser(
input_size=input_size,
num_classes=num_classes,
num_channels=num_channels)
decoded_tensor = decoder.decode(self._example.SerializeToString())
image, labels = parser.parse_fn(is_training=is_training)(decoded_tensor)
# Checks image shape.
self.assertEqual(
list(image.numpy().shape),
[input_size[0], input_size[1], input_size[2], num_channels])
self.assertEqual(
list(labels.numpy().shape),
[input_size[0], input_size[1], input_size[2], num_classes])
if __name__ == '__main__':
tf.test.main()
| 2,160 | 33.301587 | 81 | py |
models | models-master/official/projects/volumetric_models/modeling/nn_blocks_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for neural networks."""
from typing import Sequence, Union
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.modeling.layers import nn_layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class BasicBlock3DVolume(tf.keras.layers.Layer):
"""A basic 3d convolution block."""
def __init__(self,
filters: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]],
kernel_size: Union[int, Sequence[int]],
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
bias_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_batch_normalization: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Creates a basic 3d convolution block applying one or more convolutions.
Args:
      filters: A list of `int` numbers or an `int` number of filters. Given an
        `int` input, a single convolution is applied; otherwise a series of
        convolutions is applied.
strides: An integer or tuple/list of 3 integers, specifying the strides of
the convolution along each spatial dimension. Can be a single integer to
specify the same value for all spatial dimensions.
kernel_size: An integer or tuple/list of 3 integers, specifying the depth,
height and width of the 3D convolution window. Can be a single integer
to specify the same value for all spatial dimensions.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
      use_batch_normalization: Whether to use batch normalization.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
if isinstance(filters, int):
self._filters = [filters]
else:
self._filters = filters
self._strides = strides
self._kernel_size = kernel_size
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._use_batch_normalization = use_batch_normalization
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape: tf.TensorShape):
"""Builds the basic 3d convolution block."""
self._convs = []
self._norms = []
for filters in self._filters:
      self._convs.append(
          tf.keras.layers.Conv3D(
              filters=filters,
              kernel_size=self._kernel_size,
              strides=self._strides,
              padding='same',
              data_format=tf.keras.backend.image_data_format(),
              kernel_initializer=self._kernel_initializer,
              kernel_regularizer=self._kernel_regularizer,
              bias_regularizer=self._bias_regularizer,
              activation=None))
self._norms.append(
self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon))
super(BasicBlock3DVolume, self).build(input_shape)
def get_config(self):
"""Returns the config of the basic 3d convolution block."""
config = {
'filters': self._filters,
'strides': self._strides,
'kernel_size': self._kernel_size,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'use_batch_normalization': self._use_batch_normalization
}
base_config = super(BasicBlock3DVolume, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, training: bool = None) -> tf.Tensor:
"""Runs forward pass on the input tensor."""
x = inputs
for conv, norm in zip(self._convs, self._norms):
x = conv(x)
if self._use_batch_normalization:
x = norm(x)
x = self._activation_fn(x)
return x
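# A minimal shape sketch (illustrative; `_example_basic_block_3d` is not part
# of the original API): an `int` `filters` applies a single convolution, and
# `strides=2` halves all three spatial dimensions.
def _example_basic_block_3d() -> tf.Tensor:
  """Applies a single-conv BasicBlock3DVolume to a dummy 5D volume."""
  inputs = tf.zeros([1, 16, 16, 16, 8])
  block = BasicBlock3DVolume(
      filters=16, strides=2, kernel_size=3, use_batch_normalization=True)
  return block(inputs)  # Output shape: [1, 8, 8, 8, 16].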
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualBlock3DVolume(tf.keras.layers.Layer):
"""A residual 3d block."""
def __init__(self,
filters,
strides,
use_projection=False,
se_ratio=None,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A residual 3d block with BN after convolutions.
Args:
      filters: `int` number of filters for the two convolutions in this block.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: `float` or None. Ratio of the Squeeze-and-Excitation layer.
stochastic_depth_drop_rate: `float` or None. if not None, drop rate for
the stochastic depth layer.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._filters = filters
self._strides = strides
self._use_projection = use_projection
self._se_ratio = se_ratio
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
if self._use_projection:
self._shortcut = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv1 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv2 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=self._filters,
out_filters=self._filters,
se_ratio=self._se_ratio,
use_3d_input=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
super(ResidualBlock3DVolume, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(ResidualBlock3DVolume, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
shortcut = inputs
if self._use_projection:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
return self._activation_fn(x + shortcut)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock3DVolume(tf.keras.layers.Layer):
"""A standard bottleneck block."""
def __init__(self,
filters,
strides,
dilation_rate=1,
use_projection=False,
se_ratio=None,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A standard bottleneck 3d block with BN after convolutions.
Args:
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
dilation_rate: `int` dilation_rate of convolutions. Default to 1.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: `float` or None. Ratio of the Squeeze-and-Excitation layer.
stochastic_depth_drop_rate: `float` or None. if not None, drop rate for
the stochastic depth layer.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._filters = filters
self._strides = strides
self._dilation_rate = dilation_rate
self._use_projection = use_projection
self._se_ratio = se_ratio
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
if self._use_projection:
self._shortcut = tf.keras.layers.Conv3D(
filters=self._filters * 4,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv1 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv2 = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
dilation_rate=self._dilation_rate,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv3 = tf.keras.layers.Conv3D(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm3 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=self._filters * 4,
out_filters=self._filters * 4,
se_ratio=self._se_ratio,
use_3d_input=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
super(BottleneckBlock3DVolume, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'dilation_rate': self._dilation_rate,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(BottleneckBlock3DVolume, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
shortcut = inputs
if self._use_projection:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
x = self._activation_fn(x)
x = self._conv3(x)
x = self._norm3(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
return self._activation_fn(x + shortcut)
| 19,158 | 36.714567 | 112 | py |
models | models-master/official/projects/volumetric_models/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
from absl.testing import parameterized
import tensorflow as tf
# pylint: disable=unused-import
from official.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.modeling import factory
class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(((128, 128, 128), 5e-5, True),
((64, 64, 64), None, False))
def test_unet3d_builder(self, input_size, weight_decay, use_bn):
num_classes = 3
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], input_size[2], 3])
model_config = exp_cfg.SemanticSegmentationModel3D(num_classes=num_classes)
model_config.head.use_batch_normalization = use_bn
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
model = factory.build_segmentation_model_3d(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
self.assertIsInstance(
model, tf.keras.Model,
'Output should be a tf.keras.Model instance but got %s' % type(model))
if __name__ == '__main__':
tf.test.main()
| 1,994 | 38.9 | 91 | py |
models | models-master/official/projects/volumetric_models/modeling/nn_blocks_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for 3D volumeric convoluion blocks."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.modeling import nn_blocks_3d
class NNBlocks3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((128, 128, 32, 1), (256, 256, 16, 2))
def test_bottleneck_block_3d_volume_creation(self, spatial_size, volume_size,
filters, strides):
inputs = tf.keras.Input(
shape=(spatial_size, spatial_size, volume_size, filters * 4),
batch_size=1)
block = nn_blocks_3d.BottleneckBlock3DVolume(
filters=filters,
strides=strides,
use_projection=True,
se_ratio=0.2,
stochastic_depth_drop_rate=0.2)
features = block(inputs)
self.assertAllEqual([
1, spatial_size // strides, spatial_size // strides,
volume_size // strides, filters * 4
], features.shape.as_list())
@parameterized.parameters((128, 128, 32, 1), (256, 256, 64, 2))
def test_residual_block_3d_volume_creation(self, spatial_size, volume_size,
filters, strides):
inputs = tf.keras.Input(
shape=(spatial_size, spatial_size, volume_size, filters), batch_size=1)
block = nn_blocks_3d.ResidualBlock3DVolume(
filters=filters,
strides=strides,
use_projection=True,
se_ratio=0.2,
stochastic_depth_drop_rate=0.2)
features = block(inputs)
self.assertAllEqual([
1, spatial_size // strides, spatial_size // strides,
volume_size // strides, filters
], features.shape.as_list())
@parameterized.parameters((128, 128, 64, 1, 3), (256, 256, 128, 2, 1))
def test_basic_block_3d_volume_creation(self, spatial_size, volume_size,
filters, strides, kernel_size):
inputs = tf.keras.Input(
shape=(spatial_size, spatial_size, volume_size, filters), batch_size=1)
block = nn_blocks_3d.BasicBlock3DVolume(
filters=filters, strides=strides, kernel_size=kernel_size)
features = block(inputs)
self.assertAllEqual([
1, spatial_size // strides, spatial_size // strides,
volume_size // strides, filters
], features.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 2,976 | 34.86747 | 79 | py |
models | models-master/official/projects/volumetric_models/modeling/segmentation_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.modeling.heads import segmentation_heads_3d
from official.vision.modeling import segmentation_model
class SegmentationNetworkUNet3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([32, 32], 4),
([64, 64], 4),
([64, 64], 2),
([128, 64], 2),
)
def test_segmentation_network_unet3d_creation(self, input_size, depth):
"""Test for creation of a segmentation network."""
num_classes = 2
inputs = np.random.rand(2, input_size[0], input_size[0], input_size[1], 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.UNet3D(model_id=depth)
decoder = decoders.UNet3DDecoder(
model_id=depth, input_specs=backbone.output_specs)
head = segmentation_heads_3d.SegmentationHead3D(
num_classes, level=1, num_convs=0)
model = segmentation_model.SegmentationModel(
backbone=backbone, decoder=decoder, head=head)
outputs = model(inputs)
self.assertAllEqual(
[2, input_size[0], input_size[0], input_size[1], num_classes],
outputs['logits'].numpy().shape)
def test_serialize_deserialize(self):
"""Validate the network can be serialized and deserialized."""
num_classes = 3
backbone = backbones.UNet3D(model_id=4)
decoder = decoders.UNet3DDecoder(
model_id=4, input_specs=backbone.output_specs)
head = segmentation_heads_3d.SegmentationHead3D(
num_classes, level=1, num_convs=0)
model = segmentation_model.SegmentationModel(
backbone=backbone, decoder=decoder, head=head)
config = model.get_config()
new_model = segmentation_model.SegmentationModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,856 | 36.103896 | 84 | py |
models | models-master/official/projects/volumetric_models/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
from typing import Sequence, Union
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.volumetric_models.modeling.decoders import factory as decoder_factory
from official.projects.volumetric_models.modeling.heads import segmentation_heads_3d
from official.vision.modeling import segmentation_model
from official.vision.modeling.backbones import factory as backbone_factory
def build_segmentation_model_3d(
input_specs: Union[tf.keras.layers.InputSpec,
Sequence[tf.keras.layers.InputSpec]],
model_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds Segmentation model."""
norm_activation_config = model_config.norm_activation
backbone = backbone_factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
head_config = model_config.head
head = segmentation_heads_3d.SegmentationHead3D(
num_classes=model_config.num_classes,
level=head_config.level,
num_convs=head_config.num_convs,
num_filters=head_config.num_filters,
upsample_factor=head_config.upsample_factor,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
use_batch_normalization=head_config.use_batch_normalization,
kernel_regularizer=l2_regularizer,
output_logits=head_config.output_logits)
model = segmentation_model.SegmentationModel(backbone, decoder, head)
return model
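# Illustrative usage sketch, not part of the original file: builds a 3D
# segmentation model from the default experiment config, mirroring the input
# sizes used in factory_test.py above. The config and registration imports are
# assumptions taken from that test.
if __name__ == '__main__':
  # pylint: disable=g-import-not-at-top
  from official.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
  from official.projects.volumetric_models.modeling import backbones  # pylint: disable=unused-import
  from official.projects.volumetric_models.modeling import decoders  # pylint: disable=unused-import
  demo_input_specs = tf.keras.layers.InputSpec(shape=[None, 64, 64, 64, 3])
  demo_model = build_segmentation_model_3d(
      input_specs=demo_input_specs,
      model_config=exp_cfg.SemanticSegmentationModel3D(num_classes=3))
  demo_outputs = demo_model(tf.ones([1, 64, 64, 64, 3]))
  print(demo_outputs['logits'].shape)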
| 2,620 | 39.323077 | 92 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/unet_3d_decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for 3D UNet decoder."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.modeling.backbones import unet_3d
from official.projects.volumetric_models.modeling.decoders import unet_3d_decoder
class UNet3DDecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([128, 64], 4),
([256, 128], 6),
)
def test_network_creation(self, input_size, model_id):
"""Test creation of UNet3D family models."""
tf.keras.backend.set_image_data_format('channels_last')
# `input_size` consists of [spatial size, volume size].
inputs = tf.keras.Input(
shape=(input_size[0], input_size[0], input_size[1], 3), batch_size=1)
backbone = unet_3d.UNet3D(model_id=model_id)
network = unet_3d_decoder.UNet3DDecoder(
model_id=model_id, input_specs=backbone.output_specs)
endpoints = backbone(inputs)
feats = network(endpoints)
self.assertIn('1', feats)
self.assertAllEqual([1, input_size[0], input_size[0], input_size[1], 64],
feats['1'].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=4,
input_specs=unet_3d.UNet3D(model_id=4).output_specs,
pool_size=(2, 2, 2),
kernel_size=(3, 3, 3),
kernel_regularizer=None,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
use_sync_bn=False,
use_batch_normalization=True,
use_deconvolution=True)
network = unet_3d_decoder.UNet3DDecoder(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = unet_3d_decoder.UNet3DDecoder.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,822 | 33.851852 | 81 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory functions."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.projects.volumetric_models.configs import decoders as decoders_cfg
from official.projects.volumetric_models.configs import semantic_segmentation_3d as semantic_segmentation_3d_exp
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.modeling.decoders import factory
class FactoryTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(model_id=[2, 3],))
def test_unet_3d_decoder_creation(self, model_id):
"""Test creation of UNet 3D decoder."""
# Create test input for decoders based on input model_id.
input_specs = {}
for level in range(model_id):
input_specs[str(level + 1)] = tf.TensorShape(
[1, 128 // (2**level), 128 // (2**level), 128 // (2**level), 1])
network = decoders.UNet3DDecoder(
model_id=model_id,
input_specs=input_specs,
use_sync_bn=True,
use_batch_normalization=True,
use_deconvolution=True)
model_config = semantic_segmentation_3d_exp.SemanticSegmentationModel3D()
model_config.num_classes = 2
model_config.num_channels = 1
model_config.input_size = [None, None, None]
model_config.decoder = decoders_cfg.Decoder(
type='unet_3d_decoder',
unet_3d_decoder=decoders_cfg.UNet3DDecoder(model_id=model_id))
factory_network = factory.build_decoder(
input_specs=input_specs, model_config=model_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
print(network_config)
print(factory_network_config)
self.assertEqual(network_config, factory_network_config)
def test_identity_creation(self):
"""Test creation of identity decoder."""
model_config = semantic_segmentation_3d_exp.SemanticSegmentationModel3D()
model_config.num_classes = 2
model_config.num_channels = 3
model_config.input_size = [None, None, None]
model_config.decoder = decoders_cfg.Decoder(
type='identity', identity=decoders_cfg.Identity())
factory_network = factory.build_decoder(
input_specs=None, model_config=model_config)
self.assertIsNone(factory_network)
if __name__ == '__main__':
tf.test.main()
| 3,008 | 36.148148 | 112 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder registers and factory method.
One can register a new decoder model by the following two steps:
1 Import the factory and register the new decoder builder in the decoder file.
2 Import the decoder class in decoders/__init__.py.
```
# my_decoder.py
from modeling.decoders import factory
class MyDecoder():
...
@factory.register_decoder_builder('my_decoder')
def build_my_decoder():
return MyDecoder()
# decoders/__init__.py adds import
from modeling.decoders.my_decoder import MyDecoder
```
If one wants the MyDecoder class to be used only by a specific binary, then
don't import the decoder module in decoders/__init__.py; instead, import it
in the place that uses it.
"""
from typing import Union, Mapping, Optional
# Import libraries
import tensorflow as tf
from official.core import registry
from official.modeling import hyperparams
_REGISTERED_DECODER_CLS = {}
def register_decoder_builder(key: str):
"""Decorates a builder of decoder class.
The builder should be a Callable (a class or a function).
  This decorator supports registration of a decoder builder as follows:
```
class MyDecoder(tf.keras.Model):
pass
@register_decoder_builder('mydecoder')
def builder(input_specs, config, l2_reg):
return MyDecoder(...)
# Builds a MyDecoder object.
  my_decoder = build_decoder(input_specs, config, l2_reg)
```
Args:
key: A `str` of key to look up the builder.
Returns:
A callable for using as class decorator that registers the decorated class
for creation from an instance of task_config_cls.
"""
return registry.register(_REGISTERED_DECODER_CLS, key)
@register_decoder_builder('identity')
def build_identity(
input_specs: Optional[Mapping[str, tf.TensorShape]] = None,
model_config: Optional[hyperparams.Config] = None,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None) -> None:
del input_specs, model_config, l2_regularizer # Unused by identity decoder.
return None
def build_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
**kwargs) -> Union[None, tf.keras.Model, tf.keras.layers.Layer]: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A `OneOfConfig` of model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to
None.
**kwargs: Additional keyword args to be passed to decoder builder.
Returns:
An instance of the decoder.
"""
decoder_builder = registry.lookup(_REGISTERED_DECODER_CLS,
model_config.decoder.type)
return decoder_builder(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer,
**kwargs)
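# Illustrative sketch, not part of the original file: registers a trivial
# decoder builder under a hypothetical 'my_decoder' key, mirroring the module
# docstring above, then looks it up through the registry.
if __name__ == '__main__':
  @register_decoder_builder('my_decoder')
  def build_my_decoder(input_specs=None, model_config=None,
                       l2_regularizer=None):
    del input_specs, model_config, l2_regularizer  # Unused in this sketch.
    return tf.keras.layers.Lambda(lambda x: x, name='my_decoder')
  print(registry.lookup(_REGISTERED_DECODER_CLS, 'my_decoder'))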
| 3,565 | 29.478632 | 127 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders package definition."""
from official.projects.volumetric_models.modeling.decoders.unet_3d_decoder import UNet3DDecoder
| 741 | 40.222222 | 95 | py |
models | models-master/official/projects/volumetric_models/modeling/decoders/unet_3d_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of 3D UNet Model decoder part.
[1] Özgün Çiçek, Ahmed Abdulkadir, Soeren S. Lienkamp, Thomas Brox, Olaf
Ronneberger. 3D U-Net: Learning Dense Volumetric Segmentation from Sparse
Annotation. arXiv:1606.06650.
"""
from typing import Any, Dict, Mapping, Optional, Sequence
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.volumetric_models.modeling import nn_blocks_3d
from official.projects.volumetric_models.modeling.decoders import factory
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class UNet3DDecoder(tf.keras.Model):
"""Class to build 3D UNet decoder."""
def __init__(self,
model_id: int,
input_specs: Mapping[str, tf.TensorShape],
pool_size: Sequence[int] = (2, 2, 2),
kernel_size: Sequence[int] = (3, 3, 3),
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_sync_bn: bool = False,
use_batch_normalization: bool = False,
use_deconvolution: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""3D UNet decoder initialization function.
Args:
      model_id: The depth of the UNet3D decoder, which should match the depth
        of the UNet3D backbone. The greater the depth, the more upsampling
        levels the decoder contains. Lowering the depth may reduce the amount
        of memory required for training.
input_specs: The input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
pool_size: The pooling size for the max pooling operations.
kernel_size: The kernel size for 3D convolution.
      kernel_regularizer: A tf.keras.regularizers.Regularizer object for the
        Conv3D layers. Default to None.
activation: The name of the activation function.
norm_momentum: The normalization momentum for the moving average.
norm_epsilon: A float added to variance to avoid dividing by zero.
use_sync_bn: If True, use synchronized batch normalization.
use_batch_normalization: If set to True, use batch normalization after
convolution and before activation. Default to False.
use_deconvolution: If set to True, the model will use transpose
convolution (deconvolution) instead of up-sampling. This increases the
        amount of memory required during training. Default to False.
**kwargs: Keyword arguments to be passed.
"""
self._config_dict = {
'model_id': model_id,
'input_specs': input_specs,
'pool_size': pool_size,
'kernel_size': kernel_size,
'kernel_regularizer': kernel_regularizer,
'activation': activation,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'use_sync_bn': use_sync_bn,
'use_batch_normalization': use_batch_normalization,
'use_deconvolution': use_deconvolution
}
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
self._use_batch_normalization = use_batch_normalization
if tf.keras.backend.image_data_format() == 'channels_last':
channel_dim = -1
else:
channel_dim = 1
# Build 3D UNet.
inputs = self._build_input_pyramid(input_specs, model_id) # pytype: disable=wrong-arg-types # dynamic-method-lookup
# Add levels with up-convolution or up-sampling.
x = inputs[str(model_id)]
for layer_depth in range(model_id - 1, 0, -1):
# Apply deconvolution or upsampling.
if use_deconvolution:
x = layers.Conv3DTranspose(
filters=x.get_shape().as_list()[channel_dim],
kernel_size=pool_size,
strides=(2, 2, 2))(
x)
else:
x = layers.UpSampling3D(size=pool_size)(x)
# Concatenate upsampled features with input features from one layer up.
x = tf.concat([x, tf.cast(inputs[str(layer_depth)], dtype=x.dtype)],
axis=channel_dim)
filter_num = inputs[str(layer_depth)].get_shape().as_list()[channel_dim]
x = nn_blocks_3d.BasicBlock3DVolume(
filters=[filter_num, filter_num],
strides=(1, 1, 1),
kernel_size=kernel_size,
kernel_regularizer=kernel_regularizer,
activation=activation,
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
use_batch_normalization=use_batch_normalization)(
x)
feats = {'1': x}
self._output_specs = {l: feats[l].get_shape() for l in feats}
super(UNet3DDecoder, self).__init__(inputs=inputs, outputs=feats, **kwargs)
def _build_input_pyramid(self, input_specs: Dict[str, tf.TensorShape],
depth: int) -> Dict[str, tf.Tensor]:
"""Builds input pyramid features."""
assert isinstance(input_specs, dict)
if len(input_specs.keys()) > depth:
      raise ValueError(
          'The number of backbone levels should not exceed the 3D UNet '
          'decoder depth.')
inputs = {}
for level, spec in input_specs.items():
inputs[level] = tf.keras.Input(shape=spec[1:])
return inputs
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any], custom_objects=None):
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_decoder_builder('unet_3d_decoder')
def build_unet_3d_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds UNet3D decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A OneOfConfig. Model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
Returns:
A `tf.keras.Model` instance of the UNet3D decoder.
"""
decoder_type = model_config.decoder.type
decoder_cfg = model_config.decoder.get()
assert decoder_type == 'unet_3d_decoder', (f'Inconsistent decoder type '
f'{decoder_type}')
norm_activation_config = model_config.norm_activation
return UNet3DDecoder(
model_id=decoder_cfg.model_id,
input_specs=input_specs,
pool_size=decoder_cfg.pool_size,
kernel_regularizer=l2_regularizer,
activation=norm_activation_config.activation,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
use_sync_bn=norm_activation_config.use_sync_bn,
use_batch_normalization=decoder_cfg.use_batch_normalization,
use_deconvolution=decoder_cfg.use_deconvolution)
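# Illustrative usage sketch, not part of the original file: wires the decoder
# to a UNet3D backbone, mirroring unet_3d_decoder_test.py above.
if __name__ == '__main__':
  from official.projects.volumetric_models.modeling.backbones import unet_3d  # pylint: disable=g-import-not-at-top
  demo_backbone = unet_3d.UNet3D(model_id=4)
  demo_decoder = UNet3DDecoder(
      model_id=4, input_specs=demo_backbone.output_specs)
  demo_inputs = tf.keras.Input(shape=(64, 64, 64, 3), batch_size=1)
  demo_feats = demo_decoder(demo_backbone(demo_inputs))
  print(demo_feats['1'].shape)  # Expected: (1, 64, 64, 64, 64).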
| 7,760 | 39.005155 | 121 | py |
models | models-master/official/projects/volumetric_models/modeling/backbones/unet_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for 3D UNet backbone."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.modeling.backbones import unet_3d
class UNet3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([128, 64], 4),
([256, 128], 6),
)
def test_network_creation(self, input_size, model_id):
"""Test creation of UNet3D family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = unet_3d.UNet3D(model_id=model_id)
inputs = tf.keras.Input(
shape=(input_size[0], input_size[0], input_size[1], 3), batch_size=1)
endpoints = network(inputs)
for layer_depth in range(model_id):
self.assertAllEqual([
1, input_size[0] / 2**layer_depth, input_size[0] / 2**layer_depth,
input_size[1] / 2**layer_depth, 64 * 2**layer_depth
], endpoints[str(layer_depth + 1)].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=4,
pool_size=(2, 2, 2),
kernel_size=(3, 3, 3),
activation='relu',
base_filters=32,
kernel_regularizer=None,
norm_momentum=0.99,
norm_epsilon=0.001,
use_sync_bn=False,
use_batch_normalization=True)
network = unet_3d.UNet3D(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = unet_3d.UNet3D.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,530 | 33.202703 | 79 | py |
models | models-master/official/projects/volumetric_models/modeling/backbones/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbones package definition."""
from official.projects.volumetric_models.modeling.backbones.unet_3d import UNet3D
| 728 | 39.5 | 81 | py |
models | models-master/official/projects/volumetric_models/modeling/backbones/unet_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of 3D UNet Model encoder part.
[1] Özgün Çiçek, Ahmed Abdulkadir, Soeren S. Lienkamp, Thomas Brox, Olaf
Ronneberger. 3D U-Net: Learning Dense Volumetric Segmentation from Sparse
Annotation. arXiv:1606.06650.
"""
from typing import Any, Mapping, Sequence
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.volumetric_models.modeling import nn_blocks_3d
from official.vision.modeling.backbones import factory
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class UNet3D(tf.keras.Model):
"""Class to build 3D UNet backbone."""
def __init__(
self,
model_id: int,
      input_specs: layers.InputSpec = layers.InputSpec(
          shape=[None, None, None, None, 3]),
pool_size: Sequence[int] = (2, 2, 2),
kernel_size: Sequence[int] = (3, 3, 3),
base_filters: int = 32,
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_sync_bn: bool = False,
use_batch_normalization: bool = False, # type: ignore # typed-keras
**kwargs):
"""3D UNet backbone initialization function.
Args:
model_id: The depth of UNet3D backbone model. The greater the depth, the
more max pooling layers will be added to the model. Lowering the depth
may reduce the amount of memory required for training.
input_specs: The specs of the input tensor. It specifies a 5D input of
[batch, height, width, volume, channel] for `channel_last` data format
or [batch, channel, height, width, volume] for `channel_first` data
format.
pool_size: The pooling size for the max pooling operations.
kernel_size: The kernel size for 3D convolution.
base_filters: The number of filters that the first layer in the
convolution network will have. Following layers will contain a multiple
of this number. Lowering this number will likely reduce the amount of
memory required to train the model.
      kernel_regularizer: A tf.keras.regularizers.Regularizer object for the
        Conv3D layers. Default to None.
activation: The name of the activation function.
norm_momentum: The normalization momentum for the moving average.
norm_epsilon: A float added to variance to avoid dividing by zero.
use_sync_bn: If True, use synchronized batch normalization.
use_batch_normalization: If set to True, use batch normalization after
convolution and before activation. Default to False.
**kwargs: Keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._pool_size = pool_size
self._kernel_size = kernel_size
self._activation = activation
self._base_filters = base_filters
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._use_sync_bn = use_sync_bn
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
self._kernel_regularizer = kernel_regularizer
self._use_batch_normalization = use_batch_normalization
# Build 3D UNet.
inputs = tf.keras.Input(
shape=input_specs.shape[1:], dtype=input_specs.dtype)
x = inputs
endpoints = {}
# Add levels with max pooling to downsample input.
for layer_depth in range(model_id):
      # Two convolutions are applied sequentially without downsampling.
filter_num = base_filters * (2**layer_depth)
x2 = nn_blocks_3d.BasicBlock3DVolume(
filters=[filter_num, filter_num * 2],
strides=(1, 1, 1),
kernel_size=self._kernel_size,
kernel_regularizer=self._kernel_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
use_batch_normalization=self._use_batch_normalization)(
x)
if layer_depth < model_id - 1:
x = layers.MaxPool3D(
pool_size=pool_size,
strides=(2, 2, 2),
padding='valid',
data_format=tf.keras.backend.image_data_format())(
x2)
else:
x = x2
endpoints[str(layer_depth + 1)] = x2
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(UNet3D, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def get_config(self) -> Mapping[str, Any]:
return {
'model_id': self._model_id,
'pool_size': self._pool_size,
'kernel_size': self._kernel_size,
'activation': self._activation,
'base_filters': self._base_filters,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'kernel_regularizer': self._kernel_regularizer,
'use_batch_normalization': self._use_batch_normalization
}
@classmethod
def from_config(cls, config: Mapping[str, Any], custom_objects=None):
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
"""Returns a dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('unet_3d')
def build_unet3d(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds 3D UNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'unet_3d', (f'Inconsistent backbone type '
f'{backbone_type}')
return UNet3D(
model_id=backbone_cfg.model_id,
input_specs=input_specs,
pool_size=backbone_cfg.pool_size,
base_filters=backbone_cfg.base_filters,
kernel_regularizer=l2_regularizer,
activation=norm_activation_config.activation,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
use_sync_bn=norm_activation_config.use_sync_bn,
use_batch_normalization=backbone_cfg.use_batch_normalization)
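# Illustrative usage sketch, not part of the original file: constructs the
# backbone directly and prints the per-level output shapes, mirroring
# unet_3d_test.py above.
if __name__ == '__main__':
  demo_backbone = UNet3D(model_id=4)
  demo_inputs = tf.keras.Input(shape=(64, 64, 64, 3), batch_size=1)
  demo_endpoints = demo_backbone(demo_inputs)
  for demo_level in sorted(demo_endpoints):
    print(demo_level, demo_endpoints[demo_level].shape)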
| 7,065 | 38.920904 | 139 | py |
models | models-master/official/projects/volumetric_models/modeling/heads/segmentation_heads_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segmentation heads."""
from typing import Any, Union, Sequence, Mapping, Tuple
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class SegmentationHead3D(tf.keras.layers.Layer):
"""Segmentation head for 3D input."""
def __init__(self,
num_classes: int,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
upsample_factor: int = 1,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_batch_normalization: bool = False,
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
bias_regularizer: tf.keras.regularizers.Regularizer = None,
output_logits: bool = True, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initialize params to build segmentation head.
Args:
num_classes: `int` number of mask classification categories. The number of
classes does not include background class.
level: `int` or `str`, level to use to build segmentation head.
      num_convs: `int` number of stacked convolutions before the last
        prediction layer.
num_filters: `int` number to specify the number of filters used. Default
is 256.
upsample_factor: `int` number to specify the upsampling factor to generate
finer mask. Default 1 means no upsampling is applied.
activation: `string`, indicating which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: `bool`, whether to use synchronized batch normalization
across different replicas.
norm_momentum: `float`, the momentum parameter of the normalization
layers.
norm_epsilon: `float`, the epsilon parameter of the normalization layers.
use_batch_normalization: A bool of whether to use batch normalization or
not.
kernel_regularizer: `tf.keras.regularizers.Regularizer` object for layer
kernel.
bias_regularizer: `tf.keras.regularizers.Regularizer` object for bias.
output_logits: A `bool` of whether to output logits or not. Default
is True. If set to False, output softmax.
**kwargs: other keyword arguments passed to Layer.
"""
super(SegmentationHead3D, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'level': level,
'num_convs': num_convs,
'num_filters': num_filters,
'upsample_factor': upsample_factor,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'use_batch_normalization': use_batch_normalization,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'output_logits': output_logits
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation, use_keras_layer=True)
def build(self, input_shape: Union[tf.TensorShape, Sequence[tf.TensorShape]]):
"""Creates the variables of the segmentation head."""
conv_op = tf.keras.layers.Conv3D
conv_kwargs = {
'kernel_size': (3, 3, 3),
'padding': 'same',
'use_bias': False,
'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=0.01),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
final_kernel_size = (1, 1, 1)
bn_op = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn'] else
tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
# Segmentation head layers.
self._convs = []
self._norms = []
for i in range(self._config_dict['num_convs']):
conv_name = 'segmentation_head_conv_{}'.format(i)
self._convs.append(
conv_op(
name=conv_name,
filters=self._config_dict['num_filters'],
**conv_kwargs))
norm_name = 'segmentation_head_norm_{}'.format(i)
if self._config_dict['use_batch_normalization']:
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
self._classifier = conv_op(
name='segmentation_output',
filters=self._config_dict['num_classes'],
kernel_size=final_kernel_size,
padding='valid',
activation=None,
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
super(SegmentationHead3D, self).build(input_shape)
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]]):
"""Forward pass of the segmentation head.
    It supports either a tuple of 2 tensors or a tuple of 2 dictionaries. The
    first is backbone endpoints, and the second is decoder endpoints. When the
    inputs are tensors, they come from a single level of feature maps. When
    the inputs are dictionaries, they contain multiple levels of feature maps,
    where the key is the index of the feature map.
    Args:
      inputs: A tuple of 2 feature map tensors of shape
        [batch, height_l, width_l, depth_l, channels] or 2 dictionaries of
        tensors:
        - key: A `str` of the level of the multilevel features.
        - values: A `tf.Tensor` of the feature map tensors, whose shape is
          [batch, height_l, width_l, depth_l, channels].
        The first is backbone endpoints, and the second is decoder endpoints.
Returns:
segmentation prediction mask: A `tf.Tensor` of the segmentation mask
scores predicted from input features.
"""
decoder_output = inputs[1]
x = decoder_output[str(self._config_dict['level'])] if isinstance(
decoder_output, dict) else decoder_output
for i, conv in enumerate(self._convs):
x = conv(x)
if self._norms:
x = self._norms[i](x)
x = self._activation(x)
x = tf.keras.layers.UpSampling3D(size=self._config_dict['upsample_factor'])(
x)
x = self._classifier(x)
return x if self._config_dict['output_logits'] else tf.keras.layers.Softmax(
dtype='float32')(
x)
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Mapping[str, Any]):
return cls(**config)
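# Illustrative usage sketch, not part of the original file: runs the head on
# random backbone/decoder features, mirroring segmentation_heads_3d_test.py.
if __name__ == '__main__':
  import numpy as np  # pylint: disable=g-import-not-at-top
  demo_head = SegmentationHead3D(num_classes=10, level=1, num_convs=1)
  demo_features = {'1': np.random.rand(2, 32, 32, 32, 16).astype('float32')}
  demo_logits = demo_head((demo_features, demo_features))
  print(demo_logits.shape)  # Expected: (2, 32, 32, 32, 10).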
| 7,501 | 39.333333 | 101 | py |
models | models-master/official/projects/volumetric_models/modeling/heads/segmentation_heads_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_heads.py."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.volumetric_models.modeling.heads import segmentation_heads_3d
class SegmentationHead3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(1, 0, True),
(2, 1, False),
)
def test_forward(self, level, num_convs, use_bn):
head = segmentation_heads_3d.SegmentationHead3D(
num_classes=10,
level=level,
num_convs=num_convs,
use_batch_normalization=use_bn)
backbone_features = {
'1': np.random.rand(2, 128, 128, 128, 16),
'2': np.random.rand(2, 64, 64, 64, 16),
}
decoder_features = {
'1': np.random.rand(2, 128, 128, 128, 16),
'2': np.random.rand(2, 64, 64, 64, 16),
}
logits = head((backbone_features, decoder_features))
if str(level) in decoder_features:
self.assertAllEqual(logits.numpy().shape, [
2, decoder_features[str(level)].shape[1],
decoder_features[str(level)].shape[2],
decoder_features[str(level)].shape[3], 10
])
def test_serialize_deserialize(self):
head = segmentation_heads_3d.SegmentationHead3D(num_classes=10, level=3)
config = head.get_config()
new_head = segmentation_heads_3d.SegmentationHead3D.from_config(config)
self.assertAllEqual(head.get_config(), new_head.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,092 | 32.758065 | 84 | py |
models | models-master/official/projects/volumetric_models/tasks/semantic_segmentation_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image segmentation task definition."""
from typing import Any, Dict, Mapping, Optional, Sequence, Union
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
from official.projects.volumetric_models.dataloaders import segmentation_input_3d
from official.projects.volumetric_models.evaluation import segmentation_metrics
from official.projects.volumetric_models.losses import segmentation_losses
from official.projects.volumetric_models.modeling import factory
@task_factory.register_task_cls(exp_cfg.SemanticSegmentation3DTask)
class SemanticSegmentation3DTask(base_task.Task):
"""A task for semantic segmentation."""
def build_model(self) -> tf.keras.Model:
"""Builds segmentation model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size +
[self.task_config.model.num_channels],
dtype=self.task_config.train_data.dtype)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
model = factory.build_segmentation_model_3d(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
    # Create a dummy input and call the model instance to initialize the model.
    # This is needed when launching multiple experiments using the same model
    # directory: since a trained model already exists there, the forward pass
    # will not run and the model would otherwise never be built. This is only
    # done when spatial partitioning is not enabled; otherwise it would fail
    # with OOM due to the extremely large input.
if (not self.task_config.train_input_partition_dims) and (
not self.task_config.eval_input_partition_dims):
dummy_input = tf.random.uniform(shape=[1] + list(input_specs.shape[1:]))
_ = model(dummy_input)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self, params, input_context=None) -> tf.data.Dataset:
"""Builds classification input."""
decoder = segmentation_input_3d.Decoder(
image_field_key=params.image_field_key,
label_field_key=params.label_field_key)
parser = segmentation_input_3d.Parser(
input_size=params.input_size,
num_classes=params.num_classes,
num_channels=params.num_channels,
image_field_key=params.image_field_key,
label_field_key=params.label_field_key,
dtype=params.dtype,
label_dtype=params.label_dtype)
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: tf.Tensor,
model_outputs: tf.Tensor,
aux_losses=None) -> tf.Tensor:
"""Segmentation loss.
Args:
      labels: The groundtruth label tensor.
      model_outputs: The output logits of the segmentation model.
      aux_losses: Auxiliary loss tensors, i.e. `losses` in tf.keras.Model.
Returns:
The total loss tensor.
"""
segmentation_loss_fn = segmentation_losses.SegmentationLossDiceScore(
metric_type='adaptive')
total_loss = segmentation_loss_fn(model_outputs, labels)
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self,
training: bool = True) -> Sequence[tf.keras.metrics.Metric]:
"""Gets streaming metrics for training/validation."""
metrics = []
num_classes = self.task_config.model.num_classes
if training:
metrics.extend([
tf.keras.metrics.CategoricalAccuracy(
name='train_categorical_accuracy', dtype=tf.float32)
])
else:
self.metrics = [
segmentation_metrics.DiceScore(
num_classes=num_classes,
metric_type='generalized',
per_class_metric=self.task_config.evaluation
.report_per_class_metric,
name='val_generalized_dice',
dtype=tf.float32)
]
return metrics
def train_step(
self,
inputs,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[Sequence[tf.keras.metrics.Metric]] = None
) -> Dict[Any, Any]:
"""Does forward and backward.
Args:
      inputs: A tuple of (features, labels) input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.train_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features = strategy.experimental_split_to_logical_devices(
features, input_partition_dims)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
      # Casting the output layer to float32 is necessary when mixed_precision
      # is mixed_float16 or mixed_bfloat16 to ensure the output is cast to
      # float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
outputs = outputs['logits']
if self.task_config.model.head.output_logits:
outputs = tf.nn.softmax(outputs)
# Computes per-replica loss.
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses)
      # Scales the loss, as the default gradient allreduce performs a sum
      # inside the optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
# Compute all metrics within strategy scope for training.
if metrics:
labels = tf.cast(labels, tf.float32)
outputs = tf.cast(outputs, tf.float32)
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
return logs
def validation_step(
self,
inputs,
model: tf.keras.Model,
metrics: Optional[Sequence[tf.keras.metrics.Metric]] = None
) -> Dict[Any, Any]:
"""Validatation step.
Args:
      inputs: A tuple of (features, labels) input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.eval_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features = strategy.experimental_split_to_logical_devices(
features, input_partition_dims)
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
outputs = outputs['logits']
if self.task_config.model.head.output_logits:
outputs = tf.nn.softmax(outputs)
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
# Compute dice score metrics on CPU.
for metric in self.metrics:
labels = tf.cast(labels, tf.float32)
logits = tf.cast(outputs, tf.float32)
logs.update({metric.name: (labels, logits)})
return logs
def inference_step(self, inputs, model: tf.keras.Model) -> tf.Tensor:
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(
self,
state: Optional[Sequence[Union[segmentation_metrics.DiceScore,
tf.keras.metrics.Metric]]] = None,
step_outputs: Optional[Mapping[str, Any]] = None
) -> Sequence[tf.keras.metrics.Metric]:
"""Aggregates statistics to compute metrics over training.
Args:
state: A sequence of tf.keras.metrics.Metric objects. Each element records
a metric.
step_outputs: A dictionary of [metric_name, (labels, output)] from a step.
Returns:
An updated sequence of tf.keras.metrics.Metric objects.
"""
if state is None:
for metric in self.metrics:
metric.reset_states()
state = self.metrics
for metric in self.metrics:
labels = step_outputs[metric.name][0]
predictions = step_outputs[metric.name][1]
      # If `step_outputs` is distributed, it contains a tuple of Tensors instead
# of a single Tensor, so we need to concatenate them along the batch
# dimension in this case to have a single Tensor.
if isinstance(labels, tuple):
labels = tf.concat(list(labels), axis=0)
if isinstance(predictions, tuple):
predictions = tf.concat(list(predictions), axis=0)
labels = tf.cast(labels, tf.float32)
predictions = tf.cast(predictions, tf.float32)
metric.update_state(labels, predictions)
return state
def reduce_aggregated_logs(
self,
aggregated_logs: Optional[Mapping[str, Any]] = None,
global_step: Optional[tf.Tensor] = None) -> Mapping[str, float]:
"""Reduces logs to obtain per-class metrics if needed.
Args:
aggregated_logs: An optional dictionary containing aggregated logs.
global_step: An optional `tf.Tensor` of current global training steps.
Returns:
The reduced logs containing per-class metrics and overall metrics.
Raises:
ValueError: If `self.metrics` does not contain exactly 1 metric object.
"""
result = {}
if len(self.metrics) != 1:
      raise ValueError('Exactly one metric must be present, but {0} are '
'present.'.format(len(self.metrics)))
metric = self.metrics[0].result().numpy()
if self.task_config.evaluation.report_per_class_metric:
for i, metric_val in enumerate(metric):
metric_name = self.metrics[0].name + '/class_{0}'.format(
i - 1) if i > 0 else self.metrics[0].name
result.update({metric_name: metric_val})
else:
result.update({self.metrics[0].name: metric})
return result
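# Illustrative sketch, not part of the original file: instantiates the task
# from the registered 'seg_unet3d_test' experiment (the name used in
# semantic_segmentation_3d_test.py) and builds its model.
if __name__ == '__main__':
  # pylint: disable=g-import-not-at-top
  from official.core import exp_factory
  from official.projects.volumetric_models.modeling import backbones  # pylint: disable=unused-import
  from official.projects.volumetric_models.modeling import decoders  # pylint: disable=unused-import
  demo_config = exp_factory.get_exp_config('seg_unet3d_test')
  demo_task = SemanticSegmentation3DTask(demo_config.task)
  demo_model = demo_task.build_model()
  print(demo_model.name)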
| 12,897 | 35.851429 | 91 | py |
models | models-master/official/projects/volumetric_models/tasks/semantic_segmentation_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for semantic segmentation task."""
# pylint: disable=unused-import
import functools
import os
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.modeling import optimization
from official.projects.volumetric_models.evaluation import segmentation_metrics
from official.projects.volumetric_models.modeling import backbones
from official.projects.volumetric_models.modeling import decoders
from official.projects.volumetric_models.tasks import semantic_segmentation_3d as img_seg_task
from official.vision.dataloaders import tfexample_utils
class SemanticSegmentationTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
# pylint: disable=g-complex-comprehension
examples = [
tfexample_utils.create_3d_image_test_example(
image_height=32, image_width=32, image_volume=32, image_channel=2)
for _ in range(20)
]
# pylint: enable=g-complex-comprehension
tfexample_utils.dump_to_tfrecord(self._data_path, tf_examples=examples)
@parameterized.parameters(('seg_unet3d_test',))
def test_task(self, config_name):
config = exp_factory.get_exp_config(config_name)
config.task.train_data.input_path = self._data_path
config.task.train_data.global_batch_size = 4
config.task.train_data.shuffle_buffer_size = 4
config.task.validation_data.input_path = self._data_path
config.task.validation_data.shuffle_buffer_size = 4
config.task.evaluation.report_per_class_metric = True
task = img_seg_task.SemanticSegmentation3DTask(config.task)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
config.task.train_data)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
# Check if training loss is produced.
self.assertIn('loss', logs)
# Obtain distributed outputs.
distributed_outputs = strategy.run(
functools.partial(
task.validation_step,
model=model,
metrics=task.build_metrics(training=False)),
args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
# Check if validation loss is produced.
self.assertIn('loss', outputs)
# Check if state is updated.
state = task.aggregate_logs(state=None, step_outputs=outputs)
self.assertLen(state, 1)
self.assertIsInstance(state[0], segmentation_metrics.DiceScore)
# Check if all metrics are produced.
result = task.reduce_aggregated_logs(aggregated_logs={}, global_step=1)
self.assertIn('val_generalized_dice', result)
self.assertIn('val_generalized_dice/class_0', result)
self.assertIn('val_generalized_dice/class_1', result)
if __name__ == '__main__':
tf.test.main()
| 4,069 | 38.514563 | 94 | py |
models | models-master/official/projects/volumetric_models/losses/segmentation_losses_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_losses.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.volumetric_models.losses import segmentation_losses
class SegmentationLossDiceScoreTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((None, 0.5, 0.3), ('generalized', 0.5, 0.3),
('adaptive', 0.5, 0.07))
def test_supported_loss(self, metric_type, output, expected_score):
loss = segmentation_losses.SegmentationLossDiceScore(
metric_type=metric_type)
logits = tf.constant(output, shape=[2, 128, 128, 128, 1], dtype=tf.float32)
labels = tf.ones(shape=[2, 128, 128, 128, 1], dtype=tf.float32)
actual_score = loss(logits=logits, labels=labels)
self.assertAlmostEqual(actual_score.numpy(), expected_score, places=1)
@parameterized.parameters((None, 0, 0), ('generalized', 0, 0),
('adaptive', 0, 0))
def test_supported_loss_zero_labels_logits(self, metric_type, output,
expected_score):
loss = segmentation_losses.SegmentationLossDiceScore(
metric_type=metric_type)
logits = tf.constant(output, shape=[2, 128, 128, 128, 1], dtype=tf.float32)
labels = tf.zeros(shape=[2, 128, 128, 128, 1], dtype=tf.float32)
actual_score = loss(logits=logits, labels=labels)
self.assertAlmostEqual(actual_score.numpy(), expected_score, places=1)
if __name__ == '__main__':
tf.test.main()
| 2,098 | 41.836735 | 79 | py |
models | models-master/official/projects/volumetric_models/losses/segmentation_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for segmentation models."""
from typing import Optional, Sequence
import tensorflow as tf
class SegmentationLossDiceScore(object):
"""Semantic segmentation loss using generalized dice score.
Dice score (DSC) is a similarity measure that equals twice the number of
elements common to both sets divided by the sum of the number of elements
in each set. It is commonly used to evaluate segmentation performance to
measure the overlap of predicted and groundtruth regions.
(https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient)
Generalized dice score is the dice score weighted by the volume of groundtruth
labels per class. Adaptive dice score adds weights to generalized dice score.
It assigns larger weights to lower dice score, so that wrong predictions
contribute more to the total loss. Model will then be trained to focus more on
these hard examples.
"""
def __init__(self,
metric_type: Optional[str] = None,
axis: Optional[Sequence[int]] = (1, 2, 3)):
"""Initializes dice score loss object.
Args:
metric_type: An optional `str` specifying the type of the dice score to
compute. Compute generalized or adaptive dice score if metric type is
`generalized` or `adaptive`; otherwise compute original dice score.
axis: An optional sequence of `int` specifying the axis to perform reduce
ops for raw dice score.
"""
self._dice_score = 0
self._metric_type = metric_type
self._axis = axis
def __call__(self, logits: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
"""Computes and returns a loss based on 1 - dice score.
Args:
logits: A Tensor of the prediction.
labels: A Tensor of the groundtruth label.
Returns:
The loss value of (1 - dice score).
"""
labels = tf.cast(labels, logits.dtype)
if labels.get_shape().ndims < 2 or logits.get_shape().ndims < 2:
raise ValueError('The labels and logits must be at least rank 2.')
epsilon = tf.keras.backend.epsilon()
keep_label_axis = list(range(len(logits.shape) - 1))
keep_batch_axis = list(range(1, len(logits.shape)))
# Compute sample mask to filter out samples with both all-0's labels and
# predictions because such samples should not contribute to mean dice score
# in this batch.
sample_mask = tf.logical_or(
tf.cast(tf.reduce_sum(labels, axis=keep_batch_axis), dtype=tf.bool),
tf.cast(tf.reduce_sum(logits, axis=keep_batch_axis), dtype=tf.bool))
labels = tf.boolean_mask(labels, sample_mask)
logits = tf.boolean_mask(logits, sample_mask)
# If all samples are filtered out, return 0 as the loss so this batch does
# not contribute.
if labels.shape[0] == 0:
return tf.convert_to_tensor(0.0)
# Calculate intersections and unions per class.
intersection = tf.reduce_sum(labels * logits, axis=keep_label_axis)
union = tf.reduce_sum(labels + logits, axis=keep_label_axis)
if self._metric_type == 'generalized':
# Calculate the volume of groundtruth labels.
w = tf.math.reciprocal(
tf.square(tf.reduce_sum(labels, axis=keep_label_axis)) + epsilon)
# Calculate the weighted dice score and normalizer.
dice = 2 * tf.reduce_sum(w * intersection)
normalizer = tf.reduce_sum(w * union)
if normalizer == 0:
return tf.convert_to_tensor(1.0)
dice = tf.cast(dice, dtype=tf.float32)
normalizer = tf.cast(normalizer, dtype=tf.float32)
return 1 - tf.reduce_mean(dice / normalizer)
elif self._metric_type == 'adaptive':
dice = 2.0 * intersection / (union + epsilon)
# Calculate weights based on Dice scores.
weights = tf.exp(-1.0 * dice)
# Multiply weights by corresponding scores and get sum.
weighted_dice = tf.reduce_sum(weights * dice)
# Calculate normalization factor.
normalizer = tf.cast(tf.size(input=dice), dtype=tf.float32) * tf.exp(-1.0)
if normalizer == 0:
return tf.convert_to_tensor(1.0)
weighted_dice = tf.cast(weighted_dice, dtype=tf.float32)
return 1 - tf.reduce_mean(weighted_dice / normalizer)
else:
summation = tf.reduce_sum(
labels, axis=self._axis) + tf.reduce_sum(
logits, axis=self._axis)
dice = (2 * tf.reduce_sum(labels * logits, axis=self._axis)) / (
summation + epsilon)
return 1 - tf.reduce_mean(dice)
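# A minimal usage sketch, assuming toy all-ones inputs chosen so the result is
# hand-checkable: with identical labels and logits the intersection is half of
# the summation, so the plain dice score is ~1 and the loss is ~0.
def _dice_loss_sketch():
  loss_fn = SegmentationLossDiceScore()
  logits = tf.ones([1, 4, 4, 4, 1], dtype=tf.float32)
  labels = tf.ones([1, 4, 4, 4, 1], dtype=tf.float32)
  return loss_fn(logits=logits, labels=labels)  # ~0.0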
| 5,068 | 39.552 | 80 | py |
models | models-master/official/projects/triviaqa/evaluate.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evalutes TriviaQA predictions."""
import json
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.projects.triviaqa import evaluation
flags.DEFINE_string('gold_path', None,
'Path to golden validation, i.e. wikipedia-dev.json.')
flags.DEFINE_string('predictions_path', None,
'Path to predictions in JSON format')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
with tf.io.gfile.GFile(FLAGS.gold_path) as f:
ground_truth = {
datum['QuestionId']: datum['Answer'] for datum in json.load(f)['Data']
}
with tf.io.gfile.GFile(FLAGS.predictions_path) as f:
predictions = json.load(f)
logging.info(evaluation.evaluate_triviaqa(ground_truth, predictions))
if __name__ == '__main__':
flags.mark_flag_as_required('predictions_path')
app.run(main)
| 1,550 | 30.653061 | 78 | py |
models | models-master/official/projects/triviaqa/prediction.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for inference."""
import tensorflow as tf
def split_and_pad(strategy, batch_size, x):
"""Split and pad for interence."""
per_replica_size = batch_size // strategy.num_replicas_in_sync
def slice_fn(x, i):
begin = min(x.shape[0], i * per_replica_size)
end = min(x.shape[0], (i + 1) * per_replica_size)
indices = tf.range(begin, end, dtype=tf.int32)
return tf.gather(x, tf.pad(indices, [[0, per_replica_size - end + begin]]))
# pylint: disable=g-long-lambda
return tf.nest.map_structure(
lambda x: strategy.experimental_distribute_values_from_function(
lambda ctx: slice_fn(x, ctx.replica_id_in_sync_group)), x)
# pylint: enable=g-long-lambda
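# Note on padding behavior (inferred from slice_fn above): when the final
# batch is smaller than `batch_size`, the padded index slots gather row 0 as
# filler, so callers trim the overhang afterwards, as predict.py does with
# `logits[:features['token_ids'].shape[0]]`.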
def decode_logits(top_k, max_size, logits, default):
"""Get the span from logits."""
logits = tf.transpose(logits, [0, 2, 1])
values, indices = tf.math.top_k(logits, top_k)
width = (
tf.expand_dims(indices[:, 1, :], -2) -
tf.expand_dims(indices[:, 0, :], -1))
mask = tf.logical_and(width >= 0, width <= max_size)
scores = (
tf.expand_dims(values[:, 0, :], -1) + tf.expand_dims(values[:, 1, :], -2))
scores = tf.where(mask, scores, -1e8)
flat_indices = tf.argmax(tf.reshape(scores, (-1, top_k * top_k)), -1)
begin = tf.gather(
indices[:, 0, :], tf.math.floordiv(flat_indices, top_k), batch_dims=1)
end = tf.gather(
indices[:, 1, :], tf.math.mod(flat_indices, top_k), batch_dims=1)
reduced_mask = tf.math.reduce_any(mask, [-1, -2])
return (tf.where(reduced_mask, begin,
default), tf.where(reduced_mask, end, default),
tf.math.reduce_max(scores, [-1, -2]))
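# A minimal sketch with assumed dummy shapes: decode_logits takes
# [batch, seq_len, 2] begin/end logits, scores all top_k x top_k candidate
# pairs, and keeps the best pair with 0 <= end - begin <= max_size. `default`
# is returned for rows where no candidate pair satisfies the width constraint.
def _decode_logits_sketch():
  logits = tf.random.normal([2, 16, 2])  # [batch, seq_len, (begin, end)]
  default = tf.zeros([2], tf.int32)      # fallback index per example
  return decode_logits(top_k=4, max_size=8, logits=logits, default=default)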
@tf.function
def decode_answer(context, begin, end, token_offsets, end_limit):
i = tf.gather(token_offsets, begin, batch_dims=1)
j = tf.gather(token_offsets, tf.minimum(end + 1, end_limit), batch_dims=1)
j = tf.where(end == end_limit, tf.cast(tf.strings.length(context), tf.int64),
j)
return tf.strings.substr(context, i, j - i)
def distributed_logits_fn(model, x):
return model.distribute_strategy.run(
lambda x: model(x, training=False), args=(x,))
| 2,735 | 38.085714 | 80 | py |
models | models-master/official/projects/triviaqa/modeling.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modeling for TriviaQA."""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.configs import encoders
class TriviaQaHead(tf.keras.layers.Layer):
"""Computes logits given token and global embeddings."""
def __init__(self,
intermediate_size,
intermediate_activation=tf_utils.get_activation('gelu'),
dropout_rate=0.0,
attention_dropout_rate=0.0,
**kwargs):
super(TriviaQaHead, self).__init__(**kwargs)
self._attention_dropout = tf.keras.layers.Dropout(attention_dropout_rate)
self._intermediate_dense = tf.keras.layers.Dense(intermediate_size)
self._intermediate_activation = tf.keras.layers.Activation(
intermediate_activation)
self._output_dropout = tf.keras.layers.Dropout(dropout_rate)
self._output_layer_norm = tf.keras.layers.LayerNormalization()
self._logits_dense = tf.keras.layers.Dense(2)
def build(self, input_shape):
output_shape = input_shape['token_embeddings'][-1]
self._output_dense = tf.keras.layers.Dense(output_shape)
super(TriviaQaHead, self).build(input_shape)
def call(self, inputs, training=None):
token_embeddings = inputs['token_embeddings']
token_ids = inputs['token_ids']
question_lengths = inputs['question_lengths']
x = self._attention_dropout(token_embeddings, training=training)
intermediate_outputs = self._intermediate_dense(x)
intermediate_outputs = self._intermediate_activation(intermediate_outputs)
outputs = self._output_dense(intermediate_outputs)
outputs = self._output_dropout(outputs, training=training)
outputs = self._output_layer_norm(outputs + token_embeddings)
logits = self._logits_dense(outputs)
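    # Padding tokens (id 0) and question tokens get a large negative logit so
    # they are never selected as answer span boundaries.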
logits -= tf.expand_dims(
tf.cast(tf.equal(token_ids, 0), tf.float32) + tf.sequence_mask(
question_lengths, logits.shape[-2], dtype=tf.float32), -1) * 1e6
return logits
class TriviaQaModel(tf.keras.Model):
"""Model for TriviaQA."""
def __init__(self, model_config: encoders.EncoderConfig, sequence_length: int,
**kwargs):
inputs = dict(
token_ids=tf.keras.Input((sequence_length,), dtype=tf.int32),
question_lengths=tf.keras.Input((), dtype=tf.int32))
encoder = encoders.build_encoder(model_config)
x = encoder(
dict(
input_word_ids=inputs['token_ids'],
input_mask=tf.cast(inputs['token_ids'] > 0, tf.int32),
input_type_ids=1 -
tf.sequence_mask(inputs['question_lengths'], sequence_length,
tf.int32)))['sequence_output']
logits = TriviaQaHead(
model_config.get().intermediate_size,
dropout_rate=model_config.get().dropout_rate,
attention_dropout_rate=model_config.get().attention_dropout_rate)(
dict(
token_embeddings=x,
token_ids=inputs['token_ids'],
question_lengths=inputs['question_lengths']))
super(TriviaQaModel, self).__init__(inputs, logits, **kwargs)
self._encoder = encoder
@property
def encoder(self):
return self._encoder
class SpanOrCrossEntropyLoss(tf.keras.losses.Loss):
"""Cross entropy loss for multiple correct answers.
See https://arxiv.org/abs/1710.10723.
"""
def call(self, y_true, y_pred):
y_pred_masked = y_pred - tf.cast(y_true < 0.5, tf.float32) * 1e6
or_cross_entropy = (
tf.math.reduce_logsumexp(y_pred, axis=-2) -
tf.math.reduce_logsumexp(y_pred_masked, axis=-2))
return tf.math.reduce_sum(or_cross_entropy, -1)
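# A minimal sketch with assumed toy shapes: with several valid spans marked in
# y_true, this loss rewards putting probability mass on any one correct
# begin/end position rather than on all of them.
def _span_or_loss_sketch():
  loss_fn = SpanOrCrossEntropyLoss(reduction=tf.keras.losses.Reduction.NONE)
  y_pred = tf.random.normal([2, 8, 2])  # [batch, seq_len, (begin, end)]
  y_true = tf.tile(tf.one_hot([1, 3], 8)[:, :, tf.newaxis], [1, 1, 2])
  return loss_fn(y_true, y_pred)  # per-example loss, shape [2]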
def smooth_labels(label_smoothing, labels, question_lengths, token_ids):
mask = 1. - (
tf.cast(tf.equal(token_ids, 0), tf.float32) +
tf.sequence_mask(question_lengths, labels.shape[-2], dtype=tf.float32))
num_classes = tf.expand_dims(tf.math.reduce_sum(mask, -1, keepdims=True), -1)
labels = (1. - label_smoothing) * labels + (label_smoothing / num_classes)
return labels * tf.expand_dims(mask, -1)
| 4,629 | 39.26087 | 80 | py |
models | models-master/official/projects/triviaqa/dataset.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TriviaQA: A Reading Comprehension Dataset."""
import functools
import json
import os
from absl import logging
import apache_beam as beam
import six
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from official.projects.triviaqa import preprocess
_CITATION = """
@article{2017arXivtriviaqa,
author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
Daniel and {Zettlemoyer}, Luke},
        title = "{TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
journal = {arXiv e-prints},
year = 2017,
eid = {arXiv:1705.03551},
pages = {arXiv:1705.03551},
archivePrefix = {arXiv},
eprint = {1705.03551},
}
"""
_DOWNLOAD_URL_TMPL = (
"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-{}.tar.gz")
_TRAIN_FILE_FORMAT = "*-train.json"
_VALIDATION_FILE_FORMAT = "*-dev.json"
_TEST_FILE_FORMAT = "*test-without-answers.json"
_WEB_EVIDENCE_DIR = "evidence/web"
_WIKI_EVIDENCE_DIR = "evidence/wikipedia"
_DESCRIPTION = """\
TriviaQA is a reading comprehension dataset containing over 650K
question-answer-evidence triples. TriviaQA includes 95K question-answer
pairs authored by trivia enthusiasts and independently gathered evidence
documents, six per question on average, that provide high quality distant
supervision for answering the questions.
"""
_RC_DESCRIPTION = """\
Question-answer pairs where all documents for a given question contain the
answer string(s).
"""
_UNFILTERED_DESCRIPTION = """\
110k question-answer pairs for open domain QA where not all documents for a
given question contain the answer string(s). This makes the unfiltered dataset
more appropriate for IR-style QA.
"""
_CONTEXT_ADDENDUM = "Includes context from Wikipedia and search results."
def _web_evidence_dir(tmp_dir):
return tf.io.gfile.glob(os.path.join(tmp_dir, _WEB_EVIDENCE_DIR))
def _wiki_evidence_dir(tmp_dir):
return tf.io.gfile.glob(os.path.join(tmp_dir, _WIKI_EVIDENCE_DIR))
class TriviaQAConfig(tfds.core.BuilderConfig):
"""BuilderConfig for TriviaQA."""
def __init__(self, *, unfiltered=False, exclude_context=False, **kwargs):
"""BuilderConfig for TriviaQA.
Args:
unfiltered: bool, whether to use the unfiltered version of the dataset,
intended for open-domain QA.
exclude_context: bool, whether to exclude Wikipedia and search context for
reduced size.
**kwargs: keyword arguments forwarded to super.
"""
name = "unfiltered" if unfiltered else "rc"
if exclude_context:
name += ".nocontext"
description = _UNFILTERED_DESCRIPTION if unfiltered else _RC_DESCRIPTION
if not exclude_context:
description += _CONTEXT_ADDENDUM
super(TriviaQAConfig, self).__init__(
name=name,
description=description,
version=tfds.core.Version("1.1.1"),
**kwargs)
self.unfiltered = unfiltered
self.exclude_context = exclude_context
class BigBirdTriviaQAConfig(tfds.core.BuilderConfig):
"""BuilderConfig for TriviaQA."""
def __init__(self, **kwargs):
"""BuilderConfig for TriviaQA.
Args:
**kwargs: keyword arguments forwarded to super.
"""
name = "rc_wiki.preprocessed"
description = _RC_DESCRIPTION
super(BigBirdTriviaQAConfig, self).__init__(
name=name,
description=description,
version=tfds.core.Version("1.1.1"),
**kwargs)
self.unfiltered = False
self.exclude_context = False
def configure(self,
sentencepiece_model_path,
sequence_length,
stride,
global_sequence_length=None):
"""Configures additional user-specified arguments."""
self.sentencepiece_model_path = sentencepiece_model_path
self.sequence_length = sequence_length
self.stride = stride
if global_sequence_length is None and sequence_length is not None:
self.global_sequence_length = sequence_length // 16 + 64
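      # E.g. the default sequence_length of 4096 gives 4096 // 16 + 64 == 320
      # global tokens, which matches the predict.py flag default of 320.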
else:
self.global_sequence_length = global_sequence_length
logging.info(
"""
global_sequence_length: %s
sequence_length: %s
stride: %s
sentencepiece_model_path: %s""",
self.global_sequence_length, self.sequence_length,
self.stride, self.sentencepiece_model_path)
def validate(self):
"""Validates that user specifies valid arguments."""
if self.sequence_length is None:
raise ValueError("sequence_length must be specified for BigBird.")
if self.stride is None:
raise ValueError("stride must be specified for BigBird.")
if self.sentencepiece_model_path is None:
raise ValueError(
"sentencepiece_model_path must be specified for BigBird.")
def filter_files_for_big_bird(files):
filtered_files = [f for f in files if os.path.basename(f).startswith("wiki")]
assert len(filtered_files) == 1, "There should only be one wikipedia file."
return filtered_files
class TriviaQA(tfds.core.BeamBasedBuilder):
"""TriviaQA is a reading comprehension dataset.
  It contains over 650K question-answer-evidence triples.
"""
name = "bigbird_trivia_qa"
BUILDER_CONFIGS = [
BigBirdTriviaQAConfig(),
TriviaQAConfig(unfiltered=False, exclude_context=False), # rc
TriviaQAConfig(unfiltered=False, exclude_context=True), # rc.nocontext
TriviaQAConfig(unfiltered=True, exclude_context=False), # unfiltered
TriviaQAConfig(unfiltered=True, exclude_context=True),
      # unfiltered.nocontext
]
def __init__(self,
*,
sentencepiece_model_path=None,
sequence_length=None,
stride=None,
global_sequence_length=None,
**kwargs):
super(TriviaQA, self).__init__(**kwargs)
if isinstance(self.builder_config, BigBirdTriviaQAConfig):
self.builder_config.configure(
sentencepiece_model_path=sentencepiece_model_path,
sequence_length=sequence_length,
stride=stride,
global_sequence_length=global_sequence_length)
def _info(self):
if isinstance(self.builder_config, BigBirdTriviaQAConfig):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
supervised_keys=None,
homepage="http://nlp.cs.washington.edu/triviaqa/",
citation=_CITATION,
features=tfds.features.FeaturesDict({
"id": tfds.features.Text(),
"qid": tfds.features.Text(),
"question": tfds.features.Text(),
"context": tfds.features.Text(),
# Sequence features.
"token_ids": tfds.features.Tensor(shape=(None,), dtype=tf.int64),
"token_offsets":
tfds.features.Tensor(shape=(None,), dtype=tf.int64),
"segment_ids":
tfds.features.Tensor(shape=(None,), dtype=tf.int64),
"global_token_ids":
tfds.features.Tensor(shape=(None,), dtype=tf.int64),
# Start and end indices (inclusive).
"answers":
tfds.features.Tensor(shape=(None, 2), dtype=tf.int64),
}))
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"question":
tfds.features.Text(),
"question_id":
tfds.features.Text(),
"question_source":
tfds.features.Text(),
"entity_pages":
tfds.features.Sequence({
"doc_source":
tfds.features.Text(),
"filename":
tfds.features.Text(),
"title":
tfds.features.Text(),
"wiki_context":
tfds.features.Text(),
}),
"search_results":
tfds.features.Sequence({
"description":
tfds.features.Text(),
"filename":
tfds.features.Text(),
"rank":
tf.int32,
"title":
tfds.features.Text(),
"url":
tfds.features.Text(),
"search_context":
tfds.features.Text(),
}),
"answer":
tfds.features.FeaturesDict({
"aliases":
tfds.features.Sequence(tfds.features.Text()),
"normalized_aliases":
tfds.features.Sequence(tfds.features.Text()),
"matched_wiki_entity_name":
tfds.features.Text(),
"normalized_matched_wiki_entity_name":
tfds.features.Text(),
"normalized_value":
tfds.features.Text(),
"type":
tfds.features.Text(),
"value":
tfds.features.Text(),
}),
}),
supervised_keys=None,
homepage="http://nlp.cs.washington.edu/triviaqa/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
cfg = self.builder_config
download_urls = dict()
if not (cfg.unfiltered and cfg.exclude_context):
download_urls["rc"] = _DOWNLOAD_URL_TMPL.format("rc")
if cfg.unfiltered:
download_urls["unfiltered"] = _DOWNLOAD_URL_TMPL.format("unfiltered")
file_paths = dl_manager.download_and_extract(download_urls)
qa_dir = (
os.path.join(file_paths["unfiltered"], "triviaqa-unfiltered")
if cfg.unfiltered else
os.path.join(file_paths["rc"], "qa"))
train_files = tf.io.gfile.glob(os.path.join(qa_dir, _TRAIN_FILE_FORMAT))
valid_files = tf.io.gfile.glob(
os.path.join(qa_dir, _VALIDATION_FILE_FORMAT))
test_files = tf.io.gfile.glob(os.path.join(qa_dir, _TEST_FILE_FORMAT))
if cfg.exclude_context:
web_evidence_dir = None
wiki_evidence_dir = None
else:
web_evidence_dir = os.path.join(file_paths["rc"], _WEB_EVIDENCE_DIR)
wiki_evidence_dir = os.path.join(file_paths["rc"], _WIKI_EVIDENCE_DIR)
if isinstance(cfg, BigBirdTriviaQAConfig):
train_files = filter_files_for_big_bird(train_files)
valid_files = filter_files_for_big_bird(valid_files)
test_files = filter_files_for_big_bird(test_files)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={"files": train_files,
"web_dir": web_evidence_dir,
"wiki_dir": wiki_evidence_dir,
"answer": True}),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={"files": valid_files,
"web_dir": web_evidence_dir,
"wiki_dir": wiki_evidence_dir,
"answer": True}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={"files": test_files,
"web_dir": web_evidence_dir,
"wiki_dir": wiki_evidence_dir,
"answer": False}),
]
def _build_pcollection(self, pipeline, files, web_dir, wiki_dir, answer):
if isinstance(self.builder_config, BigBirdTriviaQAConfig):
self.builder_config.validate()
question_answers = preprocess.read_question_answers(files[0])
return preprocess.make_pipeline(
pipeline,
question_answers=question_answers,
answer=answer,
max_num_tokens=self.builder_config.sequence_length,
max_num_global_tokens=self.builder_config.global_sequence_length,
stride=self.builder_config.stride,
sentencepiece_model_path=self.builder_config.sentencepiece_model_path,
wikipedia_dir=wiki_dir,
web_dir=web_dir)
parse_example_fn = functools.partial(parse_example,
self.builder_config.exclude_context,
web_dir, wiki_dir)
return (pipeline
| beam.Create(files)
| beam.ParDo(ReadQuestions())
| beam.Reshuffle()
| beam.Map(parse_example_fn))
class ReadQuestions(beam.DoFn):
"""Read questions from JSON."""
def process(self, file):
with tf.io.gfile.GFile(file) as f:
data = json.load(f)
for question in data["Data"]:
example = {"SourceFile": os.path.basename(file)}
example.update(question)
yield example
def parse_example(exclude_context, web_dir, wiki_dir, article):
"""Return a single example from an article JSON record."""
def _strip(collection):
return [item.strip() for item in collection]
if "Answer" in article:
answer = article["Answer"]
answer_dict = {
"aliases":
_strip(answer["Aliases"]),
"normalized_aliases":
_strip(answer["NormalizedAliases"]),
"matched_wiki_entity_name":
answer.get("MatchedWikiEntryName", "").strip(),
"normalized_matched_wiki_entity_name":
answer.get("NormalizedMatchedWikiEntryName", "").strip(),
"normalized_value":
answer["NormalizedValue"].strip(),
"type":
answer["Type"].strip(),
"value":
answer["Value"].strip(),
}
else:
answer_dict = {
"aliases": [],
"normalized_aliases": [],
"matched_wiki_entity_name": "<unk>",
"normalized_matched_wiki_entity_name": "<unk>",
"normalized_value": "<unk>",
"type": "",
"value": "<unk>",
}
if exclude_context:
article["SearchResults"] = []
article["EntityPages"] = []
def _add_context(collection, context_field, file_dir):
"""Adds context from file, or skips if file does not exist."""
new_items = []
for item in collection:
if "Filename" not in item:
logging.info("Missing context 'Filename', skipping.")
continue
new_item = item.copy()
fname = item["Filename"]
try:
with tf.io.gfile.GFile(os.path.join(file_dir, fname)) as f:
new_item[context_field] = f.read()
except (IOError, tf.errors.NotFoundError):
logging.info("File does not exist, skipping: %s", fname)
continue
new_items.append(new_item)
return new_items
def _strip_if_str(v):
return v.strip() if isinstance(v, six.string_types) else v
def _transpose_and_strip_dicts(dicts, field_names):
return {
tfds.core.naming.camelcase_to_snakecase(k):
[_strip_if_str(d[k]) for d in dicts] for k in field_names
}
search_results = _transpose_and_strip_dicts(
_add_context(article.get("SearchResults", []), "SearchContext", web_dir),
["Description", "Filename", "Rank", "Title", "Url", "SearchContext"])
entity_pages = _transpose_and_strip_dicts(
_add_context(article.get("EntityPages", []), "WikiContext", wiki_dir),
["DocSource", "Filename", "Title", "WikiContext"])
question = article["Question"].strip()
question_id = article["QuestionId"]
question_source = article["QuestionSource"].strip()
return f"{article['SourceFile']}_{question_id}", {
"entity_pages": entity_pages,
"search_results": search_results,
"question": question,
"question_id": question_id,
"question_source": question_source,
"answer": answer_dict,
}
| 16,320 | 34.713348 | 109 | py |
models | models-master/official/projects/triviaqa/download_and_prepare.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads and prepares TriviaQA dataset."""
from unittest import mock
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import tensorflow_datasets as tfds
from official.projects.triviaqa import dataset # pylint: disable=unused-import
flags.DEFINE_integer('sequence_length', 4096, 'Max number of tokens.')
flags.DEFINE_integer(
'global_sequence_length', None,
'Max number of question tokens plus sentences. If not set, defaults to '
'sequence_length // 16 + 64.')
flags.DEFINE_integer(
'stride', 3072,
'For documents longer than `sequence_length`, where to split them.')
flags.DEFINE_string(
'sentencepiece_model_path', None,
'SentencePiece model to use for tokenization.')
flags.DEFINE_string('data_dir', None, 'Data directory for TFDS.')
flags.DEFINE_string('runner', 'DirectRunner', 'Beam runner to use.')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
builder = tfds.builder(
'bigbird_trivia_qa/rc_wiki.preprocessed',
data_dir=FLAGS.data_dir,
sentencepiece_model_path=FLAGS.sentencepiece_model_path,
sequence_length=FLAGS.sequence_length,
global_sequence_length=FLAGS.global_sequence_length,
stride=FLAGS.stride)
download_config = tfds.download.DownloadConfig(
beam_options=beam.options.pipeline_options.PipelineOptions(flags=[
f'--runner={FLAGS.runner}',
'--direct_num_workers=8',
'--direct_running_mode=multi_processing',
]))
with mock.patch('tensorflow_datasets.core.download.extractor._normpath',
new=lambda x: x):
builder.download_and_prepare(download_config=download_config)
logging.info(builder.info.splits)
if __name__ == '__main__':
flags.mark_flag_as_required('sentencepiece_model_path')
app.run(main)
| 2,497 | 33.219178 | 79 | py |
models | models-master/official/projects/triviaqa/inputs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input processing for TriviaQA."""
import os
from typing import Optional, Text, Union
import tensorflow as tf
import tensorflow_datasets as tfds
from official.modeling import tf_utils
from official.projects.triviaqa import dataset # pylint: disable=unused-import
def _flatten_dims(tensor: tf.Tensor,
first_dim: Optional[int] = 0,
last_dim: Optional[int] = -1,
name: Optional[Text] = None) -> tf.Tensor:
"""Flattens the given span of dimensions in `tensor`.
Args:
tensor: [..., first_dim_size, ...middle_dims..., last_dim_size, ...] shaped
Tensor.
first_dim: The first dimension to flatten (inclusive). Must be a valid index
for the rank of `tensor`. Default is 0.
last_dim: The last dimension to flatten (inclusive). Must be a valid index
for the rank of `tensor`. Default is -1.
name: A name for the operation (optional).
Returns:
Tensor of shape [..., flattened_dim_size, ...] where
flattened_dim_size = first_dim_size * ...middle_dims... * last_dim_size.
"""
with tf.name_scope(name or 'flatten_dims'):
tensor = tf.convert_to_tensor(tensor)
rank = tensor.shape.rank
if rank is None:
raise ValueError('Static rank of `tensor` must be known.')
if first_dim < 0: # pytype: disable=unsupported-operands
first_dim += rank
if first_dim < 0 or first_dim >= rank: # pytype: disable=unsupported-operands
raise ValueError('`first_dim` out of bounds for `tensor` rank.')
if last_dim < 0: # pytype: disable=unsupported-operands
last_dim += rank
if last_dim < 0 or last_dim >= rank: # pytype: disable=unsupported-operands
raise ValueError('`last_dim` out of bounds for `tensor` rank.')
if first_dim > last_dim: # pytype: disable=unsupported-operands
raise ValueError('`first_dim` must not be larger than `last_dim`.')
# Try to calculate static flattened dim size if all input sizes to flatten
# are statically known. Otherwise, just use -1.
flat_dims_shape = tensor.shape[first_dim:(last_dim + 1)].as_list()
flattened_dim_size = 1
for size in flat_dims_shape:
if size is None:
flattened_dim_size = -1
break
flattened_dim_size *= size
old_shape = tf.shape(tensor)
output_shape = tf.concat([
old_shape[:first_dim], [flattened_dim_size], old_shape[(last_dim + 1):]
], 0)
return tf.reshape(tensor, output_shape)
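# Illustrative example: flattening the two middle dimensions of a [2, 3, 4, 5]
# tensor yields shape [2, 12, 5]:
# _flatten_dims(tf.zeros([2, 3, 4, 5]), first_dim=1, last_dim=2).shape
# == TensorShape([2, 12, 5])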
def _pad_to_multiple(tensor: tf.Tensor,
factor: Union[int, tf.Tensor],
axis: int,
mode: Optional[Text] = 'CONSTANT',
constant_values=0,
name: Optional[Text] = None) -> tf.Tensor:
"""Pads `tensor` on a given `axis` to be a multiple of `factor`.
Padding will be concatenated to the end of the axis only, not the beginning.
If the length along `axis` is already a multiple of `factor`, this is
effectively a no-op.
Args:
tensor: A Tensor with rank >= 1 to pad.
factor: Positive integer factor to pad for. If a Tensor, must be a scalar
int.
axis: A valid axis in `tensor` to pad.
mode: The padding mode to use according to `tf.pad`. Defaults to 'CONSTANT'.
constant_values: For 'CONSTANT' mode, the scalar pad value to use within
`tf.pad`. Defaults to 0. Must be same type as `tensor`.
name: A name for the operation (optional).
Returns:
The padded Tensor result.
"""
with tf.name_scope(name or 'pad_to_multiple'):
tensor = tf.convert_to_tensor(tensor)
if isinstance(factor, int) and factor < 1:
raise ValueError('`factor` must be positive.')
rank = tensor.shape.rank
if rank is None:
raise ValueError('Static rank of `tensor` must be known.')
if axis < 0:
axis += rank
if axis < 0 or axis >= rank:
raise ValueError('`axis` out of bounds for `tensor` rank.')
axis_len = tf_utils.get_shape_list(tensor)[axis]
pad_len = -axis_len % factor
paddings = pad_len * tf.one_hot([-1, axis], rank, axis=0, dtype=tf.int32)
return tf.pad(
tensor=tensor,
paddings=paddings,
mode=mode,
constant_values=constant_values)
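# Illustrative example: padding axis 1 of a [2, 5] tensor to the next multiple
# of 4 appends 3 zeros per row:
# _pad_to_multiple(tf.ones([2, 5]), factor=4, axis=1).shape
# == TensorShape([2, 8])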
def _skew_elements_right(tensor: tf.Tensor,
axis: int,
pad_value=0,
name: Optional[Text] = None) -> tf.Tensor:
"""Skews successive elements right along the given `axis`.
This changes an input like
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
into the following:
[
[1, 2, 3, 0, 0],
[0, 4, 5, 6, 0],
[0, 0, 7, 8, 9]
]
Args:
tensor: Tensor of shape [..., num_rows, axis_len, ...].
axis: A valid axis in `tensor` to skew along. It must not be the first axis
in `tensor`.
pad_value: The scalar pad value to use. Defaults to 0. Must be the same type
as `tensor`.
name: A name for the operation (optional).
Returns:
Tensor of shape [..., num_rows, axis_len + num_rows - 1, ...].
"""
with tf.name_scope(name or 'skew_elements_right'):
tensor = tf.convert_to_tensor(tensor)
rank = tensor.shape.rank
num_rows = tf_utils.get_shape_list(tensor)[axis - 1]
axis_len = tf_utils.get_shape_list(tensor)[axis]
if rank is None:
raise ValueError('Static rank of `tensor` must be known.')
if axis < 0:
axis += rank
if axis <= 0 or axis >= rank:
raise ValueError('`axis` out of bounds for `tensor` rank.')
output_len = axis_len + num_rows - 1
paddings = num_rows * tf.one_hot([-1, axis], rank, axis=0, dtype=tf.int32)
# [..., num_rows, axis_len + num_rows, ...]
padded_tensor = tf.pad(tensor, paddings, constant_values=pad_value)
# [..., num_rows * (axis_len + num_rows), ...]
flat_tensor = _flatten_dims(
padded_tensor, first_dim=axis - 1, last_dim=axis)
padded_tensor2 = _pad_to_multiple(
flat_tensor,
factor=output_len,
axis=axis - 1,
constant_values=pad_value)
# [..., num_rows + 1, output_len, ...]
new_shape = tf.concat([
tf.shape(tensor)[:(axis - 1)], [num_rows + 1, output_len],
tf.shape(tensor)[(axis + 1):]
], 0)
reshaped_tensor = tf.reshape(padded_tensor2, new_shape)
# [..., num_rows, output_len, ...]
output_shape = new_shape - tf.one_hot(axis - 1, depth=rank, dtype=tf.int32)
return tf.slice(
reshaped_tensor, begin=tf.zeros_like(output_shape), size=output_shape)
class RelativePositionGenerator(object):
"""Generates `relative_att_ids` for purely distance-based relative positions.
This implements the clipped relative position representations originally
described in https://arxiv.org/abs/1803.02155 .
Attributes:
max_distance: Integer passed from `__init__`.
ignore_direction: Bool passed from `__init__`.
relative_vocab_size: Integer representing the maximum number of unique ids
output from this generator.
left_pad_value: Integer id for all positions at or beyond max_distance to
the left.
right_pad_value: Integer id for all positions at or beyond max_distance to
the right.
"""
def __init__(self, max_distance: int, ignore_direction: bool = False):
"""Init.
Args:
max_distance: The maximum distance to represent. Must not be negative. All
larger distances will be clipped to this value.
ignore_direction: If True, both left and right position representations
will have the same ids based on absolute distance (resulting in
symmetric ids around the center token).
"""
if max_distance < 0:
raise ValueError('`max_distance` must not be negative.')
self.max_distance = max_distance
self.ignore_direction = ignore_direction
self.right_pad_value = max_distance
self.left_pad_value = max_distance if ignore_direction else 2 * max_distance
# 0 is the first id, so vocab size is 1 + the largest id (left pad value).
self.relative_vocab_size = self.left_pad_value + 1
def make_relative_att_ids(self,
seq_len: Union[int, tf.Tensor],
batch_size: Optional[Union[int, tf.Tensor]] = 1,
name: Optional[Text] = None) -> tf.Tensor:
"""Makes relative position ids for full self-attention.
For example, if `max_distance` is 3, `ignore_direction` is False, `seq_len`
is 6, and `batch_size` is 1, the result is the following:
[[
[0, 1, 2, 3, 3, 3],
[4, 0, 1, 2, 3, 3],
[5, 4, 0, 1, 2, 3],
[6, 5, 4, 0, 1, 2],
[6, 6, 5, 4, 0, 1],
[6, 6, 6, 5, 4, 0],
]]
Args:
seq_len: The sequence length to create ids for. Must be positive. If a
Tensor, must be a scalar int.
batch_size: The batch size of the result (default 1). Must be positive. If
a Tensor, must be a scalar int. All examples in the batch will have the
same id pattern.
name: A name for the operation (optional).
Returns:
<int32>[batch_size, seq_len, seq_len] Tensor of relative position ids.
"""
with tf.name_scope(name or 'make_relative_att_ids'):
if isinstance(seq_len, int) and seq_len < 1:
raise ValueError('`seq_len` must be positive.')
if isinstance(batch_size, int) and batch_size < 1:
raise ValueError('`batch_size` must be positive.')
# We need the id_pattern to cover all tokens to the left of the last token
# and all tokens to the right of the first token at the same time.
window_size = 2 * seq_len - 1
# [window_size]
id_pattern = self._make_relative_id_pattern(window_size)
# [seq_len, window_size]
id_tensor = tf.tile(id_pattern[tf.newaxis, :], [seq_len, 1])
# [seq_len, window_size + seq_len - 1]
id_tensor = _skew_elements_right(id_tensor, -1)
# [seq_len, seq_len]
id_tensor = tf.slice(id_tensor, [0, seq_len - 1], [seq_len, seq_len])
return tf.tile(id_tensor[tf.newaxis, :, :], [batch_size, 1, 1])
def make_local_relative_att_ids(self,
seq_len: Union[int, tf.Tensor],
local_radius: int,
batch_size: Optional[Union[int,
tf.Tensor]] = 1,
name: Optional[Text] = None) -> tf.Tensor:
"""Makes relative position ids for local self-attention.
The result can be used as `relative_att_ids` in
`layers.RelativeLocalSelfAttention`.
For example, if `max_distance` is 3, `ignore_direction` is False, `seq_len`
is 4, `local_radius` is 5, and `batch_size` is 1, the result is the
following:
[[
[6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3],
[6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3],
[6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3],
[6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3],
]]
Args:
seq_len: The sequence length to create ids for. Must be positive. If a
Tensor, must be a scalar int.
local_radius: The local radius as expected by
`layers.RelativeLocalSelfAttention`. Must be positive.
batch_size: The batch size of the result (default 1). Must be positive. If
a Tensor, must be a scalar int. All examples in the batch will have the
same id pattern.
name: A name for the operation (optional).
Returns:
<int32>[batch_size, seq_len, 2*local_radius + 1] Tensor of relative
position ids.
"""
with tf.name_scope(name or 'make_local_relative_att_ids'):
if isinstance(seq_len, int) and seq_len < 1:
raise ValueError('`seq_len` must be positive.')
if local_radius < 1:
raise ValueError('`local_radius` must be positive.')
if isinstance(batch_size, int) and batch_size < 1:
raise ValueError('`batch_size` must be positive.')
window_size = 2 * local_radius + 1
# [window_size]
id_pattern = self._make_relative_id_pattern(window_size)
return tf.tile(id_pattern[tf.newaxis, tf.newaxis, :],
[batch_size, seq_len, 1])
def _make_relative_id_pattern(
self, window_size: Union[int, tf.Tensor]) -> tf.Tensor:
"""Helper for making the relative id pattern for a particular window size.
For example, if `max_distance` is 3, `ignore_direction` is False, and
`window_size` is 11, the result is the following:
[6, 6, 6, 5, 4, 0, 1, 2, 3, 3, 3].
Args:
window_size: Window size to return relative ids for. Must be positive and
odd since ids will be relative to the center of the window. If a Tensor,
must be a scalar int.
Returns:
<int32>[window_size] Tensor of relative position ids.
"""
if isinstance(window_size, int):
if window_size < 1:
raise ValueError('`window_size` must be positive.')
if window_size % 2 != 1:
raise ValueError('`window_size` must be odd.')
x = tf.range(self.max_distance + 1, dtype=tf.int32)
x = tf.pad(x, [[self.max_distance, 0]], mode='REFLECT')
if not self.ignore_direction:
direction_adder = tf.concat([
tf.fill([self.max_distance], self.max_distance),
tf.zeros([self.max_distance + 1], dtype=tf.int32)
], 0)
x += direction_adder
len_x = x.shape.as_list()[0]
if len_x > window_size:
trim_amount = (len_x - window_size) // 2
return x[trim_amount:-trim_amount]
pad_amount = (window_size - len_x) // 2
result = tf.pad(x, [[pad_amount, 0]], constant_values=self.left_pad_value)
result = tf.pad(
result, [[0, pad_amount]], constant_values=self.right_pad_value)
return result
def read_batches(data_dir,
split,
batch_size,
include_answers=True,
shuffle=False,
drop_final_batch=False,
compression_type=''):
"""Read TriviaQA batches."""
features = {
'id': tf.io.FixedLenFeature([], tf.string),
'qid': tf.io.FixedLenFeature([], tf.string),
'context': tf.io.FixedLenFeature([], tf.string),
'question': tf.io.FixedLenFeature([], tf.string),
'global_token_ids': tf.io.RaggedFeature(tf.int64),
'token_ids': tf.io.RaggedFeature(tf.int64),
'segment_ids': tf.io.RaggedFeature(tf.int64),
'token_offsets': tf.io.RaggedFeature(tf.int64),
}
if include_answers:
features['answers'] = tf.io.RaggedFeature(
tf.int64, partitions=(tf.io.RaggedFeature.UniformRowLength(2),)) # pytype: disable=attribute-error
dataset_builder = tfds.builder(
'bigbird_trivia_qa/rc_wiki.preprocessed', data_dir=data_dir)
split_info = dataset_builder.info.splits[split]
return tf.data.experimental.make_batched_features_dataset(
[
os.path.join(dataset_builder.data_dir, filename)
for filename in split_info.filenames
],
batch_size=batch_size,
features=features,
reader=lambda path: tf.data.TFRecordDataset(path, compression_type),
label_key='answers' if include_answers else None,
num_epochs=1,
shuffle=shuffle,
shuffle_buffer_size=split_info.num_examples,
prefetch_buffer_size=tf.data.experimental.AUTOTUNE,
sloppy_ordering=True,
drop_final_batch=drop_final_batch,
reader_num_threads=8,
parser_num_threads=16)
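# A minimal usage sketch; the data_dir below is a placeholder for wherever
# download_and_prepare.py wrote the preprocessed TFDS files. With the default
# include_answers=True the dataset yields (features, answers) pairs because
# label_key='answers'.
def _read_batches_sketch(data_dir='/tmp/tfds'):
  dataset = read_batches(data_dir, 'validation', batch_size=4)
  features, answers = next(iter(dataset))
  return features['token_ids'], answers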
def scatter_labels(labels, batch_size, sequence_length):
"""Create one hot labels."""
row_ids = labels.value_rowids()
indices = tf.concat(
(tf.stack((row_ids, tf.cast(labels.flat_values[:, 0],
tf.int32), tf.zeros_like(row_ids)), -1),
tf.stack((row_ids, tf.cast(labels.flat_values[:, 1],
tf.int32), tf.ones_like(row_ids)), -1)), 0)
one_hot_labels = tf.scatter_nd(indices,
tf.ones(tf.shape(indices)[0], tf.float32),
(batch_size, sequence_length, 2))
return tf.minimum(one_hot_labels, 1.)
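# A minimal sketch with assumed toy spans: a ragged batch of (begin, end)
# answer spans becomes a dense [batch_size, sequence_length, 2] one-hot
# tensor, clipped to 1 where spans overlap. The int32 row partitions mirror
# tf.io.RaggedFeature's default row_splits_dtype used by read_batches.
def _scatter_labels_sketch():
  spans = tf.constant([[1, 2], [0, 3], [4, 5]], tf.int64)
  labels = tf.RaggedTensor.from_row_lengths(
      spans, row_lengths=tf.constant([1, 2], tf.int32))  # 1 + 2 spans
  return scatter_labels(labels, batch_size=2, sequence_length=8)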
def features_map_fn(features, local_radius, relative_pos_max_distance,
use_hard_g2l_mask, padding_id, eos_id, null_id, cls_id,
sep_id, sequence_length, global_sequence_length):
"""Make features."""
batch_size = tf.get_static_value(features['token_ids'].shape[0])
# sequence_lengths = features['token_ids'].row_lengths()
question_lengths = tf.argmax(
tf.equal(features['token_ids'].to_tensor(
shape=(batch_size, global_sequence_length)), sep_id), -1) + 1
mapped_features = dict(
token_ids=tf.cast(
features['token_ids'].to_tensor(shape=(batch_size, sequence_length)),
tf.int32),
global_token_ids=tf.cast(
features['global_token_ids'].to_tensor(
shape=(batch_size, global_sequence_length)), tf.int32),
segment_ids=tf.cast(
features['segment_ids'].to_tensor(
shape=(batch_size, sequence_length)), tf.int32),
)
relative_pos_generator = RelativePositionGenerator(
max_distance=relative_pos_max_distance)
# Only do long-to-long attention for non-null tokens.
# Let the null token attend to itself.
l2l_att_mask = tf.ones((batch_size, sequence_length, 2 * local_radius + 1),
tf.int32)
l2l_att_mask *= 1 - tf.cast(
tf.logical_or(
tf.equal(mapped_features['token_ids'], padding_id),
tf.equal(mapped_features['token_ids'], null_id)),
tf.int32)[:, :, tf.newaxis]
l2l_relative_att_ids = relative_pos_generator.make_local_relative_att_ids(
seq_len=sequence_length, local_radius=local_radius, batch_size=batch_size)
  # Long-to-global attention masks and relative ids.
l2g_att_mask = tf.ones((batch_size, sequence_length, global_sequence_length),
tf.int32)
l2g_att_mask *= tf.cast(
tf.not_equal(mapped_features['token_ids'], padding_id),
tf.int32)[:, :, tf.newaxis]
l2g_att_mask *= tf.cast(
tf.not_equal(mapped_features['global_token_ids'], padding_id),
tf.int32)[:, tf.newaxis, :]
l2g_relative_att_ids = tf.fill(
(batch_size, sequence_length, global_sequence_length),
relative_pos_generator.relative_vocab_size + 1)
  # Global-to-global attention masks and relative ids.
g2g_att_mask = tf.ones(
(batch_size, global_sequence_length, global_sequence_length), tf.int32)
g2g_att_mask *= tf.cast(
tf.not_equal(mapped_features['global_token_ids'], padding_id),
tf.int32)[:, :, tf.newaxis]
g2g_relative_att_ids = relative_pos_generator.make_relative_att_ids(
seq_len=global_sequence_length, batch_size=batch_size)
global_sentence_mask = tf.equal(mapped_features['global_token_ids'], eos_id)
global_question_mask = tf.logical_not(
tf.logical_or(
tf.logical_or(
tf.equal(mapped_features['global_token_ids'], cls_id),
tf.equal(mapped_features['global_token_ids'], eos_id)),
tf.equal(mapped_features['global_token_ids'], padding_id)))
g2g_question_mask = tf.logical_and(global_question_mask[:, tf.newaxis, :],
global_question_mask[:, :, tf.newaxis])
g2g_sentence_mask = tf.logical_and(global_sentence_mask[:, tf.newaxis, :],
global_sentence_mask[:, :, tf.newaxis])
g2g_local_mask = tf.cast(
tf.logical_or(g2g_question_mask, g2g_sentence_mask), tf.int32)
g2g_relative_att_ids *= g2g_local_mask
g2g_relative_att_ids += (1 - g2g_local_mask) * (
relative_pos_generator.relative_vocab_size + 2)
  # Global-to-long attention masks and relative ids.
g2l_att_mask = tf.transpose(l2g_att_mask, [0, 2, 1])
if use_hard_g2l_mask:
global_range = tf.range(
global_sequence_length, dtype=mapped_features['global_token_ids'].dtype)
g2l_att_mask *= tf.cast(
tf.logical_or(
tf.equal(
mapped_features['global_token_ids'], cls_id)[:, :, tf.newaxis],
tf.equal(global_range[tf.newaxis, :, tf.newaxis],
mapped_features['segment_ids'][:, tf.newaxis, :])),
tf.int32)
g2l_relative_att_ids = tf.transpose(l2g_relative_att_ids, [0, 2, 1])
mapped_features.update(
dict(
l2l_att_mask=l2l_att_mask,
l2l_relative_att_ids=l2l_relative_att_ids,
l2g_att_mask=l2g_att_mask,
l2g_relative_att_ids=l2g_relative_att_ids,
g2g_att_mask=g2g_att_mask,
g2g_relative_att_ids=g2g_relative_att_ids,
g2l_att_mask=g2l_att_mask,
g2l_relative_att_ids=g2l_relative_att_ids,
question_lengths=question_lengths,
))
return mapped_features
def labels_map_fn(token_ids, labels, sequence_length):
batch_size = tf.get_static_value(labels.shape[0])
row_lengths = labels.row_lengths()
empty_token_index = token_ids.row_lengths() - 1
one_hot_labels = scatter_labels(labels, batch_size, sequence_length)
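  # Examples with no annotated answer span are pointed at the last token of
  # the sequence, so every example still has a valid begin/end target.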
one_hot_labels += (tf.cast(row_lengths == 0, tf.float32)[:, tf.newaxis] *
tf.one_hot(empty_token_index, sequence_length))[:, :,
tf.newaxis]
return one_hot_labels
| 21,718 | 38.56102 | 107 | py |
models | models-master/official/projects/triviaqa/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/triviaqa/evaluation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Official evaluation script for v1.0 of the TriviaQA dataset.
Forked from
https://github.com/mandarjoshi90/triviaqa/blob/master/evaluation/triviaqa_evaluation.py.
The only modification is the removal of the main function.
"""
import collections
import re
import string
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def handle_punc(text):
exclude = set(string.punctuation + ''.join([u'‘', u'’', u'´', u'`']))
return ''.join(ch if ch not in exclude else ' ' for ch in text)
def lower(text):
return text.lower()
def replace_underscore(text):
return text.replace('_', ' ')
return white_space_fix(
remove_articles(handle_punc(lower(replace_underscore(s))))).strip()
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = (
collections.Counter(prediction_tokens)
& collections.Counter(ground_truth_tokens))
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
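# Illustrative example: normalization lowercases, strips punctuation and
# articles, and replaces underscores, so e.g.
# normalize_answer('The_Eiffel Tower!') == 'eiffel tower'
# f1_score('the Eiffel Tower', 'Eiffel Tower (Paris)') ~= 0.8 (2 of 3 tokens)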
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def is_exact_match(answer_object, prediction):
ground_truths = get_ground_truths(answer_object)
for ground_truth in ground_truths:
if exact_match_score(prediction, ground_truth):
return True
return False
def has_exact_match(ground_truths, candidates):
for ground_truth in ground_truths:
if ground_truth in candidates:
return True
return False
def get_ground_truths(answer):
return answer['NormalizedAliases'] + [
normalize_answer(ans) for ans in answer.get('HumanAnswers', [])
]
def get_oracle_score(ground_truth,
predicted_answers,
qid_list=None,
mute=False):
exact_match = common = 0
if qid_list is None:
qid_list = ground_truth.keys()
for qid in qid_list:
if qid not in predicted_answers:
if not mute:
        message = 'Irrelevant question {} will receive score 0.'.format(qid)
print(message, file=sys.stderr)
continue
common += 1
prediction = normalize_answer(predicted_answers[qid])
ground_truths = get_ground_truths(ground_truth[qid])
em_for_this_question = has_exact_match(ground_truths, prediction)
exact_match += int(em_for_this_question)
exact_match = 100.0 * exact_match / len(qid_list)
return {
'oracle_exact_match': exact_match,
'common': common,
'denominator': len(qid_list),
'pred_len': len(predicted_answers),
'gold_len': len(ground_truth)
}
def evaluate_triviaqa(ground_truth,
predicted_answers,
qid_list=None,
mute=False):
f1 = exact_match = common = 0
if qid_list is None:
qid_list = ground_truth.keys()
for qid in qid_list:
if qid not in predicted_answers:
if not mute:
message = 'Missed question {} will receive score 0.'.format(qid)
print(message, file=sys.stderr)
continue
if qid not in ground_truth:
if not mute:
        message = 'Irrelevant question {} will receive score 0.'.format(qid)
print(message, file=sys.stderr)
continue
common += 1
prediction = predicted_answers[qid]
ground_truths = get_ground_truths(ground_truth[qid])
em_for_this_question = metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
if em_for_this_question == 0 and not mute:
print('em=0:', prediction, ground_truths)
exact_match += em_for_this_question
f1_for_this_question = metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
f1 += f1_for_this_question
exact_match = 100.0 * exact_match / len(qid_list)
f1 = 100.0 * f1 / len(qid_list)
return {
'exact_match': exact_match,
'f1': f1,
'common': common,
'denominator': len(qid_list),
'pred_len': len(predicted_answers),
'gold_len': len(ground_truth)
}
| 5,288 | 30.295858 | 88 | py |
models | models-master/official/projects/triviaqa/predict.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TriviaQA script for inference."""
import collections
import contextlib
import functools
import json
import operator
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
import sentencepiece as spm
from official.nlp.configs import encoders # pylint: disable=unused-import
from official.projects.triviaqa import evaluation
from official.projects.triviaqa import inputs
from official.projects.triviaqa import prediction
flags.DEFINE_string('data_dir', None, 'TensorFlow Datasets directory.')
flags.DEFINE_enum('split', None,
[tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST],
'For which split to generate predictions.')
flags.DEFINE_string('predictions_path', None, 'Output for predictions.')
flags.DEFINE_string('sentencepiece_model_path', None,
'Path to sentence piece model.')
flags.DEFINE_integer('bigbird_block_size', 64,
'Size of blocks for sparse block attention.')
flags.DEFINE_string('saved_model_dir', None,
'Path from which to initialize model and weights.')
flags.DEFINE_integer('sequence_length', 4096, 'Maximum number of tokens.')
flags.DEFINE_integer('global_sequence_length', 320,
'Maximum number of global tokens.')
flags.DEFINE_integer('batch_size', 32, 'Size of batch.')
flags.DEFINE_string('master', '', 'Address of the TPU master.')
flags.DEFINE_integer('decode_top_k', 8,
'Maximum number of tokens to consider for begin/end.')
flags.DEFINE_integer('decode_max_size', 16,
'Maximum number of sentence pieces in an answer.')
FLAGS = flags.FLAGS
@contextlib.contextmanager
def worker_context():
if FLAGS.master:
with tf.device('/job:worker') as d:
yield d
else:
yield
def read_sentencepiece_model(path):
with tf.io.gfile.GFile(path, 'rb') as file:
processor = spm.SentencePieceProcessor()
processor.LoadFromSerializedProto(file.read())
return processor
def predict(sp_processor, features_map_fn, logits_fn, decode_logits_fn,
split_and_pad_fn, distribute_strategy, dataset):
"""Make predictions."""
predictions = collections.defaultdict(list)
for _, features in dataset.enumerate():
token_ids = features['token_ids']
x = split_and_pad_fn(features_map_fn(features))
logits = tf.concat(
distribute_strategy.experimental_local_results(logits_fn(x)), 0)
logits = logits[:features['token_ids'].shape[0]]
end_limit = token_ids.row_lengths() - 1 # inclusive
begin, end, scores = decode_logits_fn(logits, end_limit)
answers = prediction.decode_answer(features['context'], begin, end,
features['token_offsets'],
end_limit).numpy()
for j, (qid, token_id, offset, score, answer) in enumerate(
zip(features['qid'].numpy(),
tf.gather(features['token_ids'], begin, batch_dims=1).numpy(),
tf.gather(features['token_offsets'], begin, batch_dims=1).numpy(),
scores, answers)):
if not answer:
logging.info('%s: %s | NO_ANSWER, %f',
features['id'][j].numpy().decode('utf-8'),
features['question'][j].numpy().decode('utf-8'), score)
continue
if sp_processor.IdToPiece(int(token_id)).startswith('▁') and offset > 0:
answer = answer[1:]
logging.info('%s: %s | %s, %f', features['id'][j].numpy().decode('utf-8'),
features['question'][j].numpy().decode('utf-8'),
answer.decode('utf-8'), score)
predictions[qid.decode('utf-8')].append((score, answer.decode('utf-8')))
predictions = {
qid: evaluation.normalize_answer(
sorted(answers, key=operator.itemgetter(0), reverse=True)[0][1])
for qid, answers in predictions.items()
}
return predictions
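# The returned mapping is qid -> best-scoring normalized answer string, e.g.
# (illustrative): {'qw_1': 'george washington'}. `main` below serializes it
# with json.dump so it can be fed to the TriviaQA evaluator.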
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Configure input processing.
sp_processor = read_sentencepiece_model(FLAGS.sentencepiece_model_path)
features_map_fn = tf.function(
functools.partial(
inputs.features_map_fn,
local_radius=FLAGS.bigbird_block_size,
relative_pos_max_distance=24,
use_hard_g2l_mask=True,
sequence_length=FLAGS.sequence_length,
global_sequence_length=FLAGS.global_sequence_length,
padding_id=sp_processor.PieceToId('<pad>'),
eos_id=sp_processor.PieceToId('</s>'),
null_id=sp_processor.PieceToId('<empty>'),
cls_id=sp_processor.PieceToId('<ans>'),
sep_id=sp_processor.PieceToId('<sep_0>')),
autograph=False)
# Connect to TPU cluster.
if FLAGS.master:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(FLAGS.master)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
# Initialize datasets.
with worker_context():
_ = tf.random.get_global_generator()
dataset = inputs.read_batches(
FLAGS.data_dir, FLAGS.split, FLAGS.batch_size, include_answers=False)
# Initialize model and compile.
with strategy.scope():
model = tf.keras.models.load_model(FLAGS.saved_model_dir, compile=False)
logging.info('Model initialized. Beginning prediction loop.')
logits_fn = tf.function(
functools.partial(prediction.distributed_logits_fn, model))
decode_logits_fn = tf.function(
functools.partial(prediction.decode_logits, FLAGS.decode_top_k,
FLAGS.decode_max_size))
split_and_pad_fn = tf.function(
functools.partial(prediction.split_and_pad, strategy, FLAGS.batch_size))
# Prediction strategy.
predict_fn = functools.partial(
predict,
sp_processor=sp_processor,
features_map_fn=features_map_fn,
logits_fn=logits_fn,
decode_logits_fn=decode_logits_fn,
split_and_pad_fn=split_and_pad_fn,
distribute_strategy=strategy,
dataset=dataset)
with worker_context():
predictions = predict_fn()
with tf.io.gfile.GFile(FLAGS.predictions_path, 'w') as f:
json.dump(predictions, f)
if __name__ == '__main__':
flags.mark_flags_as_required(['split', 'predictions_path', 'saved_model_dir'])
app.run(main)
| 7,085 | 37.096774 | 80 | py |
models | models-master/official/projects/triviaqa/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TriviaQA training script."""
import collections
import contextlib
import functools
import json
import operator
import os
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
import tensorflow_datasets as tfds
import sentencepiece as spm
from official.nlp import optimization as nlp_optimization
from official.nlp.configs import encoders
from official.projects.triviaqa import evaluation
from official.projects.triviaqa import inputs
from official.projects.triviaqa import modeling
from official.projects.triviaqa import prediction
flags.DEFINE_string('data_dir', None, 'Data directory for TensorFlow Datasets.')
flags.DEFINE_string(
'validation_gold_path', None,
'Path to golden validation. Usually, the wikipedia-dev.json file.')
flags.DEFINE_string('model_dir', None,
'Directory for checkpoints and summaries.')
flags.DEFINE_string('model_config_path', None,
                    'JSON file containing model configuration.')
flags.DEFINE_string('sentencepiece_model_path', None,
'Path to sentence piece model.')
flags.DEFINE_enum('encoder', 'bigbird',
['bert', 'bigbird', 'albert', 'mobilebert'],
'Which transformer encoder model to use.')
flags.DEFINE_integer('bigbird_block_size', 64,
'Size of blocks for sparse block attention.')
flags.DEFINE_string('init_checkpoint_path', None,
'Path from which to initialize weights.')
flags.DEFINE_integer('train_sequence_length', 4096,
'Maximum number of tokens for training.')
flags.DEFINE_integer('train_global_sequence_length', 320,
'Maximum number of global tokens for training.')
flags.DEFINE_integer('validation_sequence_length', 4096,
'Maximum number of tokens for validation.')
flags.DEFINE_integer('validation_global_sequence_length', 320,
'Maximum number of global tokens for validation.')
flags.DEFINE_integer('batch_size', 32, 'Size of batch.')
flags.DEFINE_string('master', '', 'Address of the TPU master.')
flags.DEFINE_integer('decode_top_k', 8,
'Maximum number of tokens to consider for begin/end.')
flags.DEFINE_integer('decode_max_size', 16,
'Maximum number of sentence pieces in an answer.')
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate for hidden layers.')
flags.DEFINE_float('attention_dropout_rate', 0.3,
'Dropout rate for attention layers.')
flags.DEFINE_float('label_smoothing', 1e-1, 'Degree of label smoothing.')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files')
FLAGS = flags.FLAGS
@contextlib.contextmanager
def worker_context():
if FLAGS.master:
with tf.device('/job:worker') as d:
yield d
else:
yield
def read_sentencepiece_model(path):
with tf.io.gfile.GFile(path, 'rb') as file:
processor = spm.SentencePieceProcessor()
processor.LoadFromSerializedProto(file.read())
return processor
# Rename old BERT v1 configuration parameters.
_MODEL_CONFIG_REPLACEMENTS = {
'num_hidden_layers': 'num_layers',
'attention_probs_dropout_prob': 'attention_dropout_rate',
'hidden_dropout_prob': 'dropout_rate',
'hidden_act': 'hidden_activation',
'window_size': 'block_size',
}
def read_model_config(encoder,
path,
bigbird_block_size=None) -> encoders.EncoderConfig:
"""Merges the JSON configuration into the encoder configuration."""
with tf.io.gfile.GFile(path) as f:
model_config = json.load(f)
for key, value in _MODEL_CONFIG_REPLACEMENTS.items():
if key in model_config:
model_config[value] = model_config.pop(key)
model_config['attention_dropout_rate'] = FLAGS.attention_dropout_rate
model_config['dropout_rate'] = FLAGS.dropout_rate
model_config['block_size'] = bigbird_block_size
encoder_config = encoders.EncoderConfig(type=encoder)
# Override the default config with those loaded from the JSON file.
encoder_config_keys = encoder_config.get().as_dict().keys()
overrides = {}
for key, value in model_config.items():
if key in encoder_config_keys:
overrides[key] = value
else:
logging.warning('Ignoring config parameter %s=%s', key, value)
encoder_config.get().override(overrides)
return encoder_config
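# Example (hypothetical JSON): a BERT-v1 style file such as
#   {"num_hidden_layers": 12, "hidden_act": "gelu", "type_vocab_size": 2}
# is first renamed to {"num_layers": 12, "hidden_activation": "gelu", ...} via
# _MODEL_CONFIG_REPLACEMENTS, then merged over the default EncoderConfig for
# `encoder`; keys the encoder config does not define are logged and ignored.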
@gin.configurable(denylist=[
'model',
'strategy',
'train_dataset',
'model_dir',
'init_checkpoint_path',
'evaluate_fn',
])
def fit(model,
strategy,
train_dataset,
model_dir,
init_checkpoint_path=None,
evaluate_fn=None,
learning_rate=1e-5,
learning_rate_polynomial_decay_rate=1.,
weight_decay_rate=1e-1,
num_warmup_steps=5000,
num_decay_steps=51000,
num_epochs=6):
"""Train and evaluate."""
hparams = dict(
learning_rate=learning_rate,
num_decay_steps=num_decay_steps,
num_warmup_steps=num_warmup_steps,
num_epochs=num_epochs,
weight_decay_rate=weight_decay_rate,
dropout_rate=FLAGS.dropout_rate,
attention_dropout_rate=FLAGS.attention_dropout_rate,
label_smoothing=FLAGS.label_smoothing)
logging.info(hparams)
learning_rate_schedule = nlp_optimization.WarmUp(
learning_rate,
tf.keras.optimizers.schedules.PolynomialDecay(
learning_rate,
num_decay_steps,
end_learning_rate=0.,
power=learning_rate_polynomial_decay_rate), num_warmup_steps)
with strategy.scope():
optimizer = nlp_optimization.AdamWeightDecay(
learning_rate_schedule,
weight_decay_rate=weight_decay_rate,
epsilon=1e-6,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
model.compile(optimizer, loss=modeling.SpanOrCrossEntropyLoss())
def init_fn(init_checkpoint_path):
ckpt = tf.train.Checkpoint(encoder=model.encoder)
ckpt.restore(init_checkpoint_path).assert_existing_objects_matched()
with worker_context():
ckpt_manager = tf.train.CheckpointManager(
tf.train.Checkpoint(model=model, optimizer=optimizer),
model_dir,
max_to_keep=None,
init_fn=(functools.partial(init_fn, init_checkpoint_path)
if init_checkpoint_path else None))
with strategy.scope():
ckpt_manager.restore_or_initialize()
val_summary_writer = tf.summary.create_file_writer(
os.path.join(model_dir, 'val'))
best_exact_match = 0.
for epoch in range(len(ckpt_manager.checkpoints), num_epochs):
model.fit(
train_dataset,
callbacks=[
tf.keras.callbacks.TensorBoard(model_dir, write_graph=False),
])
ckpt_path = ckpt_manager.save()
if evaluate_fn is None:
continue
metrics = evaluate_fn()
logging.info('Epoch %d: %s', epoch + 1, metrics)
if best_exact_match < metrics['exact_match']:
best_exact_match = metrics['exact_match']
model.save(os.path.join(model_dir, 'export'), include_optimizer=False)
logging.info('Exporting %s as SavedModel.', ckpt_path)
with val_summary_writer.as_default():
for name, data in metrics.items():
tf.summary.scalar(name, data, epoch + 1)
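# Note on `fit`'s default schedule (as implemented by nlp_optimization.WarmUp
# wrapping PolynomialDecay): the learning rate ramps linearly from 0 to
# `learning_rate` over the first 5000 steps (~5e-6 at step 2500 with the
# defaults above), then decays linearly, lr(step) = 1e-5 * (1 - step / 51000),
# reaching 0 at step 51000.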
def evaluate(sp_processor, features_map_fn, labels_map_fn, logits_fn,
decode_logits_fn, split_and_pad_fn, distribute_strategy,
validation_dataset, ground_truth):
"""Run evaluation."""
loss_metric = tf.keras.metrics.Mean()
@tf.function
def update_loss(y, logits):
loss_fn = modeling.SpanOrCrossEntropyLoss(
reduction=tf.keras.losses.Reduction.NONE)
return loss_metric(loss_fn(y, logits))
predictions = collections.defaultdict(list)
for _, (features, labels) in validation_dataset.enumerate():
token_ids = features['token_ids']
y = labels_map_fn(token_ids, labels)
x = split_and_pad_fn(features_map_fn(features))
logits = tf.concat(
distribute_strategy.experimental_local_results(logits_fn(x)), 0)
logits = logits[:features['token_ids'].shape[0]]
update_loss(y, logits)
end_limit = token_ids.row_lengths() - 1 # inclusive
begin, end, scores = decode_logits_fn(logits, end_limit)
answers = prediction.decode_answer(features['context'], begin, end,
features['token_offsets'],
end_limit).numpy()
    for qid, token_id, offset, score, answer in zip(
        features['qid'].numpy(),
        tf.gather(features['token_ids'], begin, batch_dims=1).numpy(),
        tf.gather(features['token_offsets'], begin, batch_dims=1).numpy(),
        scores, answers):
if not answer:
continue
if sp_processor.IdToPiece(int(token_id)).startswith('▁') and offset > 0:
answer = answer[1:]
predictions[qid.decode('utf-8')].append((score, answer.decode('utf-8')))
predictions = {
qid: evaluation.normalize_answer(
sorted(answers, key=operator.itemgetter(0), reverse=True)[0][1])
for qid, answers in predictions.items()
}
metrics = evaluation.evaluate_triviaqa(ground_truth, predictions, mute=True)
metrics['loss'] = loss_metric.result().numpy()
return metrics
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
gin.parse_config(FLAGS.gin_bindings)
model_config = read_model_config(
FLAGS.encoder,
FLAGS.model_config_path,
bigbird_block_size=FLAGS.bigbird_block_size)
logging.info(model_config.get().as_dict())
# Configure input processing.
sp_processor = read_sentencepiece_model(FLAGS.sentencepiece_model_path)
features_map_fn = functools.partial(
inputs.features_map_fn,
local_radius=FLAGS.bigbird_block_size,
relative_pos_max_distance=24,
use_hard_g2l_mask=True,
padding_id=sp_processor.PieceToId('<pad>'),
eos_id=sp_processor.PieceToId('</s>'),
null_id=sp_processor.PieceToId('<empty>'),
cls_id=sp_processor.PieceToId('<ans>'),
sep_id=sp_processor.PieceToId('<sep_0>'))
train_features_map_fn = tf.function(
functools.partial(
features_map_fn,
sequence_length=FLAGS.train_sequence_length,
global_sequence_length=FLAGS.train_global_sequence_length),
autograph=False)
train_labels_map_fn = tf.function(
functools.partial(
inputs.labels_map_fn, sequence_length=FLAGS.train_sequence_length))
# Connect to TPU cluster.
if FLAGS.master:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(FLAGS.master)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
strategy = tf.distribute.MirroredStrategy()
# Initialize datasets.
with worker_context():
_ = tf.random.get_global_generator()
train_dataset = inputs.read_batches(
FLAGS.data_dir,
tfds.Split.TRAIN,
FLAGS.batch_size,
shuffle=True,
drop_final_batch=True)
validation_dataset = inputs.read_batches(FLAGS.data_dir,
tfds.Split.VALIDATION,
FLAGS.batch_size)
def train_map_fn(x, y):
features = train_features_map_fn(x)
labels = modeling.smooth_labels(FLAGS.label_smoothing,
train_labels_map_fn(x['token_ids'], y),
features['question_lengths'],
features['token_ids'])
return features, labels
train_dataset = train_dataset.map(train_map_fn, 16).prefetch(16)
# Initialize model and compile.
with strategy.scope():
model = modeling.TriviaQaModel(model_config, FLAGS.train_sequence_length)
logits_fn = tf.function(
functools.partial(prediction.distributed_logits_fn, model))
decode_logits_fn = tf.function(
functools.partial(prediction.decode_logits, FLAGS.decode_top_k,
FLAGS.decode_max_size))
split_and_pad_fn = tf.function(
functools.partial(prediction.split_and_pad, strategy, FLAGS.batch_size))
# Evaluation strategy.
with tf.io.gfile.GFile(FLAGS.validation_gold_path) as f:
ground_truth = {
datum['QuestionId']: datum['Answer'] for datum in json.load(f)['Data']
}
validation_features_map_fn = tf.function(
functools.partial(
features_map_fn,
sequence_length=FLAGS.validation_sequence_length,
global_sequence_length=FLAGS.validation_global_sequence_length),
autograph=False)
validation_labels_map_fn = tf.function(
functools.partial(
inputs.labels_map_fn,
sequence_length=FLAGS.validation_sequence_length))
evaluate_fn = functools.partial(
evaluate,
sp_processor=sp_processor,
features_map_fn=validation_features_map_fn,
labels_map_fn=validation_labels_map_fn,
logits_fn=logits_fn,
decode_logits_fn=decode_logits_fn,
split_and_pad_fn=split_and_pad_fn,
distribute_strategy=strategy,
validation_dataset=validation_dataset,
ground_truth=ground_truth)
logging.info('Model initialized. Beginning training fit loop.')
fit(model, strategy, train_dataset, FLAGS.model_dir,
FLAGS.init_checkpoint_path, evaluate_fn)
if __name__ == '__main__':
flags.mark_flags_as_required([
'model_config_path', 'model_dir', 'sentencepiece_model_path',
'validation_gold_path'
])
app.run(main)
| 14,285 | 36.106494 | 80 | py |
models | models-master/official/projects/triviaqa/preprocess.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for preprocessing TriviaQA data."""
import bisect
import json
import operator
import os
import re
import string
from typing import Any, Dict, Generator, List, Optional, Set, Text, Tuple
from absl import logging
import apache_beam as beam
from apache_beam import metrics
import dataclasses
import nltk
import numpy as np
import tensorflow.io.gfile as gfile
import sentencepiece as spm
from official.projects.triviaqa import evaluation
from official.projects.triviaqa import sentencepiece_pb2
@dataclasses.dataclass
class Question(object):
id: Text
value: Text
@dataclasses.dataclass
class EvidenceInfo(object):
id: Text
source: Text
title: Text
@dataclasses.dataclass
class Evidence(object):
info: EvidenceInfo
text: Text
@dataclasses.dataclass
class Answer(object):
value: Text
aliases: List[Text]
normalized_aliases: List[Text]
@dataclasses.dataclass
class QuestionAnswer(object):
question: Question
evidence_info: List[EvidenceInfo]
answer: Optional[Answer] = None
@dataclasses.dataclass
class QuestionAnswerEvidence(object):
question: Question
evidence: Evidence
answer: Optional[Answer] = None
@dataclasses.dataclass
class Features(object):
id: Text
stride_index: int
question_id: Text
question: Text
context: bytes
token_ids: List[int]
token_offsets: List[int]
global_token_ids: List[int]
segment_ids: List[int]
@dataclasses.dataclass
class Paragraph(object):
sentences: List[sentencepiece_pb2.SentencePieceText]
size: int
@dataclasses.dataclass
class AnswerSpan(object):
begin: int # inclusive
end: int # inclusive
text: Text
def make_paragraph(
sentence_tokenizer: nltk.tokenize.api.TokenizerI,
processor: spm.SentencePieceProcessor,
text: Text,
paragraph_metric: Optional[metrics.Metrics.DelegatingDistribution] = None,
sentence_metric: Optional[metrics.Metrics.DelegatingDistribution] = None
) -> Paragraph:
"""Tokenizes paragraphs."""
paragraph_size = 0
sentences = []
for sentence in sentence_tokenizer.tokenize(text):
sentencepiece_text = sentencepiece_pb2.SentencePieceText.FromString(
processor.EncodeAsSerializedProto(sentence))
paragraph_size += len(sentencepiece_text.pieces)
sentences.append(sentencepiece_text)
if sentence_metric:
sentence_metric.update(len(sentencepiece_text.pieces))
if paragraph_metric:
paragraph_metric.update(paragraph_size)
return Paragraph(sentences=sentences, size=paragraph_size)
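# Minimal usage sketch (hypothetical model path; requires the NLTK punkt data
# and a trained sentencepiece model; `read_sentencepiece_model` is defined
# below):
#
#   >>> tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
#   >>> processor = read_sentencepiece_model('/path/to/spm.model')
#   >>> make_paragraph(tokenizer, processor, 'Hello world. Goodbye.').size
#   # total sentencepiece token count over the two sentences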
def read_question_answers(json_path: Text) -> List[QuestionAnswer]:
"""Read question answers."""
with gfile.GFile(json_path) as f:
data = json.load(f)['Data']
question_answers = []
for datum in data:
question = Question(id=datum['QuestionId'], value=datum['Question'])
if 'Answer' in datum:
answer = Answer(
value=datum['Answer']['Value'],
aliases=datum['Answer']['Aliases'],
normalized_aliases=datum['Answer']['NormalizedAliases'])
else:
answer = None
evidence_info = []
for key in ['EntityPages', 'SearchResults']:
for document in datum.get(key, []):
evidence_info.append(
EvidenceInfo(
id=document['Filename'], title=document['Title'], source=key))
question_answers.append(
QuestionAnswer(
question=question, evidence_info=evidence_info, answer=answer))
return question_answers
def alias_answer(answer: Text, include=None):
alias = answer.replace('_', ' ').lower()
exclude = set(string.punctuation + ''.join(['‘', '’', '´', '`']))
include = include or []
alias = ''.join(c if c not in exclude or c in include else ' ' for c in alias)
return ' '.join(alias.split()).strip()
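# For example, with the rules above:
#   alias_answer('U.S._Route_66')             -> 'u s route 66'
#   alias_answer('U.S._Route_66', ['.', ',']) -> 'u.s. route 66'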
def make_answer_set(answer: Answer) -> Set[Text]:
"""Apply less aggressive normalization to the answer aliases."""
answers = []
for alias in [answer.value] + answer.aliases:
answers.append(alias_answer(alias))
answers.append(alias_answer(alias, [',', '.']))
answers.append(alias_answer(alias, ['-']))
answers.append(alias_answer(alias, [',', '.', '-']))
answers.append(alias_answer(alias, string.punctuation))
return set(answers + answer.normalized_aliases)
def find_answer_spans(text: bytes, answer_set: Set[Text]) -> List[AnswerSpan]:
"""Find answer spans."""
spans = []
for answer in answer_set:
answer_regex = re.compile(
re.escape(answer).encode('utf-8').replace(b'\\ ', b'[ -]'),
flags=re.IGNORECASE)
for match in re.finditer(answer_regex, text):
spans.append(
AnswerSpan(
begin=match.start(),
end=match.end(),
text=match.group(0).decode('utf-8')))
return sorted(spans, key=operator.attrgetter('begin'))
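# Example: a space in an alias matches either ' ' or '-', case-insensitively,
# so find_answer_spans(b'A Hall-effect sensor.', {'hall effect'}) yields one
# AnswerSpan(begin=2, end=13, text='Hall-effect'). Note that here `end` comes
# from re.Match.end() and is therefore an exclusive byte offset.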
def realign_answer_span(features: Features, answer_set: Optional[Set[Text]],
processor: spm.SentencePieceProcessor,
span: AnswerSpan) -> Optional[AnswerSpan]:
"""Align answer span to text with given tokens."""
i = bisect.bisect_left(features.token_offsets, span.begin)
if i == len(features.token_offsets) or span.begin < features.token_offsets[i]:
i -= 1
j = i + 1
answer_end = span.begin + len(span.text.encode('utf-8'))
while (j < len(features.token_offsets) and
features.token_offsets[j] < answer_end):
j += 1
j -= 1
sp_answer = (
features.context[features.token_offsets[i]:features.token_offsets[j + 1]]
if j + 1 < len(features.token_offsets) else
features.context[features.token_offsets[i]:])
if (processor.IdToPiece(features.token_ids[i]).startswith('▁') and
features.token_offsets[i] > 0):
sp_answer = sp_answer[1:]
sp_answer = evaluation.normalize_answer(sp_answer.decode('utf-8'))
if answer_set is not None and sp_answer not in answer_set:
# No need to warn if the cause was breaking word boundaries.
    if sp_answer and len(sp_answer) <= len(
        evaluation.normalize_answer(span.text)):
logging.warning('%s: "%s" not in %s.', features.question_id, sp_answer,
answer_set)
return None
return AnswerSpan(begin=i, end=j, text=span.text)
def read_sentencepiece_model(path):
with gfile.GFile(path, 'rb') as file:
processor = spm.SentencePieceProcessor()
processor.LoadFromSerializedProto(file.read())
return processor
class ReadEvidence(beam.DoFn):
"""Function to read evidence."""
def __init__(self, wikipedia_dir: Text, web_dir: Text):
self._wikipedia_dir = wikipedia_dir
self._web_dir = web_dir
def process(
self, question_answer: QuestionAnswer
) -> Generator[QuestionAnswerEvidence, None, None]:
for info in question_answer.evidence_info:
if info.source == 'EntityPages':
evidence_path = os.path.join(self._wikipedia_dir, info.id)
      elif info.source == 'SearchResults':
evidence_path = os.path.join(self._web_dir, info.id)
else:
raise ValueError(f'Unknown evidence source: {info.source}.')
with gfile.GFile(evidence_path, 'rb') as f:
text = f.read().decode('utf-8')
metrics.Metrics.counter('_', 'documents').inc()
yield QuestionAnswerEvidence(
question=question_answer.question,
evidence=Evidence(info=info, text=text),
answer=question_answer.answer)
_CLS_PIECE = '<ans>'
_EOS_PIECE = '</s>'
_SEP_PIECE = '<sep_0>'
# _PARAGRAPH_SEP_PIECE = '<sep_1>'
_NULL_PIECE = '<empty>'
_QUESTION_PIECE = '<unused_34>'
class MakeFeatures(beam.DoFn):
"""Function to make features."""
def __init__(self, sentencepiece_model_path: Text, max_num_tokens: int,
max_num_global_tokens: int, stride: int):
self._sentencepiece_model_path = sentencepiece_model_path
self._max_num_tokens = max_num_tokens
self._max_num_global_tokens = max_num_global_tokens
self._stride = stride
def setup(self):
self._sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
self._sentencepiece_processor = read_sentencepiece_model(
self._sentencepiece_model_path)
def _make_features(self, stride_index: int, paragraph_texts: List[Text],
paragraphs: List[Paragraph],
question_answer_evidence: QuestionAnswerEvidence,
ids: List[int],
paragraph_offset: int) -> Tuple[int, Features]:
global_ids = (
[self._sentencepiece_processor.PieceToId(_CLS_PIECE)] +
[self._sentencepiece_processor.PieceToId(_QUESTION_PIECE)] * len(ids))
segment_ids = [i + 1 for i in range(len(ids))] # offset for CLS token
token_ids, sentences = [], []
offsets, offset, full_text = [-1] * len(ids), 0, True
for i in range(paragraph_offset, len(paragraph_texts)):
if i < len(paragraphs):
paragraph = paragraphs[i]
else:
paragraphs.append(
make_paragraph(
self._sentence_tokenizer,
self._sentencepiece_processor,
paragraph_texts[i],
paragraph_metric=metrics.Metrics.distribution(
'_', 'paragraphs'),
sentence_metric=metrics.Metrics.distribution('_', 'sentences')))
paragraph = paragraphs[-1]
for sentence in paragraph.sentences:
if (len(ids) + len(token_ids) + len(sentence.pieces) + 1 >=
self._max_num_tokens or
len(global_ids) >= self._max_num_global_tokens):
full_text = False
break
for j, piece in enumerate(sentence.pieces):
token_ids.append(piece.id)
segment_ids.append(len(global_ids))
offsets.append(offset + piece.begin)
if j == 0 and sentences:
offsets[-1] -= 1
offset += len(sentence.text.encode('utf-8')) + 1
global_ids.append(self._sentencepiece_processor.PieceToId(_EOS_PIECE))
sentences.append(sentence.text)
if not full_text:
break
context = ' '.join(sentences).encode('utf-8')
token_ids.append(self._sentencepiece_processor.PieceToId(_NULL_PIECE))
offsets.append(len(context))
segment_ids.append(0)
next_paragraph_index = len(paragraph_texts)
if not full_text and self._stride > 0:
shift = paragraphs[paragraph_offset].size
next_paragraph_index = paragraph_offset + 1
while (next_paragraph_index < len(paragraphs) and
shift + paragraphs[next_paragraph_index].size <= self._stride):
shift += paragraphs[next_paragraph_index].size
next_paragraph_index += 1
return next_paragraph_index, Features(
id='{}--{}'.format(question_answer_evidence.question.id,
question_answer_evidence.evidence.info.id),
stride_index=stride_index,
question_id=question_answer_evidence.question.id,
question=question_answer_evidence.question.value,
context=context,
token_ids=ids + token_ids,
global_token_ids=global_ids,
segment_ids=segment_ids,
token_offsets=offsets)
def process(
self, question_answer_evidence: QuestionAnswerEvidence
) -> Generator[Features, None, None]:
# Tokenize question which is shared among all examples.
ids = (
self._sentencepiece_processor.EncodeAsIds(
question_answer_evidence.question.value) +
[self._sentencepiece_processor.PieceToId(_SEP_PIECE)])
paragraph_texts = list(
filter(
lambda p: p,
map(lambda p: p.strip(),
question_answer_evidence.evidence.text.split('\n'))))
stride_index, paragraphs, paragraph_index = 0, [], 0
while paragraph_index < len(paragraph_texts):
paragraph_index, features = self._make_features(stride_index,
paragraph_texts,
paragraphs,
question_answer_evidence,
ids, paragraph_index)
stride_index += 1
yield features
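# Note: when a window stops before the end of the document and `stride` > 0,
# `_make_features` advances past the leading paragraph(s) whose combined token
# count still fits within `stride`, so consecutive windows overlap on the
# remaining paragraphs instead of restarting from scratch.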
def _handle_exceptional_examples(
features: Features,
processor: spm.SentencePieceProcessor) -> List[AnswerSpan]:
"""Special cases in data."""
if features.id == 'qw_6687--Viola.txt':
pattern = 'three strings in common—G, D, and A'.encode('utf-8')
i = features.context.find(pattern)
if i != -1:
span = AnswerSpan(i + len(pattern) - 1, i + len(pattern), 'A')
span = realign_answer_span(features, None, processor, span)
assert span is not None, 'Span should exist.'
return [span]
if features.id == 'sfq_26183--Vitamin_A.txt':
pattern = ('Vitamin A is a group of unsaturated nutritional organic '
'compounds that includes retinol').encode('utf-8')
i = features.context.find(pattern)
if i != -1:
span = AnswerSpan(i + pattern.find(b'A'), i + pattern.find(b'A') + 1, 'A')
span = realign_answer_span(features, None, processor, span)
assert span is not None, 'Span should exist.'
spans = [span]
span = AnswerSpan(i, i + pattern.find(b'A') + 1, 'Vitamin A')
      span = realign_answer_span(features, None, processor, span)
      assert span is not None, 'Span should exist.'
      return spans + [span]
if features.id == 'odql_292--Colombia.txt':
pattern = b'Colombia is the third-most populous country in Latin America'
i = features.context.find(pattern)
if i != -1:
span = AnswerSpan(i, i + len(b'Colombia'), 'Colombia')
span = realign_answer_span(features, None, processor, span)
assert span is not None, 'Span should exist.'
return [span]
if features.id == 'tc_1648--Vietnam.txt':
pattern = 'Bảo Đại'.encode('utf-8')
i = features.context.find(pattern)
if i != -1:
span = AnswerSpan(i, i + len(pattern), 'Bảo Đại')
span = realign_answer_span(features, None, processor, span)
assert span is not None, 'Span should exist.'
return [span]
if features.id == 'sfq_22225--Irish_mythology.txt':
pattern = 'Tír na nÓg'.encode('utf-8')
spans = []
i = 0
while features.context.find(pattern, i) != -1:
      i = features.context.find(pattern, i)
span = AnswerSpan(i, i + len(pattern), 'Tír na nÓg')
span = realign_answer_span(features, None, processor, span)
assert span is not None, 'Span should exist.'
spans.append(span)
i += len(pattern)
return spans
return []
class FindAnswerSpans(beam.DoFn):
"""Find answer spans in document."""
def __init__(self, sentencepiece_model_path: Text):
self._sentencepiece_model_path = sentencepiece_model_path
def setup(self):
self._sentencepiece_processor = read_sentencepiece_model(
self._sentencepiece_model_path)
def process(
self,
element: Tuple[Text, List[Features]],
answer_sets: Dict[Text, Set[Text]],
) -> Generator[Tuple[Features, List[AnswerSpan]], None, None]:
question_id, features = element
answer_set = answer_sets[question_id]
has_answer = False
for feature in features:
answer_spans = []
for answer_span in find_answer_spans(feature.context, answer_set):
realigned_answer_span = realign_answer_span(
feature, answer_set, self._sentencepiece_processor, answer_span)
if realigned_answer_span:
answer_spans.append(realigned_answer_span)
if not answer_spans:
answer_spans = _handle_exceptional_examples(
feature, self._sentencepiece_processor)
if answer_spans:
has_answer = True
else:
metrics.Metrics.counter('_', 'answerless_examples').inc()
yield feature, answer_spans
if not has_answer:
metrics.Metrics.counter('_', 'answerless_questions').inc()
logging.error('Question %s has no answer.', question_id)
def make_example(
features: Features,
labels: Optional[List[AnswerSpan]] = None) -> Tuple[Text, Dict[Text, Any]]:
"""Make an example."""
feature = {
'id': features.id,
'qid': features.question_id,
'question': features.question,
'context': features.context,
'token_ids': features.token_ids,
'token_offsets': features.token_offsets,
'segment_ids': features.segment_ids,
'global_token_ids': features.global_token_ids,
}
if labels:
answers = set((label.begin, label.end) for label in labels)
feature['answers'] = np.array([list(answer) for answer in answers],
np.int64)
else:
feature['answers'] = np.zeros([0, 2], np.int64)
metrics.Metrics.counter('_', 'examples').inc()
return f'{features.id}--{features.stride_index}', feature
def make_pipeline(root: beam.Pipeline, question_answers: List[QuestionAnswer],
answer: bool, max_num_tokens: int, max_num_global_tokens: int,
stride: int, sentencepiece_model_path: Text,
wikipedia_dir: Text, web_dir: Text):
"""Makes a Beam pipeline."""
question_answers = (
root | 'CreateQuestionAnswers' >> beam.Create(question_answers))
features = (
question_answers
| 'ReadEvidence' >> beam.ParDo(
ReadEvidence(wikipedia_dir=wikipedia_dir, web_dir=web_dir))
| 'MakeFeatures' >> beam.ParDo(
MakeFeatures(
sentencepiece_model_path=sentencepiece_model_path,
max_num_tokens=max_num_tokens,
max_num_global_tokens=max_num_global_tokens,
stride=stride)))
if answer:
features = features | 'KeyFeature' >> beam.Map(
lambda feature: (feature.question_id, feature))
# pylint: disable=g-long-lambda
answer_sets = (
question_answers
| 'MakeAnswerSet' >>
beam.Map(lambda qa: (qa.question.id, make_answer_set(qa.answer))))
# pylint: enable=g-long-lambda
examples = (
features
| beam.GroupByKey()
| 'FindAnswerSpans' >> beam.ParDo(
FindAnswerSpans(sentencepiece_model_path),
answer_sets=beam.pvalue.AsDict(answer_sets))
| 'MakeExamplesWithLabels' >> beam.MapTuple(make_example))
else:
examples = features | 'MakeExamples' >> beam.Map(make_example)
return examples
| 18,774 | 35.385659 | 80 | py |
models | models-master/official/projects/triviaqa/sentencepiece_pb2.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
# pylint: disable=protected-access
# Generated by the protocol buffer compiler. DO NOT EDIT!
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='third_party/sentencepiece/src/sentencepiece.proto',
package='sentencepiece',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n1third_party/sentencepiece/src/sentencepiece.proto\x12\rsentencepiece\"\xdf\x01\n\x11SentencePieceText\x12\x0c\n\x04text\x18\x01 \x01(\t\x12>\n\x06pieces\x18\x02 \x03(\x0b\x32..sentencepiece.SentencePieceText.SentencePiece\x12\r\n\x05score\x18\x03 \x01(\x02\x1a\x62\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\r\x12\x0f\n\x07surface\x18\x03 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x04 \x01(\r\x12\x0b\n\x03\x65nd\x18\x05 \x01(\r*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"J\n\x16NBestSentencePieceText\x12\x30\n\x06nbests\x18\x01 \x03(\x0b\x32 .sentencepiece.SentencePieceText'
)
_SENTENCEPIECETEXT_SENTENCEPIECE = _descriptor.Descriptor(
name='SentencePiece',
full_name='sentencepiece.SentencePieceText.SentencePiece',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='piece',
full_name='sentencepiece.SentencePieceText.SentencePiece.piece',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b''.decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id',
full_name='sentencepiece.SentencePieceText.SentencePiece.id',
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='surface',
full_name='sentencepiece.SentencePieceText.SentencePiece.surface',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b''.decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='begin',
full_name='sentencepiece.SentencePieceText.SentencePiece.begin',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end',
full_name='sentencepiece.SentencePieceText.SentencePiece.end',
index=4,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=183,
serialized_end=281,
)
_SENTENCEPIECETEXT = _descriptor.Descriptor(
name='SentencePieceText',
full_name='sentencepiece.SentencePieceText',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text',
full_name='sentencepiece.SentencePieceText.text',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b''.decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pieces',
full_name='sentencepiece.SentencePieceText.pieces',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='score',
full_name='sentencepiece.SentencePieceText.score',
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
],
extensions=[],
nested_types=[
_SENTENCEPIECETEXT_SENTENCEPIECE,
],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=69,
serialized_end=292,
)
_NBESTSENTENCEPIECETEXT = _descriptor.Descriptor(
name='NBestSentencePieceText',
full_name='sentencepiece.NBestSentencePieceText',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='nbests',
full_name='sentencepiece.NBestSentencePieceText.nbests',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[],
serialized_start=294,
serialized_end=368,
)
_SENTENCEPIECETEXT_SENTENCEPIECE.containing_type = _SENTENCEPIECETEXT
_SENTENCEPIECETEXT.fields_by_name[
'pieces'].message_type = _SENTENCEPIECETEXT_SENTENCEPIECE
_NBESTSENTENCEPIECETEXT.fields_by_name[
'nbests'].message_type = _SENTENCEPIECETEXT
DESCRIPTOR.message_types_by_name['SentencePieceText'] = _SENTENCEPIECETEXT
DESCRIPTOR.message_types_by_name[
'NBestSentencePieceText'] = _NBESTSENTENCEPIECETEXT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SentencePieceText = _reflection.GeneratedProtocolMessageType(
'SentencePieceText',
(_message.Message,),
{
'SentencePiece':
_reflection.GeneratedProtocolMessageType(
'SentencePiece',
(_message.Message,),
{
'DESCRIPTOR':
_SENTENCEPIECETEXT_SENTENCEPIECE,
'__module__':
'official.nlp.projects.triviaqa.sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText.SentencePiece)
}),
'DESCRIPTOR':
_SENTENCEPIECETEXT,
'__module__':
'official.nlp.projects.triviaqa.sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.SentencePieceText)
})
_sym_db.RegisterMessage(SentencePieceText)
_sym_db.RegisterMessage(SentencePieceText.SentencePiece)
NBestSentencePieceText = _reflection.GeneratedProtocolMessageType(
'NBestSentencePieceText',
(_message.Message,),
{
'DESCRIPTOR': _NBESTSENTENCEPIECETEXT,
'__module__': 'official.nlp.projects.triviaqa.sentencepiece_pb2'
# @@protoc_insertion_point(class_scope:sentencepiece.NBestSentencePieceText)
})
_sym_db.RegisterMessage(NBestSentencePieceText)
# @@protoc_insertion_point(module_scope)
| 10,818 | 33.565495 | 664 | py |
models | models-master/official/projects/pixel/data_loader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for the Pixel Classification task."""
import dataclasses
from typing import Mapping, Optional, Tuple
import tensorflow as tf
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
LABEL_TYPES_MAP = {'int': tf.int64, 'float': tf.float32}
@dataclasses.dataclass
class PixelDataConfig(cfg.DataConfig):
"""Data config for text classification task."""
input_path: str = ''
global_batch_size: int = 32
is_training: bool = True
label_type: str = 'int'
num_channels: int = 3
input_size: Tuple[int, int] = (16, 4096)
patch_h: int = 16
patch_w: int = 16
# Whether to include the example id number.
include_example_id: bool = False
  # Either tfrecord, sstable, or recordio.
file_type: str = 'tfrecord'
@data_loader_factory.register_data_loader_cls(PixelDataConfig)
class PixelDataLoader(data_loader.DataLoader):
"""A class to load dataset for text classification task."""
def __init__(self, params):
self._params = params
self._include_example_id = params.include_example_id
def name_to_features_spec(self):
"""Defines features to decode. Subclass may override to append features."""
h, w = self._params.input_size
positions = h // self._params.patch_h * w // self._params.patch_w
name_to_features = {
'pixel_values': tf.io.FixedLenFeature(
[self._params.num_channels, h, w], tf.float32
),
'label': tf.io.FixedLenFeature([1], tf.int64),
'attention_mask': tf.io.FixedLenFeature([positions], tf.float32),
}
if self._include_example_id:
name_to_features['example_id'] = tf.io.FixedLenFeature([], tf.int64)
return name_to_features
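  # With the defaults in PixelDataConfig (input_size=(16, 4096), 16x16 patches,
  # 3 channels), each serialized example is expected to carry:
  #   pixel_values:   [3, 16, 4096] float32
  #   attention_mask: [256] float32  # (16 // 16) * (4096 // 16) patch positions
  #   label:          [1] int64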
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
example = tf.io.parse_single_example(record, self.name_to_features_spec())
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
key_mapping = {
'pixel_values': 'pixel_values',
'label': 'label',
'attention_mask': 'attention_mask',
}
ret = {}
for record_key in record:
if record_key in key_mapping:
ret[key_mapping[record_key]] = record[record_key]
else:
ret[record_key] = record[record_key]
return ret
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
params=self._params,
decoder_fn=self._decode,
parser_fn=self._parse,
)
return reader.read(input_context)
| 3,761 | 32.891892 | 80 | py |
models | models-master/official/projects/pixel/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver, register pixel configs."""
from absl import app
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.projects.pixel.configs import pixel
from official.projects.pixel.tasks import classification
# pylint: enable=unused-import
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,038 | 34.827586 | 77 | py |
models | models-master/official/projects/pixel/configs/pixel.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pixel configurations."""
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.projects.pixel.data_loader import PixelDataConfig
from official.projects.pixel.tasks.classification import PixelConfig
from official.projects.pixel.tasks.classification import PixelModelConfig
@exp_factory.register_config_factory('pixel_sst2_finetune')
def pixel_sst2_finetune() -> cfg.ExperimentConfig:
"""Config to get results that matches https://github.com/xplip/pixel for sst2."""
train_batch_size = 256
eval_batch_size = 32
num_train_steps = 15000
input_size = (16, 4096)
patch_h, patch_w = 16, 16
num_channels = 3
num_classes = 2
config = cfg.ExperimentConfig(
task=PixelConfig(
train_data=PixelDataConfig(
input_path=None,
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=10000,
drop_remainder=True,
input_size=input_size,
patch_h=patch_h,
patch_w=patch_w,
num_channels=num_channels,
),
validation_data=PixelDataConfig(
input_path=None,
is_training=False,
global_batch_size=eval_batch_size,
shuffle_buffer_size=10000,
drop_remainder=True,
input_size=input_size,
patch_h=patch_h,
patch_w=patch_w,
num_channels=num_channels,
),
model=PixelModelConfig(
filters=768,
num_layers=12,
mlp_dim=3072,
num_heads=12,
dropout_rate=0.1,
attention_dropout_rate=0.1,
init_stochastic_depth_rate=0.0,
),
init_checkpoint=None,
input_size=input_size,
patch_h=patch_h,
patch_w=patch_w,
num_channels=num_channels,
num_classes=num_classes,
),
trainer=cfg.TrainerConfig(
train_steps=num_train_steps,
validation_steps=27,
steps_per_loop=100,
summary_interval=100,
checkpoint_interval=100,
validation_interval=100,
max_to_keep=1,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
},
'learning_rate': {
'type': 'polynomial',
'cycle': False,
'polynomial': {
'decay_steps': num_train_steps,
'end_learning_rate': 0.0,
'initial_learning_rate': 3.0e-05,
'power': 1.0,
},
},
'warmup': {
'type': 'polynomial',
'polynomial': {
'warmup_steps': 100,
'power': 1.0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
],
)
return config
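# Usage sketch: importing this module registers the factory above, after which
# the experiment can be materialized with
#   from official.core import exp_factory
#   experiment = exp_factory.get_exp_config('pixel_sst2_finetune')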
| 3,757 | 32.256637 | 83 | py |
models | models-master/official/projects/pixel/utils/convert_numpy_weights_to_tf.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert pixel model from numpy weights to official.projects.pixel."""
import json
import sys
import numpy as np
import tensorflow as tf
from official.projects.pixel.tasks import classification
def convert(vit_encoder, hf_model_param_dict):
"""Convert pixel model from huggingface to official.projects.pixel."""
num_layers = 12
num_attention_heads = 12
hidden_size = 768
head_size = hidden_size // num_attention_heads
assert head_size * num_attention_heads == hidden_size
vit_encoder.encoder.patch_to_embed.set_weights([
hf_model_param_dict[
"vit.embeddings.patch_embeddings.projection.weight"
].transpose(2, 3, 1, 0),
hf_model_param_dict["vit.embeddings.patch_embeddings.projection.bias"],
])
# pylint: disable=protected-access
vit_encoder.encoder.encoder._pos_embed.pos_embedding.assign(
hf_model_param_dict["vit.embeddings.position_embeddings"][:, :257]
)
vit_encoder.encoder.encoder._norm.set_weights([
hf_model_param_dict["vit.layernorm.weight"],
hf_model_param_dict["vit.layernorm.bias"],
])
vit_encoder.encoder.token_cls.cls.assign(
hf_model_param_dict["vit.embeddings.cls_token"]
)
for layer_num in range(num_layers):
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._attention_layer._query_dense.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"
].T.reshape((hidden_size, num_attention_heads, head_size)),
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"
].reshape((num_attention_heads, head_size)),
])
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._attention_layer._key_dense.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"
].T.reshape((hidden_size, num_attention_heads, head_size)),
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"
].reshape((num_attention_heads, head_size)),
])
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._attention_layer._value_dense.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"
].T.reshape((hidden_size, num_attention_heads, head_size)),
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"
].reshape((num_attention_heads, head_size)),
])
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._attention_layer._output_dense.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.output.dense.weight"
].T.reshape((num_attention_heads, head_size, hidden_size)),
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.attention.output.dense.bias"
],
])
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._attention_layer_norm.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.layernorm_before.weight"
],
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.layernorm_before.bias"
],
])
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._intermediate_dense.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.intermediate.dense.weight"
].T,
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.intermediate.dense.bias"
],
])
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._output_dense.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.output.dense.weight"
].T,
hf_model_param_dict[f"vit.encoder.layer.{layer_num}.output.dense.bias"],
])
vit_encoder.encoder.encoder._encoder_layers[
layer_num
]._output_layer_norm.set_weights([
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.layernorm_after.weight"
],
hf_model_param_dict[
f"vit.encoder.layer.{layer_num}.layernorm_after.bias"
],
])
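# Layout notes for `convert` above: HF Linear kernels are stored as
# (out_features, in_features), hence the transposes; query/key/value kernels
# are additionally reshaped to (hidden_size, num_heads, head_size), and the
# attention output kernel to (num_heads, head_size, hidden_size), to match the
# per-head EinsumDense variables of the TF-NLP attention layers.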
if __name__ == "__main__":
data_path = sys.argv[1]
output_model_name = sys.argv[2] if len(sys.argv) > 2 else "pixel_encoder.ckpt"
  with open(f"{data_path}/model_name.json") as f:
    model_name = json.load(f)
  with open(f"{data_path}/model_param.npy", "rb") as f:
    model_params = np.load(f, allow_pickle=True)
config = classification.PixelConfig()
task = classification.PixelClassificationTask(config)
model = task.build_model()
  convert(model, dict(zip(model_name, model_params)))
tf.train.Checkpoint(encoder=model.encoder).write(output_model_name)
| 5,455 | 36.115646 | 80 | py |
models | models-master/official/projects/pixel/modeling/pixel.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pixel models."""
import tensorflow as tf
from official.vision.modeling.backbones import vit
layers = tf.keras.layers
class ViTEncoder(vit.Encoder):
"""ViT Encoder.
The original vit implementation in official/vision/modeling/backbones/vit.py
does not support attention masks. This version allows passing the attention
mask in call along with inputs as a (bs, seqlen) tensor.
"""
def call(self, inputs, training=None):
x, mask = inputs
if self._add_pos_embed:
x = self._pos_embed(x, inputs_positions=self._inputs_positions)
x = self._dropout(x, training=training)
for encoder_layer in self._encoder_layers:
x = encoder_layer((x, mask), training=training)
x = self._norm(x)
return x
class VisionTransformer(tf.keras.layers.Layer):
"""ViT backbone."""
def __init__(
self,
patch_h,
patch_w,
filters,
num_layers,
mlp_dim,
num_heads,
dropout_rate,
attention_dropout_rate,
init_stochastic_depth_rate,
**kwargs
):
super().__init__(**kwargs)
self.patch_h = patch_h
self.patch_w = patch_w
self.filters = filters
self.num_layers = num_layers
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.dropout_rate = dropout_rate
self.attention_dropout_rate = attention_dropout_rate
self.init_stochastic_depth_rate = init_stochastic_depth_rate
def build(self, input_shape):
self.patch_to_embed = tf.keras.layers.Conv2D(
filters=self.filters,
kernel_size=(self.patch_h, self.patch_w),
strides=(self.patch_h, self.patch_w),
padding='valid',
kernel_initializer='lecun_normal',
)
self.encoder = ViTEncoder(
num_layers=self.num_layers,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dropout_rate=self.dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
init_stochastic_depth_rate=self.init_stochastic_depth_rate,
add_pos_embed=True,
)
self.token_cls = vit.TokenLayer()
super().build(input_shape)
def to_embed(self, patches):
return self.patch_to_embed(patches)
def insert_cls(self, patch_embeds):
return self.token_cls(patch_embeds)
def call(self, inputs): # pylint:disable=signature-mismatch
if isinstance(inputs, dict):
images = inputs.get('pixel_values', None)
attention_mask = inputs.get('attention_mask', None)
attention_mask = tf.transpose(
tf.concat(
values=[
tf.ones((1, tf.shape(attention_mask)[0]), tf.float32),
tf.transpose(attention_mask),
],
axis=0,
)
)
attention_mask = tf.einsum('ij,ik->ijk', attention_mask, attention_mask)
attention_mask = tf.cast(attention_mask, tf.int32)
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
images = tf.transpose(images, perm=[0, 2, 3, 1])
patch_embeds = self.to_embed(images)
patch_shape = tf.shape(patch_embeds)
patch_embeds = tf.reshape(
patch_embeds, (patch_shape[0], -1, patch_shape[-1])
)
patch_embeds = self.insert_cls(patch_embeds)
return self.encoder((patch_embeds, attention_mask))
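# A minimal usage sketch (shapes and values are illustrative assumptions,
# not part of the original file):
#
#   backbone = VisionTransformer(
#       patch_h=16, patch_w=16, filters=768, num_layers=12, mlp_dim=3072,
#       num_heads=12, dropout_rate=0.1, attention_dropout_rate=0.1,
#       init_stochastic_depth_rate=0.0)
#   outputs = backbone({
#       'pixel_values': tf.zeros((2, 3, 16, 4096)),  # (N, C, H, W) images
#       'attention_mask': tf.ones((2, 256)),         # one entry per patch
#   })                                               # -> (2, 257, 768)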
class PixelClassifier(tf.keras.layers.Layer):
"""Pixel classifier for finetuning. Uses the cls token."""
def __init__(self, encoder, num_classes, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.linear = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
)
def call(self, inputs):
encoded = self.encoder(inputs)
return self.linear(encoded[:, 0])
class PixelLinearClassifier(tf.keras.layers.Layer):
"""Pixel classifier for finetuning.
  Compared to `PixelClassifier`, this head adds a dense projection, GELU
  activation, and layer norm, and classifies the masked average of the patch
  token representations rather than the CLS token.
  """
def __init__(self, encoder, num_classes, num_filters, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.num_filters = num_filters
self.linear_clas = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
)
self.norm = tf.keras.layers.LayerNormalization(
name='classification_layer_norm',
axis=-1,
epsilon=1e-6,
dtype=tf.float32,
)
self.linear_trans = tf.keras.layers.Dense(
num_filters,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01),
)
self.activation = tf.keras.layers.Activation('gelu')
self.dropout = tf.keras.layers.Dropout(0.1)
def call(self, inputs, training=False):
attention_mask = inputs.get('attention_mask')
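    # `attention_mask` is (batch, num_patches) with 1 for real patches and 0
    # for padding; broadcast it across the feature dimension so that padded
    # positions contribute nothing to the pooled representation.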
mask_lengths = tf.expand_dims(tf.reduce_sum(attention_mask, axis=1), 1)
attention_mask = tf.tile(
tf.expand_dims(attention_mask, 2), [1, 1, self.num_filters]
)
encoded = self.encoder(inputs)
encoded = self.norm(self.activation(self.linear_trans(encoded)))
encoded = self.dropout(encoded, training=training)
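    # Masked mean over patch tokens; index 0 is the CLS token and is skipped.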
mean_pooling = (
tf.reduce_sum(encoded[:, 1:, :] * attention_mask, axis=1) / mask_lengths
)
return self.linear_clas(mean_pooling)
| 5,897 | 30.37234 | 80 | py |
models | models-master/official/projects/pixel/tasks/classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Text classification task with ViT."""
import dataclasses
from typing import Tuple
import numpy as np
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.data import data_loader_factory
from official.projects.pixel.modeling import pixel
@dataclasses.dataclass
class PixelModelConfig(base_config.Config):
"""The model configuration."""
filters: int = 768
num_layers: int = 12
mlp_dim: int = 3072
num_heads: int = 12
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
init_stochastic_depth_rate: float = 0.0
@dataclasses.dataclass
class PixelConfig(cfg.TaskConfig):
"""The task configuration."""
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
patch_h: int = 16
patch_w: int = 16
num_classes: int = 2
num_channels: int = 3
input_size: Tuple[int, int] = (16, 4096)
model: PixelModelConfig = PixelModelConfig()
@task_factory.register_task_cls(PixelConfig)
class PixelClassificationTask(base_task.Task):
"""Text classificaiton with Pixel and load checkpoint if exists."""
label_field: str = 'label'
metric_type: str = 'accuracy'
def build_model(self) -> tf.keras.Model:
encoder = pixel.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
self.task_config.model.filters,
self.task_config.model.num_layers,
self.task_config.model.mlp_dim,
self.task_config.model.num_heads,
self.task_config.model.dropout_rate,
self.task_config.model.attention_dropout_rate,
self.task_config.model.init_stochastic_depth_rate,
)
model = pixel.PixelLinearClassifier(
encoder, self.task_config.num_classes, self.task_config.model.filters
)
h, w = self.task_config.input_size
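    # One sequence position per (patch_h, patch_w) patch; the CLS token is
    # added inside the model and is not counted here.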
positions = h // self.task_config.patch_h * w // self.task_config.patch_w
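    # Run a dummy forward pass so every variable is created before
    # `initialize` tries to restore them from a checkpoint.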
model({
'label': tf.zeros((1,)),
'pixel_values': tf.zeros((1, self.task_config.num_channels, h, w)),
'attention_mask': tf.zeros((1, positions)),
})
return model
def build_inputs(self, params, input_context=None):
return data_loader_factory.get_data_loader(params).load(input_context)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
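    # A single output class is treated as a regression head (MSE); otherwise
    # use sparse softmax cross-entropy over the class logits.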
label_ids = labels[self.label_field]
if self.task_config.num_classes == 1:
loss = tf.keras.losses.mean_squared_error(label_ids, model_outputs)
else:
loss = tf.keras.losses.sparse_categorical_crossentropy(
label_ids, tf.cast(model_outputs, tf.float32), from_logits=True
)
if aux_losses:
loss += tf.add_n(aux_losses)
return tf_utils.safe_mean(loss)
def initialize(self, model: tf.keras.Model):
"""Load encoder if checkpoint exists.
Args:
model: The keras.Model built or used by this task.
"""
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
ckpt = tf.train.Checkpoint(encoder=model.encoder)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
def build_metrics(self, training=None):
del training
if self.task_config.num_classes == 1:
metrics = [tf.keras.metrics.MeanSquaredError()]
elif self.task_config.num_classes == 2:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
tf.keras.metrics.AUC(name='auc', curve='PR'),
]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
for metric in metrics:
if metric.name == 'auc':
        # Convert logits to probabilities and extract the probability of the
        # positive class.
metric.update_state(
labels[self.label_field],
tf.expand_dims(tf.nn.softmax(model_outputs)[:, 1], axis=1),
)
if metric.name == 'cls_accuracy':
metric.update_state(labels[self.label_field], model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
compiled_metrics.update_state(labels[self.label_field], model_outputs)
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
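    # The input dict carries both the features and the labels, so it is used
    # for both roles here.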
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses
)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
if self.metric_type == 'matthews_corrcoef':
logs.update({
'sentence_prediction': (
tf.expand_dims( # Ensure one prediction along batch dimension.
tf.math.argmax(outputs, axis=1), axis=1
)
),
'labels': labels[self.label_field],
})
else:
logs.update({
'sentence_prediction': outputs,
'labels': labels[self.label_field],
})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
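    # Accuracy is tracked on-line by the Keras metrics; the other metric
    # types are aggregated over the full validation set and computed in
    # `reduce_aggregated_logs`.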
if self.metric_type == 'accuracy':
return None
if state is None:
state = {'sentence_prediction': [], 'labels': []}
state['sentence_prediction'].append(
np.concatenate(
[v.numpy() for v in step_outputs['sentence_prediction']], axis=0
)
)
state['labels'].append(
np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0)
)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.metric_type == 'accuracy':
return None
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
if self.metric_type == 'f1':
preds = np.argmax(preds, axis=1)
return {self.metric_type: sklearn_metrics.f1_score(labels, preds)}
elif self.metric_type == 'matthews_corrcoef':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
return {
self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
}
elif self.metric_type == 'pearson_spearman_corr':
preds = np.reshape(preds, -1)
labels = np.reshape(labels, -1)
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
corr_metric = (pearson_corr + spearman_corr) / 2
return {self.metric_type: corr_metric}
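# A minimal end-to-end sketch (config values are illustrative assumptions,
# not part of the original file):
#
#   config = PixelConfig(
#       num_classes=2, patch_h=16, patch_w=16, input_size=(16, 4096),
#       model=PixelModelConfig())
#   task = PixelClassificationTask(config)
#   model = task.build_model()
#   task.initialize(model)  # Restores model.encoder if a checkpoint is set.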
| 7,720 | 34.255708 | 80 | py |