# File: models-master/official/projects/qat/vision/quantization/helper.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization helpers."""
from __future__ import annotations
import copy
from typing import Any, Dict, List, Optional, Type, Union
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.quantization import configs
_QUANTIZATION_WEIGHT_NAMES = [
'output_max',
'output_min',
'optimizer_step',
'kernel_min',
'kernel_max',
'add_three_min',
'add_three_max',
'divide_six_min',
'divide_six_max',
'depthwise_kernel_min',
'depthwise_kernel_max',
'pointwise_kernel_min',
'pointwise_kernel_max',
'reduce_mean_quantizer_vars_min',
'reduce_mean_quantizer_vars_max',
'quantize_layer_min',
'quantize_layer_max',
'quantize_layer_1_min',
'quantize_layer_1_max',
'quantize_layer_2_min',
'quantize_layer_2_max',
'quantize_layer_3_min',
'quantize_layer_3_max',
'post_activation_min',
'post_activation_max',
]
_ORIGINAL_WEIGHT_NAME = [
'kernel',
'depthwise_kernel',
'pointwise_kernel',
'gamma',
'beta',
'moving_mean',
'moving_variance',
'bias',
]
def is_quantization_weight_name(name: str) -> bool:
simple_name = name.split('/')[-1].split(':')[0]
if simple_name in _QUANTIZATION_WEIGHT_NAMES:
return True
if simple_name in _ORIGINAL_WEIGHT_NAME:
return False
raise ValueError('Variable name {} is not supported.'.format(simple_name))
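# Illustrative examples (names drawn from the two lists above; only the final
# path component before ':' is matched):
#   is_quantization_weight_name('conv2d/kernel_min:0')  # -> True
#   is_quantization_weight_name('conv2d/kernel:0')      # -> False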
def copy_original_weights(original_model: tf.keras.Model,
quantized_model: tf.keras.Model):
"""Helper function that copy the original model weights to quantized model."""
original_weight_value = original_model.get_weights()
weight_values = quantized_model.get_weights()
original_idx = 0
for idx, weight in enumerate(quantized_model.weights):
if not is_quantization_weight_name(weight.name):
if original_idx >= len(original_weight_value):
        raise ValueError('Not enough original model weights.')
weight_values[idx] = original_weight_value[original_idx]
original_idx = original_idx + 1
if original_idx < len(original_weight_value):
    raise ValueError('Not enough quantized model weights.')
quantized_model.set_weights(weight_values)
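# Example usage (a minimal sketch; `float_model` and `qat_model` stand for an
# original model and its quantize-applied counterpart with a matching
# architecture, and are not defined in this module):
#
#   float_model = ...  # original tf.keras.Model
#   qat_model = ...    # quantization-applied version of the same architecture
#   copy_original_weights(float_model, qat_model)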
class LayerQuantizerHelper(object):
"""Helper class that handles quantizers."""
def __init__(self, *args, **kwargs):
self._quantizers = {}
self._quantizer_vars = {}
super().__init__(*args, **kwargs)
def _all_value_quantizer(self):
return tfmot.quantization.keras.quantizers.AllValuesQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def _moving_average_quantizer(self):
return tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def _add_quantizer(self, name, all_value_quantizer=False):
if all_value_quantizer:
self._quantizers[name] = self._all_value_quantizer()
else:
self._quantizers[name] = self._moving_average_quantizer()
def _apply_quantizer(self, name, inputs, training, **kwargs):
return self._quantizers[name](
inputs, training, self._quantizer_vars[name], **kwargs)
def _build_quantizer_vars(self):
for name in self._quantizers:
self._quantizer_vars[name] = self._quantizers[name].build(
tensor_shape=None, name=name, layer=self)
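# Example subclass (a minimal sketch, not part of this module): a layer mixes
# in LayerQuantizerHelper, registers named quantizers in build(), and applies
# them to raw tensor ops in call().
#
#   class AddQuantized(LayerQuantizerHelper, tf.keras.layers.Layer):
#
#     def build(self, input_shape):
#       self._add_quantizer('output')
#       self._build_quantizer_vars()
#       super().build(input_shape)
#
#     def call(self, inputs, training=None):
#       return self._apply_quantizer('output', inputs[0] + inputs[1], training)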
class NoOpActivation:
"""No-op activation which simply returns the incoming tensor.
  This activation is required to distinguish this layer from
  `keras.activations.linear`, which does the same thing. The main difference
  is that NoOpActivation should not have any quantize operation applied to it.
"""
def __call__(self, x: tf.Tensor) -> tf.Tensor:
return x
def get_config(self) -> Dict[str, Any]:
"""Get a config of this object."""
return {}
def __eq__(self, other: Any) -> bool:
if not other or not isinstance(other, NoOpActivation):
return False
return True
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def quantize_wrapped_layer(cls, quantize_config):
def constructor(*arg, **kwargs):
return tfmot.quantization.keras.QuantizeWrapperV2(
cls(*arg, **kwargs), quantize_config)
return constructor
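# Example usage (illustrative; the module-level aliases defined below are
# built the same way):
#
#   DenseQuantizedExample = quantize_wrapped_layer(
#       tf.keras.layers.Dense,
#       configs.Default8BitQuantizeConfig(['kernel'], ['activation'], False))
#   layer = DenseQuantizedExample(units=10)  # a QuantizeWrapperV2 instance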
def norm_by_activation(activation, norm_quantized, norm_no_quantized):
if activation not in ['relu', 'relu6']:
return norm_quantized
else:
return norm_no_quantized
class SeparableConv2DQuantized(tf.keras.layers.Layer):
"""Quantized SeperableConv2D."""
def __init__(
self,
name: Optional[str] = None,
last_quantize: bool = False,
**conv_kwargs,
):
"""Initializes a SeparableConv2DQuantized.
Args:
name: The name of the layer.
      last_quantize: A `bool` indicating whether to add quantization for the
        output.
      **conv_kwargs: Keyword arguments to be used for the conv and dwconv
        layers.
"""
super().__init__(name=name)
self._conv_kwargs = copy.deepcopy(conv_kwargs)
self._name = name
self._last_quantize = last_quantize
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the child layers of the layer."""
depthwise_conv2d_quantized = quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], [], True),
)
conv2d_quantized = quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(
['kernel'], [], self._last_quantize
),
)
dwconv_kwargs = self._conv_kwargs.copy()
    # For a depthwise conv, the number of input filters always equals the
    # number of output filters; the `filters` argument is only needed for the
    # pointwise conv2d op.
del dwconv_kwargs['filters']
dwconv_kwargs.update({
'activation': None,
'use_bias': False,
})
self.dw_conv = depthwise_conv2d_quantized(name='dw', **dwconv_kwargs)
conv_kwargs = self._conv_kwargs.copy()
conv_kwargs.update({
'kernel_size': (1, 1),
'strides': (1, 1),
'padding': 'valid',
'groups': 1,
})
self.conv = conv2d_quantized(name='pw', **conv_kwargs)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
"""Call the separable conv layer."""
x = self.dw_conv(inputs)
outputs = self.conv(x)
return outputs
def get_config(self) -> Dict[str, Any]:
"""Returns the config of the layer."""
config = self._conv_kwargs.copy()
config.update({
'name': self._name,
'last_quantize': self._last_quantize,
})
return config
@classmethod
def from_config(
cls: Type[SeparableConv2DQuantized], config: Dict[str, Any]
) -> SeparableConv2DQuantized:
"""Creates a layer from its config."""
return cls(**config)
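# Example usage (a minimal sketch; the kwargs mirror those of the wrapped
# depthwise/pointwise convolutions):
#
#   sep_conv = SeparableConv2DQuantized(
#       name='sep_conv', filters=32, kernel_size=3, strides=1, padding='same')
#   y = sep_conv(tf.zeros([1, 64, 64, 16]))  # -> shape [1, 64, 64, 32]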
Conv2DQuantized = quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
Conv2DOutputQuantized = quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
DepthwiseConv2DQuantized = quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
False))
DepthwiseConv2DOutputQuantized = quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],
True))
GlobalAveragePooling2DQuantized = quantize_wrapped_layer(
tf.keras.layers.GlobalAveragePooling2D,
configs.Default8BitQuantizeConfig([], [], True))
AveragePooling2DQuantized = quantize_wrapped_layer(
tf.keras.layers.AveragePooling2D,
configs.Default8BitQuantizeConfig([], [], True))
ResizingQuantized = quantize_wrapped_layer(
tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True))
ConcatenateQuantized = quantize_wrapped_layer(
tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [],
True))
UpSampling2DQuantized = quantize_wrapped_layer(
tf.keras.layers.UpSampling2D, configs.Default8BitQuantizeConfig([], [],
True))
ReshapeQuantized = quantize_wrapped_layer(
tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True))
DenseQuantized = quantize_wrapped_layer(
tf.keras.layers.Dense,
configs.Default8BitQuantizeConfig(['kernel'], ['activation'], False),
)
DenseOutputQuantized = quantize_wrapped_layer(
tf.keras.layers.Dense,
configs.Default8BitQuantizeConfig(['kernel'], ['activation'], True),
)
IdentityQuantized = quantize_wrapped_layer(
tf.keras.layers.Identity, configs.Default8BitQuantizeConfig([], [], True)
)
# pylint:disable=g-long-lambda
BatchNormalizationQuantized = lambda norm_layer: quantize_wrapped_layer(
norm_layer, configs.Default8BitOutputQuantizeConfig())
BatchNormalizationNoQuantized = lambda norm_layer: quantize_wrapped_layer(
norm_layer, configs.NoOpQuantizeConfig())
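# Example usage (illustrative): norms followed by 'relu'/'relu6' skip output
# quantization, since the activation output is quantized instead.
#
#   bn_ctor = norm_by_activation(
#       'relu',
#       BatchNormalizationQuantized(tf.keras.layers.BatchNormalization),
#       BatchNormalizationNoQuantized(tf.keras.layers.BatchNormalization))
#   bn = bn_ctor()  # wrapped with NoOpQuantizeConfig for 'relu'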
# File: models-master/official/projects/qat/vision/quantization/configs_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for configs.py."""
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.quantization import configs
class _TestHelper(object):
def _convert_list(self, list_of_tuples):
"""Transforms a list of 2-tuples to a tuple of 2 lists.
    `QuantizeConfig` methods return a list of 2-tuples in the form
    [(weight1, quantizer1), (weight2, quantizer2)]. This function converts
    it into a 2-tuple of lists: ([weight1, weight2], [quantizer1, quantizer2]).
Args:
list_of_tuples: List of 2-tuples.
Returns:
2-tuple of lists.
"""
list1 = []
list2 = []
for a, b in list_of_tuples:
list1.append(a)
list2.append(b)
return list1, list2
# TODO(pulkitb): Consider asserting on full equality for quantizers.
def _assert_weight_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.LastValueQuantizer)
def _assert_activation_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.MovingAverageQuantizer)
def _assert_kernel_equality(self, a, b):
self.assertAllEqual(a.numpy(), b.numpy())
class Default8BitQuantizeConfigTest(tf.test.TestCase, _TestHelper):
def _simple_dense_layer(self):
layer = tf.keras.layers.Dense(2)
layer.build(input_shape=(3,))
return layer
def testGetsQuantizeWeightsAndQuantizers(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
(weights, weight_quantizers) = self._convert_list(
quantize_config.get_weights_and_quantizers(layer))
self._assert_weight_quantizers(weight_quantizers)
self.assertEqual([layer.kernel], weights)
def testGetsQuantizeActivationsAndQuantizers(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
(activations, activation_quantizers) = self._convert_list(
quantize_config.get_activations_and_quantizers(layer))
self._assert_activation_quantizers(activation_quantizers)
self.assertEqual([layer.activation], activations)
def testSetsQuantizeWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
quantize_config.set_quantize_weights(layer, [quantize_kernel])
self._assert_kernel_equality(layer.kernel, quantize_kernel)
def testSetsQuantizeActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
quantize_config.set_quantize_activations(layer, [quantize_activation])
self.assertEqual(layer.activation, quantize_activation)
def testSetsQuantizeWeights_ErrorOnWrongNumberOfWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer,
[quantize_kernel, quantize_kernel])
def testSetsQuantizeWeights_ErrorOnWrongShapeOfWeight(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(np.ones([1, 2]))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [quantize_kernel])
def testSetsQuantizeActivations_ErrorOnWrongNumberOfActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(
layer, [quantize_activation, quantize_activation])
def testGetsResultQuantizers_ReturnsQuantizer(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
[], [], True)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertLen(output_quantizers, 1)
self._assert_activation_quantizers(output_quantizers)
def testGetsResultQuantizers_EmptyWhenFalse(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
[], [], False)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertEqual([], output_quantizers)
def testSerialization(self):
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
expected_config = {
'class_name': 'Default8BitQuantizeConfig',
'config': {
'weight_attrs': ['kernel'],
'activation_attrs': ['activation'],
'quantize_output': False
}
}
serialized_quantize_config = tf_utils.serialize_keras_object(
quantize_config
)
self.assertEqual(expected_config, serialized_quantize_config)
quantize_config_from_config = (
tf_utils.deserialize_keras_object(
serialized_quantize_config,
module_objects=globals(),
custom_objects=configs._types_dict(),
)
)
self.assertEqual(quantize_config, quantize_config_from_config)
if __name__ == '__main__':
tf.test.main()
# File: models-master/official/projects/qat/vision/quantization/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
# File: models-master/official/projects/qat/vision/quantization/configs.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default 8-bit QuantizeConfigs."""
from typing import Sequence, Callable, Tuple, Any, Dict
import tensorflow as tf
import tensorflow_model_optimization as tfmot
Quantizer = tfmot.quantization.keras.quantizers.Quantizer
Layer = tf.keras.layers.Layer
Activation = Callable[[tf.Tensor], tf.Tensor]
WeightAndQuantizer = Tuple[tf.Variable, Quantizer]
ActivationAndQuantizer = Tuple[Activation, Quantizer]
class Default8BitOutputQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig which only quantizes the output from a layer."""
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
return []
def set_quantize_weights(self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
pass
def set_quantize_activations(self,
layer: Layer,
quantize_activations: Sequence[Activation]):
pass
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
return [
tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
]
def get_config(self) -> Dict[str, Any]:
return {}
class NoOpQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig which does not quantize any part of the layer."""
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
return []
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
pass
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
pass
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
return []
def get_config(self) -> Dict[str, Any]:
return {}
class Default8BitQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for non recurrent Keras layers."""
def __init__(self,
weight_attrs: Sequence[str],
activation_attrs: Sequence[str],
quantize_output: bool):
"""Initializes a default 8bit quantize config."""
self.weight_attrs = weight_attrs
self.activation_attrs = activation_attrs
self.quantize_output = quantize_output
# TODO(pulkitb): For some layers such as Conv2D, per_axis should be True.
# Add mapping for which layers support per_axis.
self.weight_quantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer(
num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
self.activation_quantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
"""See base class."""
return [(getattr(layer, weight_attr), self.weight_quantizer)
for weight_attr in self.weight_attrs]
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
"""See base class."""
return [(getattr(layer, activation_attr), self.activation_quantizer)
for activation_attr in self.activation_attrs]
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
"""See base class."""
if len(self.weight_attrs) != len(quantize_weights):
raise ValueError(
'`set_quantize_weights` called on layer {} with {} '
'weight parameters, but layer expects {} values.'.format(
layer.name, len(quantize_weights), len(self.weight_attrs)))
for weight_attr, weight in zip(self.weight_attrs, quantize_weights):
current_weight = getattr(layer, weight_attr)
if current_weight.shape != weight.shape:
        raise ValueError('Existing layer weight shape {} is incompatible with '
                         'provided weight shape {}'.format(
                             current_weight.shape, weight.shape))
setattr(layer, weight_attr, weight)
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
"""See base class."""
if len(self.activation_attrs) != len(quantize_activations):
raise ValueError(
'`set_quantize_activations` called on layer {} with {} '
'activation parameters, but layer expects {} values.'.format(
layer.name, len(quantize_activations),
len(self.activation_attrs)))
for activation_attr, activation in zip(
self.activation_attrs, quantize_activations):
setattr(layer, activation_attr, activation)
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
"""See base class."""
if self.quantize_output:
return [self.activation_quantizer]
return []
@classmethod
def from_config(cls, config: Dict[str, Any]) -> object:
"""Instantiates a `Default8BitQuantizeConfig` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `Default8BitQuantizeConfig` instance.
"""
return cls(**config)
def get_config(self) -> Dict[str, Any]:
"""Get a config for this quantize config."""
# TODO(pulkitb): Add weight and activation quantizer to config.
# Currently it's created internally, but ideally the quantizers should be
# part of the constructor and passed in from the registry.
return {
'weight_attrs': self.weight_attrs,
'activation_attrs': self.activation_attrs,
'quantize_output': self.quantize_output
}
def __eq__(self, other):
if not isinstance(other, Default8BitQuantizeConfig):
return False
return (self.weight_attrs == other.weight_attrs and
            self.activation_attrs == other.activation_attrs and
self.weight_quantizer == other.weight_quantizer and
self.activation_quantizer == other.activation_quantizer and
self.quantize_output == other.quantize_output)
def __ne__(self, other):
return not self.__eq__(other)
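# Example usage (a minimal sketch): pair the config with a layer via
# QuantizeWrapperV2, as done in the helper module of this package.
#
#   config = Default8BitQuantizeConfig(['kernel'], ['activation'], False)
#   layer = tfmot.quantization.keras.QuantizeWrapperV2(
#       tf.keras.layers.Dense(10), config)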
class Default8BitConvWeightsQuantizer(
tfmot.quantization.keras.quantizers.LastValueQuantizer):
"""Quantizer for handling weights in Conv2D/DepthwiseConv2D layers."""
def __init__(self):
"""Construct LastValueQuantizer with params specific for TFLite Convs."""
super(Default8BitConvWeightsQuantizer, self).__init__(
num_bits=8, per_axis=True, symmetric=True, narrow_range=True)
def build(self,
tensor_shape: tf.TensorShape,
name: str,
layer: Layer):
"""Build min/max quantization variables."""
min_weight = layer.add_weight(
name + '_min',
shape=(tensor_shape[-1],),
initializer=tf.keras.initializers.Constant(-6.0),
trainable=False)
max_weight = layer.add_weight(
name + '_max',
shape=(tensor_shape[-1],),
initializer=tf.keras.initializers.Constant(6.0),
trainable=False)
return {'min_var': min_weight, 'max_var': max_weight}
class NoQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
"""Dummy quantizer for explicitly not quantize."""
def __call__(self, inputs, training, weights, **kwargs):
return tf.identity(inputs)
def get_config(self):
return {}
def build(self, tensor_shape, name, layer):
return {}
class Default8BitConvQuantizeConfig(Default8BitQuantizeConfig):
"""QuantizeConfig for Conv2D/DepthwiseConv2D layers."""
def __init__(self,
weight_attrs: Sequence[str],
activation_attrs: Sequence[str],
quantize_output: bool):
"""Initializes default 8bit quantization config for the conv layer."""
super().__init__(weight_attrs, activation_attrs, quantize_output)
self.weight_quantizer = Default8BitConvWeightsQuantizer()
class Default8BitActivationQuantizeConfig(
tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for keras.layers.Activation.
`keras.layers.Activation` needs a separate `QuantizeConfig` since the
decision to quantize depends on the specific activation type.
"""
def _assert_activation_layer(self, layer: Layer):
if not isinstance(layer, tf.keras.layers.Activation):
raise RuntimeError(
'Default8BitActivationQuantizeConfig can only be used with '
'`keras.layers.Activation`.')
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
"""See base class."""
self._assert_activation_layer(layer)
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
"""See base class."""
self._assert_activation_layer(layer)
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
"""See base class."""
self._assert_activation_layer(layer)
if not hasattr(layer.activation, '__name__'):
raise ValueError('Activation {} not supported by '
'Default8BitActivationQuantizeConfig.'.format(
layer.activation))
    # This code is copied from the TFMOT repo, with relu6 added to support
    # MobileNet.
if layer.activation.__name__ in ['relu', 'relu6', 'swish', 'hard_swish']:
# 'relu' should generally get fused into the previous layer.
return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]
elif layer.activation.__name__ in [
'linear', 'softmax', 'sigmoid', 'hard_sigmoid'
]:
return []
raise ValueError('Activation {} not supported by '
'Default8BitActivationQuantizeConfig.'.format(
layer.activation))
def get_config(self) -> Dict[str, Any]:
"""Get a config for this quantizer config."""
return {}
def _types_dict():
return {
'Default8BitOutputQuantizeConfig':
Default8BitOutputQuantizeConfig,
'NoOpQuantizeConfig':
NoOpQuantizeConfig,
'Default8BitQuantizeConfig':
Default8BitQuantizeConfig,
'Default8BitConvWeightsQuantizer':
Default8BitConvWeightsQuantizer,
'Default8BitConvQuantizeConfig':
Default8BitConvQuantizeConfig,
'Default8BitActivationQuantizeConfig':
Default8BitActivationQuantizeConfig,
}
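# Example usage (illustrative; `saved_model_path` is a placeholder): pass the
# mapping above as custom objects when deserializing models that embed these
# configs, as configs_test.py does via its custom_objects argument.
#
#   with tfmot.quantization.keras.quantize_scope(_types_dict()):
#     model = tf.keras.models.load_model(saved_model_path)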
# File: models-master/official/projects/qat/vision/modeling/factory_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.modeling import factory as qat_factory
from official.projects.qat.vision.modeling.heads import dense_prediction_heads as qat_dense_prediction_heads
from official.vision.configs import backbones
from official.vision.configs import decoders
from official.vision.configs import image_classification as classification_cfg
from official.vision.configs import retinanet as retinanet_cfg
from official.vision.configs import semantic_segmentation as semantic_segmentation_cfg
from official.vision.modeling import factory
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import dense_prediction_heads
class ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('resnet', (224, 224), 5e-5),
('resnet', (224, 224), None),
('resnet', (None, None), 5e-5),
('resnet', (None, None), None),
('mobilenet', (224, 224), 5e-5),
('mobilenet', (224, 224), None),
('mobilenet', (None, None), 5e-5),
('mobilenet', (None, None), None),
)
def test_builder(self, backbone_type, input_size, weight_decay):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model_config = classification_cfg.ImageClassificationModel(
num_classes=num_classes,
backbone=backbones.Backbone(type=backbone_type))
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
model = factory.build_classification_model(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
quantization_config = common.Quantization()
_ = qat_factory.build_qat_classification_model(
model=model,
input_specs=input_specs,
quantization=quantization_config,
model_config=model_config,
l2_regularizer=l2_regularizer)
class RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('spinenet_mobile', 'identity', (640, 640), False, False),
('spinenet_mobile', 'identity', (640, 640), True, False),
('mobilenet', 'fpn', (640, 640), True, False),
('mobilenet', 'fpn', (640, 640), True, True),
)
def test_builder(self,
backbone_type,
decoder_type,
input_size,
quantize_detection_head,
quantize_detection_decoder):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
if backbone_type == 'spinenet_mobile':
backbone_config = backbones.Backbone(
type=backbone_type,
spinenet_mobile=backbones.SpineNetMobile(
model_id='49',
stochastic_depth_drop_rate=0.2,
min_level=3,
max_level=7,
use_keras_upsampling_2d=True))
elif backbone_type == 'mobilenet':
backbone_config = backbones.Backbone(
type=backbone_type,
mobilenet=backbones.MobileNet(
model_id='MobileNetV2',
filter_size_scale=1.0))
else:
raise ValueError(
'backbone_type {} is not supported'.format(backbone_type))
if decoder_type == 'identity':
decoder_config = decoders.Decoder(type=decoder_type)
elif decoder_type == 'fpn':
decoder_config = decoders.Decoder(
type=decoder_type,
fpn=decoders.FPN(
num_filters=128,
use_separable_conv=True,
use_keras_layer=True))
else:
raise ValueError(
'decoder_type {} is not supported'.format(decoder_type))
model_config = retinanet_cfg.RetinaNet(
num_classes=num_classes,
input_size=[input_size[0], input_size[1], 3],
backbone=backbone_config,
decoder=decoder_config,
head=retinanet_cfg.RetinaNetHead(
attribute_heads=None,
use_separable_conv=True))
l2_regularizer = tf.keras.regularizers.l2(5e-5)
# Build the original float32 retinanet model.
model = factory.build_retinanet(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
# Call the model with dummy input to build the head part.
    dummy_input = tf.zeros([1] + model_config.input_size)
    model(dummy_input, training=True)
# Build the QAT model from the original model with quantization config.
qat_model = qat_factory.build_qat_retinanet(
model=model,
quantization=common.Quantization(
quantize_detection_decoder=quantize_detection_decoder,
quantize_detection_head=quantize_detection_head),
model_config=model_config)
if quantize_detection_head:
      # The head becomes a RetinaNetHeadQuantized when we apply quantization.
self.assertIsInstance(qat_model.head,
qat_dense_prediction_heads.RetinaNetHeadQuantized)
else:
      # The head remains a RetinaNetHead if we don't quantize the head part.
self.assertIsInstance(
qat_model.head, dense_prediction_heads.RetinaNetHead)
self.assertNotIsInstance(
qat_model.head, qat_dense_prediction_heads.RetinaNetHeadQuantized)
    if decoder_type == 'fpn':
      if quantize_detection_decoder:
        # The FPN decoder becomes a general Keras functional model after
        # applying quantization.
self.assertNotIsInstance(qat_model.decoder, fpn.FPN)
else:
self.assertIsInstance(qat_model.decoder, fpn.FPN)
class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('mobilenet', (512, 512), 5e-5),)
def test_deeplabv3_builder(self, backbone_type, input_size, weight_decay):
num_classes = 21
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model_config = semantic_segmentation_cfg.SemanticSegmentationModel(
num_classes=num_classes,
backbone=backbones.Backbone(
type=backbone_type,
mobilenet=backbones.MobileNet(
model_id='MobileNetV2', output_stride=16)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=4,
num_filters=256,
dilation_rates=[],
spp_layer_version='v1',
output_tensor=True)),
head=semantic_segmentation_cfg.SegmentationHead(
level=4,
low_level=2,
num_convs=1,
upsample_factor=2,
use_depthwise_convolution=True))
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
model = factory.build_segmentation_model(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
quantization_config = common.Quantization()
_ = qat_factory.build_qat_segmentation_model(
model=model, quantization=quantization_config, input_specs=input_specs)
@parameterized.parameters(
('mobilenet', (512, 1024), 5e-5),)
def test_deeplabv3plus_builder(self, backbone_type, input_size, weight_decay):
num_classes = 19
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model_config = semantic_segmentation_cfg.SemanticSegmentationModel(
num_classes=num_classes,
backbone=backbones.Backbone(
type=backbone_type,
mobilenet=backbones.MobileNet(
model_id='MobileNetV2',
output_stride=16,
output_intermediate_endpoints=True)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=4,
num_filters=256,
dilation_rates=[],
pool_kernel_size=[512, 1024],
use_depthwise_convolution=False,
spp_layer_version='v1',
output_tensor=True)),
head=semantic_segmentation_cfg.SegmentationHead(
level=4,
num_convs=2,
feature_fusion='deeplabv3plus',
use_depthwise_convolution=True,
low_level='2/depthwise',
low_level_num_filters=48,
prediction_kernel_size=1,
upsample_factor=1,
num_filters=256))
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
model = factory.build_segmentation_model(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
quantization_config = common.Quantization()
_ = qat_factory.build_qat_segmentation_model(
model=model, quantization=quantization_config, input_specs=input_specs)
if __name__ == '__main__':
tf.test.main()
# File: models-master/official/projects/qat/vision/modeling/factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
# Import libraries
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.modeling import segmentation_model as qat_segmentation_model
from official.projects.qat.vision.modeling.heads import dense_prediction_heads as dense_prediction_heads_qat
from official.projects.qat.vision.modeling.layers import nn_layers as qat_nn_layers
from official.projects.qat.vision.n_bit import schemes as n_bit_schemes
from official.projects.qat.vision.quantization import configs as qat_configs
from official.projects.qat.vision.quantization import helper
from official.projects.qat.vision.quantization import schemes
from official.vision import configs
from official.vision.modeling import classification_model
from official.vision.modeling import retinanet_model
from official.vision.modeling.decoders import aspp
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.heads import segmentation_heads
from official.vision.modeling.layers import nn_layers
def build_qat_classification_model(
model: tf.keras.Model,
quantization: common.Quantization,
input_specs: tf.keras.layers.InputSpec,
model_config: configs.image_classification.ImageClassificationModel,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Apply model optimization techniques.
Args:
model: The model applying model optimization techniques.
quantization: The Quantization config.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
model_config: The model config.
l2_regularizer: tf.keras.regularizers.Regularizer object. Default to None.
Returns:
    The model with optimization techniques applied.
"""
original_checkpoint = quantization.pretrained_original_checkpoint
if original_checkpoint:
ckpt = tf.train.Checkpoint(
model=model,
**model.checkpoint_items)
status = ckpt.read(original_checkpoint)
status.expect_partial().assert_existing_objects_matched()
scope_dict = {
'L2': tf.keras.regularizers.l2,
}
with tfmot.quantization.keras.quantize_scope(scope_dict):
annotated_backbone = tfmot.quantization.keras.quantize_annotate_model(
model.backbone)
if quantization.change_num_bits:
backbone = tfmot.quantization.keras.quantize_apply(
annotated_backbone,
scheme=n_bit_schemes.DefaultNBitQuantizeScheme(
num_bits_weight=quantization.num_bits_weight,
num_bits_activation=quantization.num_bits_activation))
else:
backbone = tfmot.quantization.keras.quantize_apply(
annotated_backbone,
scheme=schemes.Default8BitQuantizeScheme())
norm_activation_config = model_config.norm_activation
backbone_optimized_model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=model_config.num_classes,
input_specs=input_specs,
dropout_rate=model_config.dropout_rate,
kernel_regularizer=l2_regularizer,
add_head_batch_norm=model_config.add_head_batch_norm,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
for from_layer, to_layer in zip(
model.layers, backbone_optimized_model.layers):
if from_layer != model.backbone:
to_layer.set_weights(from_layer.get_weights())
with tfmot.quantization.keras.quantize_scope(scope_dict):
def apply_quantization_to_dense(layer):
if isinstance(layer, (tf.keras.layers.Dense,
tf.keras.layers.Dropout,
tf.keras.layers.GlobalAveragePooling2D)):
return tfmot.quantization.keras.quantize_annotate_layer(layer)
return layer
backbone_optimized_model.use_legacy_config = True
annotated_model = tf.keras.models.clone_model(
backbone_optimized_model,
clone_function=apply_quantization_to_dense,
)
annotated_model.use_legacy_config = True
if quantization.change_num_bits:
optimized_model = tfmot.quantization.keras.quantize_apply(
annotated_model,
scheme=n_bit_schemes.DefaultNBitQuantizeScheme(
num_bits_weight=quantization.num_bits_weight,
num_bits_activation=quantization.num_bits_activation))
else:
optimized_model = tfmot.quantization.keras.quantize_apply(
annotated_model)
return optimized_model
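# Example usage (a minimal sketch mirroring factory_test.py above; `model`,
# `input_specs`, and `model_config` follow the argument descriptions in the
# docstring):
#
#   qat_model = build_qat_classification_model(
#       model=model,
#       quantization=common.Quantization(),
#       input_specs=input_specs,
#       model_config=model_config)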
def _clone_function_for_fpn(layer):
if isinstance(layer, (
tf.keras.layers.BatchNormalization,
tf.keras.layers.experimental.SyncBatchNormalization)):
return tfmot.quantization.keras.quantize_annotate_layer(
qat_nn_layers.BatchNormalizationWrapper(layer),
qat_configs.Default8BitOutputQuantizeConfig())
if isinstance(layer, tf.keras.layers.UpSampling2D):
return layer
return tfmot.quantization.keras.quantize_annotate_layer(layer)
def build_qat_retinanet(
model: tf.keras.Model, quantization: common.Quantization,
model_config: configs.retinanet.RetinaNet) -> tf.keras.Model:
"""Applies quantization aware training for RetinaNet model.
Args:
model: The model applying quantization aware training.
quantization: The Quantization config.
model_config: The model config.
Returns:
    The model with optimization techniques applied.
"""
original_checkpoint = quantization.pretrained_original_checkpoint
if original_checkpoint is not None:
ckpt = tf.train.Checkpoint(
model=model,
**model.checkpoint_items)
status = ckpt.read(original_checkpoint)
status.expect_partial().assert_existing_objects_matched()
scope_dict = {
'L2': tf.keras.regularizers.l2,
'BatchNormalizationWrapper': qat_nn_layers.BatchNormalizationWrapper,
}
with tfmot.quantization.keras.quantize_scope(scope_dict):
annotated_backbone = tfmot.quantization.keras.quantize_annotate_model(
model.backbone)
optimized_backbone = tfmot.quantization.keras.quantize_apply(
annotated_backbone,
scheme=schemes.Default8BitQuantizeScheme())
decoder = model.decoder
if quantization.quantize_detection_decoder:
if not isinstance(decoder, fpn.FPN):
raise ValueError('Currently only supports FPN.')
decoder = tf.keras.models.clone_model(
decoder,
clone_function=_clone_function_for_fpn,
)
decoder = tfmot.quantization.keras.quantize_apply(decoder)
decoder = tfmot.quantization.keras.remove_input_range(decoder)
head = model.head
if quantization.quantize_detection_head:
if not isinstance(head, dense_prediction_heads.RetinaNetHead):
raise ValueError('Currently only supports RetinaNetHead.')
head = (
dense_prediction_heads_qat.RetinaNetHeadQuantized.from_config(
head.get_config()))
optimized_model = retinanet_model.RetinaNetModel(
optimized_backbone,
decoder,
head,
model.detection_generator,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_scales=model_config.anchor.num_scales,
aspect_ratios=model_config.anchor.aspect_ratios,
anchor_size=model_config.anchor.anchor_size)
if quantization.quantize_detection_head:
# Call the model with dummy input to build the head part.
    dummy_input = tf.zeros([1] + model_config.input_size)
height, width, _ = model_config.input_size
image_shape = [[height, width]]
    optimized_model.call(dummy_input, image_shape=image_shape, training=False)
helper.copy_original_weights(model.head, optimized_model.head)
return optimized_model
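# Example usage (a minimal sketch mirroring factory_test.py above):
#
#   qat_model = build_qat_retinanet(
#       model=model,
#       quantization=common.Quantization(
#           quantize_detection_decoder=True,
#           quantize_detection_head=True),
#       model_config=model_config)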
def build_qat_segmentation_model(
model: tf.keras.Model, quantization: common.Quantization,
input_specs: tf.keras.layers.InputSpec) -> tf.keras.Model:
"""Applies quantization aware training for segmentation model.
Args:
model: The model applying quantization aware training.
quantization: The Quantization config.
input_specs: The shape specifications of input tensor.
Returns:
    The model with optimization techniques applied.
"""
original_checkpoint = quantization.pretrained_original_checkpoint
if original_checkpoint is not None:
ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
status = ckpt.read(original_checkpoint)
status.expect_partial().assert_existing_objects_matched()
# Build quantization compatible model.
model = qat_segmentation_model.SegmentationModelQuantized(
model.backbone, model.decoder, model.head, input_specs)
scope_dict = {
'L2': tf.keras.regularizers.l2,
}
model.use_legacy_config = True # Ensures old Keras serialization format
# Apply QAT to backbone (a tf.keras.Model) first.
with tfmot.quantization.keras.quantize_scope(scope_dict):
annotated_backbone = tfmot.quantization.keras.quantize_annotate_model(
model.backbone)
optimized_backbone = tfmot.quantization.keras.quantize_apply(
annotated_backbone, scheme=schemes.Default8BitQuantizeScheme())
backbone_optimized_model = qat_segmentation_model.SegmentationModelQuantized(
optimized_backbone, model.decoder, model.head, input_specs)
# Copy over all remaining layers.
for from_layer, to_layer in zip(model.layers,
backbone_optimized_model.layers):
if from_layer != model.backbone:
to_layer.set_weights(from_layer.get_weights())
with tfmot.quantization.keras.quantize_scope(scope_dict):
def apply_quantization_to_layers(layer):
if isinstance(layer, (segmentation_heads.SegmentationHead,
nn_layers.SpatialPyramidPooling, aspp.ASPP)):
return tfmot.quantization.keras.quantize_annotate_layer(layer)
return layer
backbone_optimized_model.use_legacy_config = True
annotated_model = tf.keras.models.clone_model(
backbone_optimized_model,
clone_function=apply_quantization_to_layers,
)
annotated_model.use_legacy_config = True
optimized_model = tfmot.quantization.keras.quantize_apply(
annotated_model, scheme=schemes.Default8BitQuantizeScheme())
return optimized_model
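# Example usage (a minimal sketch mirroring factory_test.py above):
#
#   qat_model = build_qat_segmentation_model(
#       model=model,
#       quantization=common.Quantization(),
#       input_specs=input_specs)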
# File: models-master/official/projects/qat/vision/modeling/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modeling package definition."""
from official.projects.qat.vision.modeling import heads
from official.projects.qat.vision.modeling import layers
# File: models-master/official/projects/qat/vision/modeling/segmentation_model.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build segmentation models."""
from typing import Any, Mapping, Union
# Import libraries
import tensorflow as tf
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class SegmentationModelQuantized(tf.keras.Model):
"""A Segmentation class model.
Input images are passed through backbone first. Decoder network is then
applied, and finally, segmentation head is applied on the output of the
decoder network. Layers such as ASPP should be part of decoder. Any feature
fusion is done as part of the segmentation head (i.e. deeplabv3+ feature
fusion is not part of the decoder, instead it is part of the segmentation
head). This way, different feature fusion techniques can be combined with
different backbones, and decoders.
"""
def __init__(self, backbone: tf.keras.Model, decoder: tf.keras.layers.Layer,
head: tf.keras.layers.Layer,
input_specs: tf.keras.layers.InputSpec, **kwargs):
"""Segmentation initialization function.
Args:
backbone: a backbone network.
decoder: a decoder network. E.g. FPN.
head: segmentation head.
input_specs: The shape specifications of input tensor.
**kwargs: keyword arguments to be passed.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:], name=input_specs.name)
backbone_features = backbone(inputs)
if decoder:
backbone_feature = backbone_features[str(decoder.get_config()['level'])]
decoder_feature = decoder(backbone_feature)
else:
decoder_feature = backbone_features
backbone_feature = backbone_features[str(head.get_config()['low_level'])]
x = {'logits': head((backbone_feature, decoder_feature))}
super().__init__(inputs=inputs, outputs=x, **kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'head': head,
}
self.backbone = backbone
self.decoder = decoder
self.head = head
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.decoder is not None:
items.update(decoder=self.decoder)
return items
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
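# Example usage (a minimal sketch; `backbone`, `decoder`, and `head` are
# assumed to be compatible components as described in the class docstring):
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 512, 512, 3])
#   model = SegmentationModelQuantized(backbone, decoder, head, input_specs)
#   logits = model(images)['logits']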
# File: models-master/official/projects/qat/vision/modeling/layers/nn_layers.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for neural networks."""
import enum
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper
from official.vision.modeling import heads
from official.vision.modeling.decoders import aspp
from official.vision.modeling.layers import nn_layers
# Type annotations.
States = Dict[str, tf.Tensor]
Activation = Union[str, Callable]
# String constants.
class FeatureFusion(str, enum.Enum):
PYRAMID_FUSION = 'pyramid_fusion'
PANOPTIC_FPN_FUSION = 'panoptic_fpn_fusion'
DEEPLABV3PLUS = 'deeplabv3plus'
DEEPLABV3PLUS_SUM_TO_MERGE = 'deeplabv3plus_sum_to_merge'
@tf.keras.utils.register_keras_serializable(package='Vision')
class SqueezeExcitationQuantized(
helper.LayerQuantizerHelper,
tf.keras.layers.Layer):
"""Creates a squeeze and excitation layer."""
def __init__(self,
in_filters,
out_filters,
se_ratio,
divisible_by=1,
use_3d_input=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
gating_activation='sigmoid',
round_down_protect=True,
**kwargs):
"""Initializes a squeeze and excitation layer.
Args:
in_filters: An `int` number of filters of the input tensor.
out_filters: An `int` number of filters of the output tensor.
se_ratio: A `float` or None. If not None, se ratio for the squeeze and
excitation layer.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
use_3d_input: A `bool` of whether input is 2D or 3D image.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
gating_activation: A `str` name of the activation function for final
gating function.
      round_down_protect: A `bool` of whether rounding down by more than 10%
        is allowed.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._in_filters = in_filters
self._out_filters = out_filters
self._se_ratio = se_ratio
self._divisible_by = divisible_by
self._round_down_protect = round_down_protect
self._use_3d_input = use_3d_input
self._activation = activation
self._gating_activation = gating_activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if tf.keras.backend.image_data_format() == 'channels_last':
if not use_3d_input:
self._spatial_axis = [1, 2]
else:
self._spatial_axis = [1, 2, 3]
else:
if not use_3d_input:
self._spatial_axis = [2, 3]
else:
self._spatial_axis = [2, 3, 4]
def _create_gating_activation_layer(self):
if self._gating_activation == 'hard_sigmoid':
# Convert hard_sigmoid activation to quantizable keras layers so each op
# can be properly quantized.
# Formula is hard_sigmoid(x) = relu6(x + 3) * 0.16667.
self._add_quantizer('add_three')
self._add_quantizer('divide_six')
self._relu6 = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation('relu6', use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
else:
self._gating_activation_layer = (
tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(
self._gating_activation, use_keras_layer=True
),
configs.Default8BitActivationQuantizeConfig(),
)
)
def _apply_gating_activation_layer(
self, x: tf.Tensor, training: bool) -> tf.Tensor:
if self._gating_activation == 'hard_sigmoid':
x = self._apply_quantizer('add_three', x + 3.0, training)
x = self._relu6(x)
      x = self._apply_quantizer('divide_six', x * 0.16667, training)
else:
x = self._gating_activation_layer(x)
return x
def build(self, input_shape):
num_reduced_filters = nn_layers.make_divisible(
max(1, int(self._in_filters * self._se_ratio)),
divisor=self._divisible_by,
round_down_protect=self._round_down_protect)
self._se_reduce = helper.Conv2DQuantized(
filters=num_reduced_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._se_expand = helper.Conv2DOutputQuantized(
filters=self._out_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._multiply = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Multiply(),
configs.Default8BitQuantizeConfig([], [], True))
self._reduce_mean_quantizer = (
tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False))
self._reduce_mean_quantizer_vars = self._reduce_mean_quantizer.build(
None, 'reduce_mean_quantizer_vars', self)
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
self._create_gating_activation_layer()
self._build_quantizer_vars()
super().build(input_shape)
def get_config(self):
config = {
'in_filters': self._in_filters,
'out_filters': self._out_filters,
'se_ratio': self._se_ratio,
'divisible_by': self._divisible_by,
'use_3d_input': self._use_3d_input,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'gating_activation': self._gating_activation,
'round_down_protect': self._round_down_protect,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
x = tf.reduce_mean(inputs, self._spatial_axis, keepdims=True)
x = self._reduce_mean_quantizer(
x, training, self._reduce_mean_quantizer_vars)
x = self._activation_layer(self._se_reduce(x))
x = self._apply_gating_activation_layer(self._se_expand(x), training)
x = self._multiply([x, inputs])
return x
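# Illustrative sanity check of the hard_sigmoid decomposition used above:
# at x = 0, relu6(0 + 3) * 0.16667 = 3 * 0.16667 ~= 0.5, the midpoint of a
# sigmoid-shaped gate, and for x >= 3 the gate saturates at
# relu6(6) * 0.16667 ~= 1.0.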
@tf.keras.utils.register_keras_serializable(package='Vision')
class SegmentationHeadQuantized(tf.keras.layers.Layer):
"""Creates a segmentation head."""
def __init__(
self,
num_classes: int,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
use_depthwise_convolution: bool = False,
prediction_kernel_size: int = 1,
upsample_factor: int = 1,
feature_fusion: Optional[str] = None,
decoder_min_level: Optional[int] = None,
decoder_max_level: Optional[int] = None,
low_level: int = 2,
low_level_num_filters: int = 48,
num_decoder_filters: int = 256,
activation: str = 'relu',
logit_activation: Optional[str] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a segmentation head.
Args:
num_classes: An `int` number of mask classification categories. The number
of classes does not include background class.
level: An `int` or `str`, level to use to build segmentation head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
use_depthwise_convolution: A bool to specify if use depthwise separable
convolutions.
prediction_kernel_size: An `int` number to specify the kernel size of the
prediction layer.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
feature_fusion: One of `deeplabv3plus`, `deeplabv3plus_sum_to_merge`,
`pyramid_fusion`, or None. If `deeplabv3plus`, features from
decoder_features[level] will be fused with low level feature maps from
backbone. If `pyramid_fusion`, multiscale features will be resized and
fused at the target level.
decoder_min_level: An `int` of minimum level from decoder to use in
feature fusion. It is only used when feature_fusion is set to
`panoptic_fpn_fusion`.
decoder_max_level: An `int` of maximum level from decoder to use in
feature fusion. It is only used when feature_fusion is set to
`panoptic_fpn_fusion`.
low_level: An `int` of backbone level to be used for feature fusion. It is
used when feature_fusion is set to `deeplabv3plus`.
low_level_num_filters: An `int` of reduced number of filters for the low
level features before fusing it with higher level features. It is only
used when feature_fusion is set to `deeplabv3plus`.
num_decoder_filters: An `int` of number of filters in the decoder outputs.
It is only used when feature_fusion is set to `panoptic_fpn_fusion`.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
logit_activation: Unused.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'level': level,
'num_convs': num_convs,
'num_filters': num_filters,
'use_depthwise_convolution': use_depthwise_convolution,
'prediction_kernel_size': prediction_kernel_size,
'upsample_factor': upsample_factor,
'feature_fusion': feature_fusion,
'decoder_min_level': decoder_min_level,
'decoder_max_level': decoder_max_level,
'low_level': low_level,
'low_level_num_filters': low_level_num_filters,
'num_decoder_filters': num_decoder_filters,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
    if logit_activation:
      raise ValueError('logit_activation is not supported by the quantized '
                       'SegmentationHead; this option is inherited from the '
                       'vision SegmentationHead modeling config and must be '
                       'None.')
def build(self, input_shape: Sequence[tf.TensorShape]):
"""Creates the variables of the segmentation head."""
# When input_shape is a list/tuple, the first corresponds to backbone
# features used for resizing the decoder features (the second) if feature
# fusion type is `deeplabv3plus`.
backbone_shape = input_shape[0]
use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
conv_kwargs = {
'kernel_size': 3 if not use_depthwise_convolution else 1,
'padding': 'same',
'use_bias': False,
'kernel_initializer': random_initializer,
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn'] else
tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
norm = helper.norm_by_activation(self._config_dict['activation'],
norm_with_quantize, norm_no_quantize)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
if self._config_dict['feature_fusion'] in [
FeatureFusion.DEEPLABV3PLUS, FeatureFusion.DEEPLABV3PLUS_SUM_TO_MERGE
]:
# Deeplabv3+ feature fusion layers.
self._dlv3p_conv = helper.Conv2DQuantized(
kernel_size=1,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(random_initializer),
kernel_regularizer=self._config_dict['kernel_regularizer'],
name='segmentation_head_deeplabv3p_fusion_conv',
filters=self._config_dict['low_level_num_filters'],
activation=helper.NoOpActivation())
self._dlv3p_norm = norm(
name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)
# Segmentation head layers.
self._convs = []
self._norms = []
for i in range(self._config_dict['num_convs']):
if use_depthwise_convolution:
self._convs.append(
helper.DepthwiseConv2DQuantized(
name='segmentation_head_depthwise_conv_{}'.format(i),
kernel_size=3,
padding='same',
use_bias=False,
depthwise_initializer=tf_utils.clone_initializer(
random_initializer),
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1,
activation=helper.NoOpActivation()))
norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
self._norms.append(norm(name=norm_name, **bn_kwargs))
conv_name = 'segmentation_head_conv_{}'.format(i)
self._convs.append(
helper.Conv2DQuantized(
name=conv_name,
filters=self._config_dict['num_filters'],
activation=helper.NoOpActivation(),
**conv_kwargs))
norm_name = 'segmentation_head_norm_{}'.format(i)
self._norms.append(norm(name=norm_name, **bn_kwargs))
self._classifier = helper.Conv2DOutputQuantized(
name='segmentation_output',
filters=self._config_dict['num_classes'],
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf_utils.clone_initializer(random_initializer),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
activation=helper.NoOpActivation())
self._upsampling_layer = helper.UpSampling2DQuantized(
size=(self._config_dict['upsample_factor'],
self._config_dict['upsample_factor']),
interpolation='nearest')
self._resizing_layer = helper.ResizingQuantized(
backbone_shape[1], backbone_shape[2], interpolation='bilinear')
self._concat_layer = helper.ConcatenateQuantized(axis=self._bn_axis)
self._add_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Add(), configs.Default8BitQuantizeConfig([], [], True))
super().build(input_shape)
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]]):
"""Forward pass of the segmentation head.
    It supports either a tuple of 2 tensors or a tuple of 2 dictionaries. The
    first element holds backbone endpoints, and the second holds decoder
    endpoints. When the inputs are tensors, they come from a single level of
    feature maps. When the inputs are dictionaries, they contain multiple
    levels of feature maps, where each key is the index of a feature map.
Args:
inputs: A tuple of 2 feature map tensors of shape
[batch, height_l, width_l, channels] or 2 dictionaries of tensors:
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
Returns:
segmentation prediction mask: A `tf.Tensor` of the segmentation mask
scores predicted from input features.
"""
    if self._config_dict['feature_fusion'] in (
        FeatureFusion.PYRAMID_FUSION, FeatureFusion.PANOPTIC_FPN_FUSION):
      raise ValueError(
          'The feature fusion methods `pyramid_fusion` and '
          '`panoptic_fpn_fusion` are not supported in QAT.')
backbone_output = inputs[0]
decoder_output = inputs[1]
if self._config_dict['feature_fusion'] in {
FeatureFusion.DEEPLABV3PLUS, FeatureFusion.DEEPLABV3PLUS_SUM_TO_MERGE
}:
# deeplabv3+ feature fusion.
x = decoder_output[str(self._config_dict['level'])] if isinstance(
decoder_output, dict) else decoder_output
y = backbone_output[str(self._config_dict['low_level'])] if isinstance(
backbone_output, dict) else backbone_output
y = self._dlv3p_norm(self._dlv3p_conv(y))
y = self._activation_layer(y)
x = self._resizing_layer(x)
x = tf.cast(x, dtype=y.dtype)
if self._config_dict['feature_fusion'] == FeatureFusion.DEEPLABV3PLUS:
x = self._concat_layer([x, y])
else:
x = self._add_layer([x, y])
else:
x = decoder_output[str(self._config_dict['level'])] if isinstance(
decoder_output, dict) else decoder_output
for conv, norm in zip(self._convs, self._norms):
x = conv(x)
x = norm(x)
x = self._activation_layer(x)
if self._config_dict['upsample_factor'] > 1:
# Use keras layer for nearest upsampling so it is QAT compatible.
x = self._upsampling_layer(x)
return self._classifier(x)
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()) + list(self._config_dict.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
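# Minimal usage sketch (added for illustration; mirrors the unit tests).
# Shapes are assumptions: backbone features at `low_level=2` and decoder
# features at `level=4` for a 2-image batch.
def _example_segmentation_head_quantized() -> tf.Tensor:
  head = SegmentationHeadQuantized(
      num_classes=5,
      level=4,
      low_level=2,
      low_level_num_filters=48,
      feature_fusion='deeplabv3plus')
  backbone_output = tf.ones([2, 128, 128, 32])
  decoder_output = tf.ones([2, 64, 64, 64])
  # Output mask logits at the backbone resolution: [2, 128, 128, 5].
  return head((backbone_output, decoder_output))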
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpatialPyramidPoolingQuantized(nn_layers.SpatialPyramidPooling):
"""Implements the quantized Atrous Spatial Pyramid Pooling.
References:
[Rethinking Atrous Convolution for Semantic Image Segmentation](
https://arxiv.org/pdf/1706.05587.pdf)
[Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation](https://arxiv.org/pdf/1802.02611.pdf)
"""
def __init__(
self,
output_channels: int,
dilation_rates: List[int],
pool_kernel_size: Optional[List[int]] = None,
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
activation: str = 'relu',
dropout: float = 0.5,
kernel_initializer: str = 'GlorotUniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
use_depthwise_convolution: bool = False,
**kwargs):
"""Initializes `SpatialPyramidPooling`.
Args:
output_channels: Number of channels produced by SpatialPyramidPooling.
dilation_rates: A list of integers for parallel dilated conv.
pool_kernel_size: A list of integers or None. If None, global average
pooling is applied, otherwise an average pooling of pool_kernel_size is
applied.
use_sync_bn: A bool, whether or not to use sync batch normalization.
batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
0.99.
batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
activation: A `str` for type of activation to be used. Defaults to 'relu'.
dropout: A float for the dropout rate before output. Defaults to 0.5.
kernel_initializer: Kernel initializer for conv layers. Defaults to
`glorot_uniform`.
kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
      use_depthwise_convolution: Allows spatial pooling to be separable
        depthwise convolutions. [Encoder-Decoder with Atrous Separable
Convolution for Semantic Image Segmentation](
https://arxiv.org/pdf/1802.02611.pdf)
**kwargs: Other keyword arguments for the layer.
"""
super().__init__(
output_channels=output_channels,
dilation_rates=dilation_rates,
use_sync_bn=use_sync_bn,
batchnorm_momentum=batchnorm_momentum,
batchnorm_epsilon=batchnorm_epsilon,
activation=activation,
dropout=dropout,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
interpolation=interpolation,
pool_kernel_size=pool_kernel_size,
use_depthwise_convolution=use_depthwise_convolution)
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
self._activation_fn_no_quant = (
tf_utils.get_activation(activation, use_keras_layer=True))
def build(self, input_shape):
height = input_shape[1]
width = input_shape[2]
channels = input_shape[3]
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._use_sync_bn else tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
norm = helper.norm_by_activation(self._activation, norm_with_quantize,
norm_no_quantize)
self.aspp_layers = []
conv1 = helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
activation=helper.NoOpActivation())
norm1 = norm(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
self.aspp_layers.append([conv1, norm1])
for dilation_rate in self._dilation_rates:
leading_layers = []
kernel_size = (3, 3)
if self._use_depthwise_convolution:
leading_layers += [
helper.DepthwiseConv2DOutputQuantized(
depth_multiplier=1,
kernel_size=kernel_size,
padding='same',
depthwise_regularizer=self._kernel_regularizer,
depthwise_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
dilation_rate=dilation_rate,
use_bias=False,
activation=helper.NoOpActivation())
]
kernel_size = (1, 1)
conv_dilation = leading_layers + [
helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=kernel_size,
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
dilation_rate=dilation_rate,
use_bias=False,
activation=helper.NoOpActivation())
]
norm_dilation = norm(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
self.aspp_layers.append(conv_dilation + [norm_dilation])
if self._pool_kernel_size is None:
pooling = [
helper.GlobalAveragePooling2DQuantized(),
helper.ReshapeQuantized((1, 1, channels))
]
else:
pooling = [helper.AveragePooling2DQuantized(self._pool_kernel_size)]
conv2 = helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
activation=helper.NoOpActivation())
norm2 = norm(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
self.aspp_layers.append(pooling + [conv2, norm2])
self._resizing_layer = helper.ResizingQuantized(
height, width, interpolation=self._interpolation)
self._projection = [
helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
use_bias=False,
activation=helper.NoOpActivation()),
norm(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
]
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
self._concat_layer = helper.ConcatenateQuantized(axis=-1)
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
if training is None:
training = tf.keras.backend.learning_phase()
result = []
for i, layers in enumerate(self.aspp_layers):
x = inputs
for layer in layers:
# Apply layers sequentially.
x = layer(x, training=training)
x = self._activation_fn(x)
# Apply resize layer to the end of the last set of layers.
if i == len(self.aspp_layers) - 1:
x = self._resizing_layer(x)
result.append(tf.cast(x, inputs.dtype))
x = self._concat_layer(result)
for layer in self._projection:
x = layer(x, training=training)
x = self._activation_fn(x)
return self._dropout_layer(x)
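# Minimal usage sketch (added for illustration; mirrors the unit tests).
# `pool_kernel_size=None` selects global average pooling for the image-level
# branch; the shapes below are assumptions.
def _example_spatial_pyramid_pooling_quantized() -> tf.Tensor:
  spp = SpatialPyramidPoolingQuantized(
      output_channels=256, dilation_rates=[6, 12, 18])
  features = tf.ones([2, 64, 64, 128])
  # The output keeps the spatial size and projects to 256 channels:
  # [2, 64, 64, 256].
  return spp(features, training=False)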
@tf.keras.utils.register_keras_serializable(package='Vision')
class ASPPQuantized(aspp.ASPP):
"""Creates a quantized Atrous Spatial Pyramid Pooling (ASPP) layer."""
def __init__(
self,
level: int,
dilation_rates: List[int],
num_filters: int = 256,
pool_kernel_size: Optional[int] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
activation: str = 'relu',
dropout_rate: float = 0.0,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
use_depthwise_convolution: bool = False,
spp_layer_version: str = 'v1',
output_tensor: bool = True,
**kwargs):
"""Initializes an Atrous Spatial Pyramid Pooling (ASPP) layer.
Args:
level: An `int` level to apply ASPP.
dilation_rates: A `list` of dilation rates.
num_filters: An `int` number of output filters in ASPP.
      pool_kernel_size: A `list` of [height, width] of pooling kernel size or
        None. The pooling size is with respect to the original image size; it
        will be scaled down by 2**level. If None, global average pooling is
        used.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
activation: A `str` activation to be used in ASPP.
dropout_rate: A `float` rate for dropout regularization.
kernel_initializer: A `str` name of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
interpolation: A `str` of interpolation method. It should be one of
`bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, or `mitchellcubic`.
use_depthwise_convolution: If True depthwise separable convolutions will
be added to the Atrous spatial pyramid pooling.
spp_layer_version: A `str` of spatial pyramid pooling layer version.
      output_tensor: Whether to output a single tensor or a dictionary of
        tensors. Default is True.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(
level=level,
dilation_rates=dilation_rates,
num_filters=num_filters,
pool_kernel_size=pool_kernel_size,
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
activation=activation,
dropout_rate=dropout_rate,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
interpolation=interpolation,
use_depthwise_convolution=use_depthwise_convolution,
spp_layer_version=spp_layer_version,
output_tensor=output_tensor,
**kwargs)
self._aspp_layer = SpatialPyramidPoolingQuantized
def call(self, inputs: Union[tf.Tensor, Mapping[str,
tf.Tensor]]) -> tf.Tensor:
"""Calls the Atrous Spatial Pyramid Pooling (ASPP) layer on an input.
    If `output_tensor` is False, the output of ASPP will be a dict of
    {`level`, `tf.Tensor`} even if only one level is present, which keeps it
    compatible with the rest of the segmentation model interfaces.
    If `output_tensor` is True, a single tensor is output.
Args:
inputs: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or
a `dict` of `tf.Tensor` where
- key: A `str` of the level of the multilevel feature maps.
- values: A `tf.Tensor` of shape [batch, height_l, width_l,
filter_size].
Returns:
A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or a `dict`
of `tf.Tensor` where
- key: A `str` of the level of the multilevel feature maps.
- values: A `tf.Tensor` of output of ASPP module.
"""
level = str(self._config_dict['level'])
backbone_output = inputs[level] if isinstance(inputs, dict) else inputs
return self.aspp(backbone_output)
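# Minimal usage sketch (added for illustration). The input may be a single
# tensor or a dict keyed by level; the level-4 shapes below are assumptions.
def _example_aspp_quantized() -> tf.Tensor:
  aspp_layer = ASPPQuantized(level=4, dilation_rates=[6, 12, 18],
                             num_filters=256)
  endpoints = {'4': tf.ones([2, 8, 8, 64])}
  # With the default `output_tensor=True`, a single tensor of shape
  # [2, 8, 8, 256] is returned.
  return aspp_layer(endpoints)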
class BatchNormalizationWrapper(tf.keras.layers.Wrapper):
"""A BatchNormalizationWrapper that explicitly not folded.
It just added an identity depthwise conv right before the normalization.
As a result, given normalization op just folded into the identity depthwise
conv layer.
Note that it only used when the batch normalization folding is not working.
It makes quantize them as a 1x1 depthwise conv layer that just work as same
as inference mode for the normalization. (Basically mult and add for the BN.)
"""
def call(self, inputs: tf.Tensor, *args: Any, **kwargs: Any) -> tf.Tensor:
channels = tf.shape(inputs)[-1]
x = tf.nn.depthwise_conv2d(
inputs, tf.ones([1, 1, channels, 1]), [1, 1, 1, 1], 'VALID')
outputs = self.layer.call(x, *args, **kwargs)
return outputs
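# Minimal usage sketch (added for illustration; mirrors the unit tests):
# wrapping a BatchNormalization layer so the normalization folds into the
# inserted identity depthwise conv rather than into a preceding layer.
def _example_batch_normalization_wrapper() -> tf.Tensor:
  wrapped_norm = BatchNormalizationWrapper(
      tf.keras.layers.BatchNormalization(axis=-1))
  features = tf.ones([2, 16, 16, 8])
  # The output shape matches the input: [2, 16, 16, 8].
  return wrapped_norm(features)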
class MaskScoringQuantized(heads.MaskScoring):
"""Creates a quantized mask scoring layer.
This implements mask scoring layer from the paper:
Zhaojin Huang, Lichao Huang, Yongchao Gong, Chang Huang, Xinggang Wang.
Mask Scoring R-CNN.
(https://arxiv.org/pdf/1903.00241.pdf)
"""
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the mask scoring head."""
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(
self._config_dict['activation'], use_keras_layer=True
),
configs.Default8BitActivationQuantizeConfig(),
)
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
}
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'
),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization
)
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
bn_op = helper.norm_by_activation(
self._config_dict['activation'], norm_with_quantize, norm_no_quantize
)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
self._convs = []
self._conv_norms = []
for i in range(self._config_dict['num_convs']):
if self._config_dict['use_depthwise_convolution']:
self._convs.append(
helper.DepthwiseConv2DQuantized(
name='mask-scoring-depthwise-conv-{}'.format(i),
kernel_size=3,
padding='same',
use_bias=False,
depthwise_initializer=tf.keras.initializers.RandomNormal(
stddev=0.01),
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1,
activation=helper.NoOpActivation()))
norm_name = 'mask-scoring-depthwise-bn-{}'.format(i)
self._conv_norms.append(bn_op(name=norm_name, **bn_kwargs))
conv_name = 'mask-scoring_{}'.format(i)
if 'kernel_initializer' in conv_kwargs:
conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer(
conv_kwargs['kernel_initializer']
)
if self._config_dict['use_depthwise_convolution']:
conv_kwargs['kernel_size'] = 1
self._convs.append(
helper.Conv2DQuantized(
name=conv_name, activation=helper.NoOpActivation(), **conv_kwargs
)
)
bn_name = 'mask-scoring-bn_{}'.format(i)
self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs))
self._fcs = []
self._fc_norms = []
for i in range(self._config_dict['num_fcs']):
fc_name = 'mask-scoring-fc_{}'.format(i)
self._fcs.append(
helper.DenseQuantized(
units=self._config_dict['fc_dims'],
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1 / 3.0, mode='fan_out', distribution='uniform'
),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name=fc_name,
activation=helper.NoOpActivation(),
)
)
bn_name = 'mask-scoring-fc-bn_{}'.format(i)
self._fc_norms.append(bn_op(name=bn_name, **bn_kwargs))
self._classifier = helper.DenseOutputQuantized(
units=self._config_dict['num_classes'],
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name='iou-scores',
)
self._resizing_layer = helper.ResizingQuantized(
self._config_dict['fc_input_size'][0],
self._config_dict['fc_input_size'][1],
interpolation='bilinear',
)
self._identity_layer = helper.IdentityQuantized(trainable=False)
super().build(input_shape)
  def call(self, inputs: tf.Tensor, training: Optional[bool] = None):
"""Forward pass mask scoring head.
Args:
      inputs: A `tf.Tensor` of shape [batch_size, height, width, num_classes],
        representing the segmentation logits.
training: a `bool` indicating whether it is in `training` mode.
Returns:
mask_scores: A `tf.Tensor` of predicted mask scores
[batch_size, num_classes].
"""
x = tf.stop_gradient(inputs)
for conv, bn in zip(self._convs, self._conv_norms):
x = conv(x)
x = bn(x)
x = self._activation_layer(x)
x = self._resizing_layer(x)
_, h, w, filters = x.get_shape().as_list()
x = tf.reshape(x, [-1, h * w * filters])
for fc, bn in zip(self._fcs, self._fc_norms):
x = fc(x)
x = bn(x)
x = self._activation_layer(x)
ious = self._classifier(x)
ious = self._identity_layer(ious)
return ious
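# Minimal usage sketch (added for illustration; mirrors the unit tests). The
# head consumes segmentation logits and predicts one IoU score per class;
# the shapes below are assumptions.
def _example_mask_scoring_quantized() -> tf.Tensor:
  head = MaskScoringQuantized(
      num_classes=2,
      num_convs=2,
      num_filters=32,
      fc_dims=128,
      num_fcs=1,
      fc_input_size=[8, 8])
  logits = tf.ones([2, 64, 64, 16])
  # Output mask scores of shape [2, 2].
  return head(logits)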
| 38,727 | 39.55288 | 87 | py |
models | models-master/official/projects/qat/vision/modeling/layers/nn_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn_blocks."""
from typing import Any, Iterable, Tuple
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.qat.vision.modeling.layers import nn_blocks
def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]:
"""Returns the combinations of end-to-end tests to run."""
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
)
class NNBlocksTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(nn_blocks.BottleneckBlockQuantized, 1, False, 0.0, None),
(nn_blocks.BottleneckBlockQuantized, 2, True, 0.2, 0.25),
)
def test_bottleneck_block_creation(self, block_fn, strides, use_projection,
stochastic_depth_drop_rate, se_ratio):
input_size = 128
filter_size = 256
inputs = tf.keras.Input(
shape=(input_size, input_size, filter_size * 4), batch_size=1)
block = block_fn(
filter_size,
strides,
use_projection=use_projection,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, filter_size * 4],
features.shape.as_list())
@parameterized.parameters(
(nn_blocks.InvertedBottleneckBlockQuantized, 1, 1, None, None),
(nn_blocks.InvertedBottleneckBlockQuantized, 6, 1, None, None),
(nn_blocks.InvertedBottleneckBlockQuantized, 1, 2, None, None),
(nn_blocks.InvertedBottleneckBlockQuantized, 1, 1, 0.2, None),
(nn_blocks.InvertedBottleneckBlockQuantized, 1, 1, None, 0.2),
)
def test_invertedbottleneck_block_creation(
self, block_fn, expand_ratio, strides, se_ratio,
stochastic_depth_drop_rate):
input_size = 128
in_filters = 24
out_filters = 40
inputs = tf.keras.Input(
shape=(input_size, input_size, in_filters), batch_size=1)
block = block_fn(
in_filters=in_filters,
out_filters=out_filters,
expand_ratio=expand_ratio,
strides=strides,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
output_intermediate_endpoints=False)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, out_filters],
features.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 3,345 | 33.854167 | 77 | py |
models | models-master/official/projects/qat/vision/modeling/layers/nn_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn_layers."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.qat.vision.modeling.layers import nn_layers
class NNLayersTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('deeplabv3plus', 1, 128, 128),
('deeplabv3plus', 2, 128, 128),
('deeplabv3', 1, 128, 64),
('deeplabv3', 2, 128, 64),
('deeplabv3plus_sum_to_merge', 1, 64, 128),
('deeplabv3plus_sum_to_merge', 2, 64, 128),
)
def test_segmentation_head_creation(self, feature_fusion, upsample_factor,
low_level_num_filters, expected_shape):
input_size = 128
    decoder_output_size = input_size // 2
    decoder_output = tf.random.uniform(
        (2, decoder_output_size, decoder_output_size, 64), dtype=tf.float32)
backbone_output = tf.random.uniform((2, input_size, input_size, 32),
dtype=tf.float32)
segmentation_head = nn_layers.SegmentationHeadQuantized(
num_classes=5,
level=4,
upsample_factor=upsample_factor,
low_level=2,
low_level_num_filters=low_level_num_filters,
feature_fusion=feature_fusion)
features = segmentation_head((backbone_output, decoder_output))
self.assertAllEqual([
2, expected_shape * upsample_factor, expected_shape * upsample_factor, 5
], features.shape.as_list())
@parameterized.parameters(
(None, []),
(None, [6, 12, 18]),
([32, 32], [6, 12, 18]),
)
def test_spatial_pyramid_pooling_creation(self, pool_kernel_size,
dilation_rates):
inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32)
layer = nn_layers.SpatialPyramidPoolingQuantized(
output_channels=256,
dilation_rates=dilation_rates,
pool_kernel_size=pool_kernel_size)
output = layer(inputs)
self.assertAllEqual([None, 64, 64, 256], output.shape)
@parameterized.parameters(
(3, [6, 12, 18, 24], 128),
(3, [6, 12, 18], 128),
(3, [6, 12], 256),
(4, [], 128),
(4, [6, 12, 18], 128),
(4, [], 256),
)
def test_aspp_creation(self, level, dilation_rates, num_filters):
input_size = 128 // 2**level
tf.keras.backend.set_image_data_format('channels_last')
endpoints = tf.random.uniform(
shape=(2, input_size, input_size, 64), dtype=tf.float32)
network = nn_layers.ASPPQuantized(
level=level, dilation_rates=dilation_rates, num_filters=num_filters)
feats = network(endpoints)
self.assertAllEqual([2, input_size, input_size, num_filters],
feats.shape.as_list())
@parameterized.parameters(False, True)
def test_bnorm_wrapper_creation(self, use_sync_bn):
inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32)
if use_sync_bn:
norm = tf.keras.layers.experimental.SyncBatchNormalization(axis=-1)
else:
norm = tf.keras.layers.BatchNormalization(axis=-1)
layer = nn_layers.BatchNormalizationWrapper(norm)
output = layer(inputs)
self.assertAllEqual([None, 64, 64, 128], output.shape)
@parameterized.parameters(
(1, 1, 64, [4, 4]),
(2, 1, 64, [4, 4]),
(3, 1, 64, [4, 4]),
(1, 2, 32, [8, 8]),
(2, 2, 32, [8, 8]),
(3, 2, 32, [8, 8]),
)
def test_mask_scoring_creation(
self, num_convs, num_fcs, num_filters, fc_input_size
):
inputs = tf.keras.Input(shape=(64, 64, 16), dtype=tf.float32)
head = nn_layers.MaskScoringQuantized(
num_classes=2,
num_convs=num_convs,
num_filters=num_filters,
fc_dims=128,
num_fcs=num_fcs,
fc_input_size=fc_input_size,
use_depthwise_convolution=True,
)
scores = head(inputs)
self.assertAllEqual(scores.shape.as_list(), [None, 2])
if __name__ == '__main__':
tf.test.main()
| 4,538 | 32.873134 | 80 | py |
models | models-master/official/projects/qat/vision/modeling/layers/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers package definition."""
from official.projects.qat.vision.modeling.layers import nn_layers
from official.projects.qat.vision.modeling.layers.nn_blocks import BottleneckBlockQuantized
from official.projects.qat.vision.modeling.layers.nn_blocks import Conv2DBNBlockQuantized
from official.projects.qat.vision.modeling.layers.nn_blocks import InvertedBottleneckBlockQuantized
| 991 | 48.6 | 99 | py |
models | models-master/official/projects/qat/vision/modeling/layers/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains quantized neural blocks for the QAT."""
from typing import Any, Dict, Optional, Sequence, Tuple, Union
# Import libraries
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.modeling.layers import nn_layers as qat_nn_layers
from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper
from official.vision.modeling.layers import nn_layers
# This class is copied from modeling.layers.nn_blocks.BottleneckBlock and
# applies QAT.
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlockQuantized(tf.keras.layers.Layer):
"""A quantized standard bottleneck block."""
def __init__(self,
filters: int,
strides: int,
dilation_rate: int = 1,
use_projection: bool = False,
se_ratio: Optional[float] = None,
resnetd_shortcut: bool = False,
stochastic_depth_drop_rate: Optional[float] = None,
kernel_initializer: str = 'VarianceScaling',
               kernel_regularizer: Optional[
                   tf.keras.regularizers.Regularizer] = None,
               bias_regularizer: Optional[
                   tf.keras.regularizers.Regularizer] = None,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
bn_trainable: bool = True, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initializes a standard bottleneck block with BN after convolutions.
Args:
filters: An `int` number of filters for the first two convolutions. Note
that the third and final convolution will use 4 times as many filters.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
dilation_rate: An `int` dilation_rate of convolutions. Default to 1.
use_projection: A `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
resnetd_shortcut: A `bool`. If True, apply the resnetd style modification
to the shortcut connection.
stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
bn_trainable: A `bool` that indicates whether batch norm layers should be
trainable. Default to True.
**kwargs: Additional keyword arguments to be passed.
"""
super(BottleneckBlockQuantized, self).__init__(**kwargs)
self._filters = filters
self._strides = strides
self._dilation_rate = dilation_rate
self._use_projection = use_projection
self._se_ratio = se_ratio
self._resnetd_shortcut = resnetd_shortcut
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if use_sync_bn else tf.keras.layers.BatchNormalization)
self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
self._norm = helper.BatchNormalizationNoQuantized(norm_layer)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._bn_trainable = bn_trainable
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
if self._use_projection:
if self._resnetd_shortcut:
self._shortcut0 = tf.keras.layers.AveragePooling2D(
pool_size=2, strides=self._strides, padding='same')
self._shortcut1 = helper.Conv2DQuantized(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
else:
self._shortcut = helper.Conv2DQuantized(
filters=self._filters * 4,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm0 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._conv1 = helper.Conv2DQuantized(
filters=self._filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._activation1 = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
self._conv2 = helper.Conv2DQuantized(
filters=self._filters,
kernel_size=3,
strides=self._strides,
dilation_rate=self._dilation_rate,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._activation2 = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
self._conv3 = helper.Conv2DQuantized(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm3 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._activation3 = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized(
in_filters=self._filters * 4,
out_filters=self._filters * 4,
se_ratio=self._se_ratio,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
self._add = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Add(),
configs.Default8BitQuantizeConfig([], [], True))
super(BottleneckBlockQuantized, self).build(input_shape)
def get_config(self) -> Dict[str, Any]:
"""Get a config of this layer."""
config = {
'filters': self._filters,
'strides': self._strides,
'dilation_rate': self._dilation_rate,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'resnetd_shortcut': self._resnetd_shortcut,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'bn_trainable': self._bn_trainable
}
base_config = super(BottleneckBlockQuantized, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(
self,
inputs: tf.Tensor,
training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
"""Run the BottleneckBlockQuantized logics."""
shortcut = inputs
if self._use_projection:
if self._resnetd_shortcut:
shortcut = self._shortcut0(shortcut)
shortcut = self._shortcut1(shortcut)
else:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation1(x)
x = self._conv2(x)
x = self._norm2(x)
x = self._activation2(x)
x = self._conv3(x)
x = self._norm3(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
x = self._add([x, shortcut])
return self._activation3(x)
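# Minimal usage sketch (added for illustration; mirrors the unit tests). A
# bottleneck block without a projection shortcut expects `4 * filters` input
# channels; the shapes below are assumptions.
def _example_bottleneck_block_quantized() -> tf.Tensor:
  block = BottleneckBlockQuantized(filters=64, strides=1)
  features = tf.ones([1, 32, 32, 256])
  # The residual output keeps the input shape: [1, 32, 32, 256].
  return block(features)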
# This class is copied from modeling.backbones.mobilenet.Conv2DBNBlock and
# applies QAT.
@tf.keras.utils.register_keras_serializable(package='Vision')
class Conv2DBNBlockQuantized(tf.keras.layers.Layer):
"""A quantized convolution block with batch normalization."""
def __init__(
self,
filters: int,
kernel_size: int = 3,
strides: int = 1,
use_bias: bool = False,
use_explicit_padding: bool = False,
activation: str = 'relu6',
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_normalization: bool = True,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
**kwargs):
"""A convolution block with batch normalization.
Args:
      filters: An `int` number of filters of the convolution layer.
kernel_size: An `int` specifying the height and width of the 2D
convolution window.
strides: An `int` of block stride. If greater than 1, this block will
ultimately downsample the input.
use_bias: If True, use bias in the convolution layer.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
activation: A `str` name of the activation function.
kernel_initializer: A `str` for kernel initializer of convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
use_normalization: If True, use batch normalization.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
super(Conv2DBNBlockQuantized, self).__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._activation = activation
self._use_bias = use_bias
self._use_explicit_padding = use_explicit_padding
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_normalization = use_normalization
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if use_explicit_padding and kernel_size > 1:
self._padding = 'valid'
else:
self._padding = 'same'
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if use_sync_bn else tf.keras.layers.BatchNormalization)
self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
self._norm = helper.BatchNormalizationNoQuantized(norm_layer)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
def get_config(self) -> Dict[str, Any]:
"""Get a config of this layer."""
config = {
'filters': self._filters,
'strides': self._strides,
'kernel_size': self._kernel_size,
'use_bias': self._use_bias,
'use_explicit_padding': self._use_explicit_padding,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'use_normalization': self._use_normalization,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(Conv2DBNBlockQuantized, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
if self._use_explicit_padding and self._kernel_size > 1:
padding_size = nn_layers.get_padding_for_kernel_size(self._kernel_size)
self._pad = tf.keras.layers.ZeroPadding2D(padding_size)
conv2d_quantized = (
helper.Conv2DQuantized
if self._use_normalization else helper.Conv2DOutputQuantized)
self._conv0 = conv2d_quantized(
filters=self._filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding=self._padding,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
if self._use_normalization:
self._norm0 = helper.norm_by_activation(self._activation,
self._norm_with_quantize,
self._norm)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
super(Conv2DBNBlockQuantized, self).build(input_shape)
def call(
self,
inputs: tf.Tensor,
training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
"""Run the Conv2DBNBlockQuantized logics."""
if self._use_explicit_padding and self._kernel_size > 1:
inputs = self._pad(inputs)
x = self._conv0(inputs)
if self._use_normalization:
x = self._norm0(x)
return self._activation_layer(x)
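# Minimal usage sketch (added for illustration). A stride-2 conv + BN + relu6
# stem; the filter count and image shape below are assumptions.
def _example_conv2d_bn_block_quantized() -> tf.Tensor:
  block = Conv2DBNBlockQuantized(filters=32, kernel_size=3, strides=2)
  images = tf.ones([1, 64, 64, 3])
  # With 'same' padding and stride 2, the output shape is [1, 32, 32, 32].
  return block(images)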
@tf.keras.utils.register_keras_serializable(package='Vision')
class InvertedBottleneckBlockQuantized(tf.keras.layers.Layer):
"""A quantized inverted bottleneck block."""
def __init__(self,
in_filters,
out_filters,
expand_ratio,
strides,
kernel_size=3,
se_ratio=None,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
se_inner_activation='relu',
se_gating_activation='sigmoid',
se_round_down_protect=True,
expand_se_in_filters=False,
depthwise_activation=None,
use_sync_bn=False,
dilation_rate=1,
divisible_by=1,
regularize_depthwise=False,
use_depthwise=True,
use_residual=True,
norm_momentum=0.99,
norm_epsilon=0.001,
output_intermediate_endpoints=False,
**kwargs):
"""Initializes an inverted bottleneck block with BN after convolutions.
Args:
in_filters: An `int` number of filters of the input tensor.
out_filters: An `int` number of filters of the output tensor.
expand_ratio: An `int` of expand_ratio for an inverted bottleneck block.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
kernel_size: An `int` kernel_size of the depthwise conv layer.
se_ratio: A `float` or None. If not None, se ratio for the squeeze and
excitation layer.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
        the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
activation: A `str` name of the activation function.
se_inner_activation: A `str` name of squeeze-excitation inner activation.
se_gating_activation: A `str` name of squeeze-excitation gating
activation.
se_round_down_protect: A `bool` of whether round down more than 10% will
be allowed in SE layer.
expand_se_in_filters: A `bool` of whether or not to expand in_filter in
squeeze and excitation layer.
depthwise_activation: A `str` name of the activation function for
depthwise only.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      dilation_rate: An `int` that specifies the dilation rate to use for
        dilated convolution; the same value is used for all spatial
        dimensions.
      divisible_by: An `int` that ensures all inner dimensions are divisible by
        this number.
regularize_depthwise: A `bool` of whether or not apply regularization on
depthwise.
      use_depthwise: A `bool` of whether to use a depthwise convolution; if
        False, a fused convolution is used instead.
use_residual: A `bool` of whether to include residual connection between
input and output.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
output_intermediate_endpoints: A `bool` of whether or not output the
intermediate endpoints.
**kwargs: Additional keyword arguments to be passed.
"""
super(InvertedBottleneckBlockQuantized, self).__init__(**kwargs)
self._in_filters = in_filters
self._out_filters = out_filters
self._expand_ratio = expand_ratio
self._strides = strides
self._kernel_size = kernel_size
self._se_ratio = se_ratio
self._divisible_by = divisible_by
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._dilation_rate = dilation_rate
self._use_sync_bn = use_sync_bn
self._regularize_depthwise = regularize_depthwise
self._use_depthwise = use_depthwise
self._use_residual = use_residual
self._activation = activation
self._se_inner_activation = se_inner_activation
self._se_gating_activation = se_gating_activation
self._se_round_down_protect = se_round_down_protect
self._depthwise_activation = depthwise_activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._expand_se_in_filters = expand_se_in_filters
self._output_intermediate_endpoints = output_intermediate_endpoints
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if use_sync_bn else tf.keras.layers.BatchNormalization)
self._norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
self._norm = helper.BatchNormalizationNoQuantized(norm_layer)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
if not depthwise_activation:
self._depthwise_activation = activation
    if regularize_depthwise:
      self._depthwise_regularizer = kernel_regularizer
    else:
      self._depthwise_regularizer = None
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
expand_filters = self._in_filters
if self._expand_ratio > 1:
# First 1x1 conv for channel expansion.
expand_filters = nn_layers.make_divisible(
self._in_filters * self._expand_ratio, self._divisible_by)
expand_kernel = 1 if self._use_depthwise else self._kernel_size
expand_stride = 1 if self._use_depthwise else self._strides
self._conv0 = helper.Conv2DQuantized(
filters=expand_filters,
kernel_size=expand_kernel,
strides=expand_stride,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm0 = helper.norm_by_activation(self._activation,
self._norm_with_quantize,
self._norm)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
if self._use_depthwise:
# Depthwise conv.
self._conv1 = helper.DepthwiseConv2DQuantized(
kernel_size=(self._kernel_size, self._kernel_size),
strides=self._strides,
padding='same',
depth_multiplier=1,
dilation_rate=self._dilation_rate,
use_bias=False,
depthwise_initializer=self._kernel_initializer,
          depthwise_regularizer=self._depthwise_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm1 = helper.norm_by_activation(self._depthwise_activation,
self._norm_with_quantize,
self._norm)(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._depthwise_activation_layer = (
tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._depthwise_activation,
use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig()))
# Squeeze and excitation.
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
logging.info('Use Squeeze and excitation.')
in_filters = self._in_filters
if self._expand_se_in_filters:
in_filters = expand_filters
self._squeeze_excitation = qat_nn_layers.SqueezeExcitationQuantized(
in_filters=in_filters,
out_filters=expand_filters,
se_ratio=self._se_ratio,
divisible_by=self._divisible_by,
round_down_protect=self._se_round_down_protect,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._se_inner_activation,
gating_activation=self._se_gating_activation)
else:
self._squeeze_excitation = None
# Last 1x1 conv.
self._conv2 = helper.Conv2DQuantized(
filters=self._out_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=helper.NoOpActivation())
self._norm2 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
self._add = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Add(),
configs.Default8BitQuantizeConfig([], [], True))
super(InvertedBottleneckBlockQuantized, self).build(input_shape)
def get_config(self) -> Dict[str, Any]:
"""Get a config of this layer."""
config = {
'in_filters': self._in_filters,
'out_filters': self._out_filters,
'expand_ratio': self._expand_ratio,
'strides': self._strides,
'kernel_size': self._kernel_size,
'se_ratio': self._se_ratio,
'divisible_by': self._divisible_by,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'se_inner_activation': self._se_inner_activation,
'se_gating_activation': self._se_gating_activation,
'se_round_down_protect': self._se_round_down_protect,
'expand_se_in_filters': self._expand_se_in_filters,
'depthwise_activation': self._depthwise_activation,
'dilation_rate': self._dilation_rate,
'use_sync_bn': self._use_sync_bn,
'regularize_depthwise': self._regularize_depthwise,
'use_depthwise': self._use_depthwise,
'use_residual': self._use_residual,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'output_intermediate_endpoints': self._output_intermediate_endpoints
}
base_config = super(InvertedBottleneckBlockQuantized, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(
self,
inputs: tf.Tensor,
training: Optional[Union[bool, tf.Tensor]] = None
) -> Union[tf.Tensor, Tuple[tf.Tensor, Dict[str, tf.Tensor]]]:
"""Run the InvertedBottleneckBlockQuantized logics."""
endpoints = {}
shortcut = inputs
if self._expand_ratio > 1:
x = self._conv0(inputs)
x = self._norm0(x)
x = self._activation_layer(x)
else:
x = inputs
if self._use_depthwise:
x = self._conv1(x)
x = self._norm1(x)
x = self._depthwise_activation_layer(x)
if self._output_intermediate_endpoints:
endpoints['depthwise'] = x
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
x = self._conv2(x)
x = self._norm2(x)
if (self._use_residual and self._in_filters == self._out_filters and
self._strides == 1):
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
x = self._add([x, shortcut])
if self._output_intermediate_endpoints:
return x, endpoints
return x
| 30,118 | 40.948468 | 100 | py |
models | models-master/official/projects/qat/vision/modeling/heads/dense_prediction_heads_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for dense_prediction_heads.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.qat.vision.modeling.heads import dense_prediction_heads
def get_attribute_heads(att_head_type):
if att_head_type == 'regression_head':
return [
dict(name='depth', type='regression', size=1, prediction_tower_name='')
]
elif att_head_type == 'shared_prediction_tower_attribute_heads':
return [
dict(
name='attr_1', type='regression', size=1, prediction_tower_name=''),
dict(
name='attr_2',
type='classification',
size=1,
prediction_tower_name='tower_1'),
dict(
name='attr_3',
type='regression',
size=1,
prediction_tower_name='tower_1')
]
else:
raise ValueError('Undefined attribute type.')
class RetinaNetHeadQuantizedTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(False, False, False, None),
(False, True, False, None),
(True, False, True, 'regression_head'),
(True, True, True, 'regression_head'),
(True, True, True, 'shared_prediction_tower_attribute_heads'),
)
def test_forward(self, use_separable_conv, use_sync_bn, has_att_heads,
att_head_type):
if has_att_heads:
attribute_heads = get_attribute_heads(att_head_type)
else:
attribute_heads = None
retinanet_head = dense_prediction_heads.RetinaNetHeadQuantized(
min_level=3,
max_level=4,
num_classes=3,
num_anchors_per_location=3,
num_convs=2,
num_filters=256,
attribute_heads=attribute_heads,
use_separable_conv=use_separable_conv,
activation='relu',
use_sync_bn=use_sync_bn,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
features = {
'3': np.random.rand(2, 128, 128, 16),
'4': np.random.rand(2, 64, 64, 16),
}
scores, boxes, attributes = retinanet_head(features)
self.assertAllEqual(scores['3'].numpy().shape, [2, 128, 128, 9])
self.assertAllEqual(scores['4'].numpy().shape, [2, 64, 64, 9])
self.assertAllEqual(boxes['3'].numpy().shape, [2, 128, 128, 12])
self.assertAllEqual(boxes['4'].numpy().shape, [2, 64, 64, 12])
if has_att_heads:
for att in attributes.values():
self.assertAllEqual(att['3'].numpy().shape, [2, 128, 128, 3])
self.assertAllEqual(att['4'].numpy().shape, [2, 64, 64, 3])
def test_serialize_deserialize(self):
retinanet_head = dense_prediction_heads.RetinaNetHeadQuantized(
min_level=3,
max_level=7,
num_classes=3,
num_anchors_per_location=9,
num_convs=2,
num_filters=16,
attribute_heads=None,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
config = retinanet_head.get_config()
    new_retinanet_head = (
        dense_prediction_heads.RetinaNetHeadQuantized.from_config(config))
self.assertAllEqual(
retinanet_head.get_config(), new_retinanet_head.get_config())
| 3,980 | 32.737288 | 80 | py |
models | models-master/official/projects/qat/vision/modeling/heads/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Heads package definition."""
from official.projects.qat.vision.modeling.heads.dense_prediction_heads import RetinaNetHeadQuantized
| 743 | 42.764706 | 101 | py |
models | models-master/official/projects/qat/vision/modeling/heads/dense_prediction_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of dense prediction heads."""
from typing import List, Mapping, Union, Optional, Any, Dict
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper
@tf.keras.utils.register_keras_serializable(package='Vision')
class RetinaNetHeadQuantized(tf.keras.layers.Layer):
"""Creates a RetinaNet quantized head."""
def __init__(
self,
min_level: int,
max_level: int,
num_classes: int,
num_anchors_per_location: int,
num_convs: int = 4,
num_filters: int = 256,
attribute_heads: Optional[List[Dict[str, Any]]] = None,
use_separable_conv: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
num_params_per_anchor: int = 4,
share_classification_heads: bool = False,
share_level_convs: bool = True,
**kwargs):
"""Initializes a RetinaNet quantized head.
Args:
min_level: An `int` number of minimum feature level.
max_level: An `int` number of maximum feature level.
num_classes: An `int` number of classes to predict.
      num_anchors_per_location: An `int` number of anchors per pixel
        location.
      num_convs: An `int` number that represents the number of intermediate
        conv layers before the prediction.
      num_filters: An `int` number that represents the number of filters of the
        intermediate conv layers.
attribute_heads: If not None, a list that contains a dict for each
additional attribute head. Each dict consists of 4 key-value pairs:
`name`, `type` ('regression' or 'classification'), `size` (number of
predicted values for each instance), and `prediction_tower_name`
(optional, specifies shared prediction towers.)
      use_separable_conv: A `bool` that indicates whether separable
        convolution layers are used.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
num_params_per_anchor: Number of parameters required to specify an anchor
box. For example, `num_params_per_anchor` would be 4 for axis-aligned
anchor boxes specified by their y-centers, x-centers, heights, and
widths.
      share_classification_heads: A `bool` that indicates whether to share
        weights among the main and attribute classification heads. Not used in
the QAT model.
share_level_convs: An optional bool to enable sharing convs
across levels for classnet, boxnet, classifier and box regressor.
If True, convs will be shared across all levels. Not used in the QAT
model.
**kwargs: Additional keyword arguments to be passed.
"""
del share_classification_heads
del share_level_convs
super().__init__(**kwargs)
self._config_dict = {
'min_level': min_level,
'max_level': max_level,
'num_classes': num_classes,
'num_anchors_per_location': num_anchors_per_location,
'num_convs': num_convs,
'num_filters': num_filters,
'attribute_heads': attribute_heads,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'num_params_per_anchor': num_params_per_anchor,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the head."""
if self._config_dict['use_separable_conv']:
conv_op = helper.SeparableConv2DQuantized
else:
conv_op = helper.quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.Default8BitConvQuantizeConfig(
['kernel'], ['activation'], False))
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(
stddev=0.01),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
base_bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_op = helper.norm_by_activation(
self._config_dict['activation'],
helper.quantize_wrapped_layer(
base_bn_op, configs.Default8BitOutputQuantizeConfig()),
helper.quantize_wrapped_layer(
base_bn_op, configs.NoOpQuantizeConfig()))
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
# Class net.
self._cls_convs = []
self._cls_norms = []
for level in range(
self._config_dict['min_level'], self._config_dict['max_level'] + 1):
this_level_cls_norms = []
for i in range(self._config_dict['num_convs']):
if level == self._config_dict['min_level']:
cls_conv_name = 'classnet-conv_{}'.format(i)
self._cls_convs.append(conv_op(name=cls_conv_name, **conv_kwargs))
cls_norm_name = 'classnet-conv-norm_{}_{}'.format(level, i)
this_level_cls_norms.append(bn_op(name=cls_norm_name, **bn_kwargs))
self._cls_norms.append(this_level_cls_norms)
classifier_kwargs = {
'filters': (
self._config_dict['num_classes'] *
self._config_dict['num_anchors_per_location']),
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
classifier_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
self._classifier = conv_op(
name='scores', last_quantize=True, **classifier_kwargs)
# Box net.
self._box_convs = []
self._box_norms = []
for level in range(
self._config_dict['min_level'], self._config_dict['max_level'] + 1):
this_level_box_norms = []
for i in range(self._config_dict['num_convs']):
if level == self._config_dict['min_level']:
box_conv_name = 'boxnet-conv_{}'.format(i)
self._box_convs.append(conv_op(name=box_conv_name, **conv_kwargs))
box_norm_name = 'boxnet-conv-norm_{}_{}'.format(level, i)
this_level_box_norms.append(bn_op(name=box_norm_name, **bn_kwargs))
self._box_norms.append(this_level_box_norms)
box_regressor_kwargs = {
'filters': (self._config_dict['num_params_per_anchor'] *
self._config_dict['num_anchors_per_location']),
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
box_regressor_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(
stddev=1e-5),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
self._box_regressor = conv_op(
name='boxes', last_quantize=True, **box_regressor_kwargs)
# Attribute learning nets.
if self._config_dict['attribute_heads']:
self._att_predictors = {}
self._att_convs = {}
self._att_norms = {}
for att_config in self._config_dict['attribute_heads']:
att_name = att_config['name']
att_type = att_config['type']
att_size = att_config['size']
att_convs_i = []
att_norms_i = []
# Build conv and norm layers.
for level in range(self._config_dict['min_level'],
self._config_dict['max_level'] + 1):
this_level_att_norms = []
for i in range(self._config_dict['num_convs']):
if level == self._config_dict['min_level']:
att_conv_name = '{}-conv_{}'.format(att_name, i)
att_convs_i.append(conv_op(name=att_conv_name, **conv_kwargs))
att_norm_name = '{}-conv-norm_{}_{}'.format(att_name, level, i)
this_level_att_norms.append(bn_op(name=att_norm_name, **bn_kwargs))
att_norms_i.append(this_level_att_norms)
self._att_convs[att_name] = att_convs_i
self._att_norms[att_name] = att_norms_i
# Build the final prediction layer.
att_predictor_kwargs = {
'filters':
(att_size * self._config_dict['num_anchors_per_location']),
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if att_type == 'regression':
att_predictor_kwargs.update(
{'bias_initializer': tf.zeros_initializer()})
elif att_type == 'classification':
att_predictor_kwargs.update({
'bias_initializer':
tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
})
else:
raise ValueError(
'Attribute head type {} not supported.'.format(att_type))
if not self._config_dict['use_separable_conv']:
att_predictor_kwargs.update({
'kernel_initializer':
tf.keras.initializers.RandomNormal(stddev=1e-5),
'kernel_regularizer':
self._config_dict['kernel_regularizer'],
})
self._att_predictors[att_name] = conv_op(
name='{}_attributes'.format(att_name), **att_predictor_kwargs)
super().build(input_shape)
def call(self, features: Mapping[str, tf.Tensor]):
"""Forward pass of the RetinaNet quantized head.
Args:
features: A `dict` of `tf.Tensor` where
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor`, the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
Returns:
scores: A `dict` of `tf.Tensor` which includes scores of the predictions.
- key: A `str` of the level of the multilevel predictions.
        - values: A `tf.Tensor` of the class scores predicted from a particular
          feature level, whose shape is
          [batch, height_l, width_l, num_classes * num_anchors_per_location].
boxes: A `dict` of `tf.Tensor` which includes coordinates of the
predictions.
- key: A `str` of the level of the multilevel predictions.
        - values: A `tf.Tensor` of the box coordinates predicted from a
          particular feature level, whose shape is
[batch, height_l, width_l,
num_params_per_anchor * num_anchors_per_location].
attributes: a dict of (attribute_name, attribute_prediction). Each
`attribute_prediction` is a dict of:
- key: `str`, the level of the multilevel predictions.
        - values: `Tensor`, the attribute predictions from a particular feature
          level, whose shape is
[batch, height_l, width_l,
attribute_size * num_anchors_per_location].
Can be an empty dictionary if no attribute learning is required.
"""
scores = {}
boxes = {}
if self._config_dict['attribute_heads']:
attributes = {
att_config['name']: {}
for att_config in self._config_dict['attribute_heads']
}
else:
attributes = {}
for i, level in enumerate(
range(self._config_dict['min_level'],
self._config_dict['max_level'] + 1)):
this_level_features = features[str(level)]
# class net.
x = this_level_features
for conv, norm in zip(self._cls_convs, self._cls_norms[i]):
x = conv(x)
x = norm(x)
x = self._activation(x)
scores[str(level)] = self._classifier(x)
# box net.
x = this_level_features
for conv, norm in zip(self._box_convs, self._box_norms[i]):
x = conv(x)
x = norm(x)
x = self._activation(x)
boxes[str(level)] = self._box_regressor(x)
# attribute nets.
if self._config_dict['attribute_heads']:
prediction_tower_output = {}
for att_config in self._config_dict['attribute_heads']:
att_name = att_config['name']
          def build_prediction_tower(attribute_name, features, feature_level):
            x = features
            for conv, norm in zip(
                self._att_convs[attribute_name],
                self._att_norms[attribute_name][feature_level]):
x = conv(x)
x = norm(x)
x = self._activation(x)
return x
prediction_tower_name = att_config['prediction_tower_name']
if not prediction_tower_name:
attributes[att_name][str(level)] = self._att_predictors[att_name](
build_prediction_tower(att_name, this_level_features, i))
else:
if prediction_tower_name not in prediction_tower_output:
prediction_tower_output[
prediction_tower_name] = build_prediction_tower(
att_name, this_level_features, i)
attributes[att_name][str(level)] = self._att_predictors[att_name](
prediction_tower_output[prediction_tower_name])
return scores, boxes, attributes
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
| 15,779 | 40.09375 | 80 | py |
models | models-master/official/projects/qat/vision/tasks/retinanet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RetinaNet task."""
# pylint: disable=unused-import
import os
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.qat.vision.tasks import retinanet
from official.vision.configs import retinanet as exp_cfg
from official.vision.dataloaders import tfexample_utils
class RetinaNetTaskTest(parameterized.TestCase, tf.test.TestCase):
def _create_test_tfrecord(self, tfrecord_file, example, num_samples):
examples = [example] * num_samples
tfexample_utils.dump_to_tfrecord(
record_file=tfrecord_file, tf_examples=examples)
@parameterized.parameters(
('retinanet_mobile_coco_qat', True),
('retinanet_mobile_coco_qat', False),
)
def test_retinanet_task(self, test_config, is_training):
"""RetinaNet task test for training and val using toy configs."""
input_image_size = [384, 384]
test_tfrecord_file = os.path.join(self.get_temp_dir(), 'det_test.tfrecord')
example = tfexample_utils.create_detection_test_example(
image_height=input_image_size[0],
image_width=input_image_size[1],
image_channel=3,
num_instances=10)
self._create_test_tfrecord(
tfrecord_file=test_tfrecord_file, example=example, num_samples=10)
config = exp_factory.get_exp_config(test_config)
# modify config to suit local testing
config.task.model.input_size = [128, 128, 3]
config.trainer.steps_per_loop = 1
config.task.train_data.global_batch_size = 1
config.task.validation_data.global_batch_size = 1
config.task.train_data.shuffle_buffer_size = 2
config.task.validation_data.shuffle_buffer_size = 2
config.task.validation_data.input_path = test_tfrecord_file
config.task.train_data.input_path = test_tfrecord_file
config.task.annotation_file = None
config.train_steps = 1
task = retinanet.RetinaNetTask(config.task)
model = task.build_model()
self.assertLen(model.weights, 2393)
metrics = task.build_metrics(training=is_training)
strategy = tf.distribute.get_strategy()
data_config = config.task.train_data if is_training else config.task.validation_data
dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
data_config)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if is_training:
task.train_step(next(iterator), model, optimizer, metrics=metrics)
else:
task.validation_step(next(iterator), model, metrics=metrics)
if __name__ == '__main__':
tf.test.main()
| 3,423 | 37.909091 | 88 | py |
models | models-master/official/projects/qat/vision/tasks/retinanet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet task definition."""
import tensorflow as tf
from official.core import task_factory
from official.projects.qat.vision.configs import retinanet as exp_cfg
from official.projects.qat.vision.modeling import factory
from official.vision.tasks import retinanet
@task_factory.register_task_cls(exp_cfg.RetinaNetTask)
class RetinaNetTask(retinanet.RetinaNetTask):
"""A task for RetinaNet object detection with QAT."""
def build_model(self) -> tf.keras.Model:
"""Builds RetinaNet model with QAT."""
model = super(RetinaNetTask, self).build_model()
# Call the model with dummy input to build the head part.
    dummy_input = tf.zeros([1] + self.task_config.model.input_size)
    model(dummy_input, training=True)
if self.task_config.quantization:
model = factory.build_qat_retinanet(
model,
self.task_config.quantization,
model_config=self.task_config.model)
return model
| 1,551 | 36.853659 | 74 | py |
models | models-master/official/projects/qat/vision/tasks/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation task definition."""
import tensorflow as tf
from official.core import task_factory
from official.projects.qat.vision.configs import semantic_segmentation as exp_cfg
from official.projects.qat.vision.modeling import factory
from official.vision.tasks import semantic_segmentation
@task_factory.register_task_cls(exp_cfg.SemanticSegmentationTask)
class SemanticSegmentationTask(semantic_segmentation.SemanticSegmentationTask):
"""A task for semantic segmentation with QAT."""
def build_model(self) -> tf.keras.Model:
"""Builds semantic segmentation model with QAT."""
model = super().build_model()
input_specs = tf.keras.layers.InputSpec(shape=[None] +
self.task_config.model.input_size)
if self.task_config.quantization:
model = factory.build_qat_segmentation_model(
model, self.task_config.quantization, input_specs)
return model
| 1,552 | 40.972973 | 81 | py |
models | models-master/official/projects/qat/vision/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks package definition."""
from official.projects.qat.vision.tasks import image_classification
from official.projects.qat.vision.tasks import retinanet
from official.projects.qat.vision.tasks import semantic_segmentation
| 835 | 43 | 74 | py |
models | models-master/official/projects/qat/vision/tasks/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image classification task."""
# pylint: disable=unused-import
import os
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.qat.vision.tasks import image_classification as img_cls_task
from official.vision.dataloaders import tfexample_utils
class ImageClassificationTaskTest(tf.test.TestCase, parameterized.TestCase):
def _create_test_tfrecord(self, tfrecord_file, example, num_samples):
examples = [example] * num_samples
tfexample_utils.dump_to_tfrecord(
record_file=tfrecord_file, tf_examples=examples)
@parameterized.parameters(('resnet_imagenet_qat'),
('mobilenet_imagenet_qat'))
def test_task(self, config_name):
input_image_size = [224, 224]
test_tfrecord_file = os.path.join(self.get_temp_dir(), 'cls_test.tfrecord')
example = tf.train.Example.FromString(
tfexample_utils.create_classification_example(
image_height=input_image_size[0], image_width=input_image_size[1]))
self._create_test_tfrecord(
tfrecord_file=test_tfrecord_file, example=example, num_samples=10)
config = exp_factory.get_exp_config(config_name)
config.task.train_data.global_batch_size = 2
config.task.validation_data.input_path = test_tfrecord_file
config.task.train_data.input_path = test_tfrecord_file
task = img_cls_task.ImageClassificationTask(config.task)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
config.task.train_data)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
for metric in metrics:
logs[metric.name] = metric.result()
self.assertIn('loss', logs)
self.assertIn('accuracy', logs)
self.assertIn('top_5_accuracy', logs)
logs = task.validation_step(next(iterator), model, metrics=metrics)
for metric in metrics:
logs[metric.name] = metric.result()
self.assertIn('loss', logs)
self.assertIn('accuracy', logs)
self.assertIn('top_5_accuracy', logs)
if __name__ == '__main__':
tf.test.main()
| 3,150 | 38.3875 | 83 | py |
models | models-master/official/projects/qat/vision/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
import tensorflow as tf
from official.core import task_factory
from official.projects.qat.vision.configs import image_classification as exp_cfg
from official.projects.qat.vision.modeling import factory
from official.vision.tasks import image_classification
@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)
class ImageClassificationTask(image_classification.ImageClassificationTask):
"""A task for image classification with QAT."""
def build_model(self) -> tf.keras.Model:
"""Builds classification model with QAT."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
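    # For example (illustrative): l2_weight_decay=1e-4 yields
    # tf.keras.regularizers.l2(5e-5), so the penalty 5e-5 * sum(w**2)
    # matches 1e-4 * tf.nn.l2_loss(w) = 1e-4 * sum(w**2) / 2.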
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = super(ImageClassificationTask, self).build_model()
if self.task_config.quantization:
model = factory.build_qat_classification_model(
model,
self.task_config.quantization,
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
return model
| 2,047 | 39.96 | 80 | py |
models | models-master/official/projects/movinet/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/movinet/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Training driver.
To train:
CONFIG_FILE=official/projects/movinet/configs/yaml/movinet_a0_k600_8x8.yaml
python3 official/projects/movinet/train.py \
--experiment=movinet_kinetics600 \
--mode=train \
--model_dir=/tmp/movinet/ \
--config_file=${CONFIG_FILE} \
--params_override="" \
--gin_file="" \
--gin_params="" \
--tpu="" \
--tf_data_service=""
"""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# Import movinet libraries to register the backbone and model into tf.vision
# model garden factory.
# pylint: disable=unused-import
from official.projects.movinet.modeling import movinet
from official.projects.movinet.modeling import movinet_model
from official.vision import registry_imports
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
if 'train_and_eval' in FLAGS.mode:
assert (params.task.train_data.feature_shape ==
params.task.validation_data.feature_shape), (
f'train {params.task.train_data.feature_shape} != validate '
f'{params.task.validation_data.feature_shape}')
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 3,320 | 35.097826 | 80 | py |
models | models-master/official/projects/movinet/train_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train.py."""
import json
import os
import random
from absl import flags
from absl import logging
from absl.testing import flagsaver
import tensorflow as tf
from official.projects.movinet import train as train_lib
from official.vision.dataloaders import tfexample_utils
FLAGS = flags.FLAGS
class TrainTest(tf.test.TestCase):
def setUp(self):
super(TrainTest, self).setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
tf.io.gfile.makedirs(self._model_dir)
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
# pylint: disable=g-complex-comprehension
examples = [
tfexample_utils.make_video_test_example(
image_shape=(32, 32, 3),
audio_shape=(20, 128),
label=random.randint(0, 100)) for _ in range(2)
]
# pylint: enable=g-complex-comprehension
tfexample_utils.dump_to_tfrecord(self._data_path, tf_examples=examples)
def test_train_and_evaluation_pipeline_runs(self):
saved_flag_values = flagsaver.save_flag_values()
train_lib.tfm_flags.define_flags()
FLAGS.mode = 'train'
FLAGS.model_dir = self._model_dir
FLAGS.experiment = 'movinet_kinetics600'
logging.info('Test pipeline correctness.')
num_frames = 4
# Test model training pipeline runs.
params_override = json.dumps({
'runtime': {
'distribution_strategy': 'mirrored',
'mixed_precision_dtype': 'float32',
},
'trainer': {
'train_steps': 2,
'validation_steps': 2,
},
'task': {
'train_data': {
'input_path': self._data_path,
'file_type': 'tfrecord',
'feature_shape': [num_frames, 32, 32, 3],
'global_batch_size': 2,
},
'validation_data': {
'input_path': self._data_path,
'file_type': 'tfrecord',
'global_batch_size': 2,
'feature_shape': [num_frames * 2, 32, 32, 3],
}
}
})
FLAGS.params_override = params_override
train_lib.main('unused_args')
# Test model evaluation pipeline runs on newly produced checkpoint.
FLAGS.mode = 'eval'
with train_lib.gin.unlock_config():
train_lib.main('unused_args')
flagsaver.restore_flag_values(saved_flag_values)
if __name__ == '__main__':
tf.test.main()
| 3,119 | 30.515152 | 75 | py |
models | models-master/official/projects/movinet/tools/export_saved_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for export_saved_model."""
from absl import flags
import tensorflow as tf
import tensorflow_hub as hub
from official.projects.movinet.tools import export_saved_model
FLAGS = flags.FLAGS
class ExportSavedModelTest(tf.test.TestCase):
def test_movinet_export_a0_base_with_tfhub(self):
saved_model_path = self.get_temp_dir()
FLAGS.export_path = saved_model_path
FLAGS.model_id = 'a0'
FLAGS.causal = False
FLAGS.num_classes = 600
export_saved_model.main('unused_args')
encoder = hub.KerasLayer(saved_model_path, trainable=True)
inputs = tf.keras.layers.Input(
shape=[None, None, None, 3],
dtype=tf.float32)
outputs = encoder(dict(image=inputs))
model = tf.keras.Model(inputs, outputs)
example_input = tf.ones([1, 8, 172, 172, 3])
outputs = model(example_input)
self.assertAllEqual(outputs.shape, [1, 600])
def test_movinet_export_a0_stream_with_tfhub(self):
saved_model_path = self.get_temp_dir()
FLAGS.export_path = saved_model_path
FLAGS.model_id = 'a0'
FLAGS.causal = True
FLAGS.num_classes = 600
export_saved_model.main('unused_args')
encoder = hub.KerasLayer(saved_model_path, trainable=True)
image_input = tf.keras.layers.Input(
shape=[None, None, None, 3],
dtype=tf.float32,
name='image')
init_states_fn = encoder.resolved_object.signatures['init_states']
state_shapes = {
name: ([s if s > 0 else None for s in state.shape], state.dtype)
for name, state in init_states_fn(tf.constant([0, 0, 0, 0, 3])).items()
}
states_input = {
name: tf.keras.Input(shape[1:], dtype=dtype, name=name)
for name, (shape, dtype) in state_shapes.items()
}
inputs = {**states_input, 'image': image_input}
outputs = encoder(inputs)
model = tf.keras.Model(inputs, outputs)
example_input = tf.ones([1, 8, 172, 172, 3])
frames = tf.split(example_input, example_input.shape[1], axis=1)
init_states = init_states_fn(tf.shape(example_input))
expected_outputs, _ = model({**init_states, 'image': example_input})
states = init_states
for frame in frames:
outputs, states = model({**states, 'image': frame})
self.assertAllEqual(outputs.shape, [1, 600])
self.assertNotEmpty(states)
self.assertAllClose(outputs, expected_outputs, 1e-5, 1e-5)
def test_movinet_export_a0_stream_with_tflite(self):
saved_model_path = self.get_temp_dir()
FLAGS.export_path = saved_model_path
FLAGS.model_id = 'a0'
FLAGS.causal = True
FLAGS.conv_type = '2plus1d'
FLAGS.se_type = '2plus3d'
FLAGS.activation = 'hard_swish'
FLAGS.gating_activation = 'hard_sigmoid'
FLAGS.use_positional_encoding = False
FLAGS.num_classes = 600
FLAGS.batch_size = 1
FLAGS.num_frames = 1
FLAGS.image_size = 172
FLAGS.bundle_input_init_states_fn = False
export_saved_model.main('unused_args')
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
tflite_model = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite_model)
runner = interpreter.get_signature_runner('serving_default')
def state_name(name: str) -> str:
return name[len('serving_default_'):-len(':0')]
init_states = {
state_name(x['name']): tf.zeros(x['shape'], dtype=x['dtype'])
for x in interpreter.get_input_details()
}
del init_states['image']
video = tf.ones([1, 8, 172, 172, 3])
clips = tf.split(video, video.shape[1], axis=1)
states = init_states
for clip in clips:
outputs = runner(**states, image=clip)
logits = outputs.pop('logits')
states = outputs
self.assertAllEqual(logits.shape, [1, 600])
self.assertNotEmpty(states)
if __name__ == '__main__':
tf.test.main()
| 4,451 | 28.879195 | 79 | py |
models | models-master/official/projects/movinet/tools/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Exports models to tf.saved_model.
Export example:
```shell
python3 export_saved_model.py \
--export_path=/tmp/movinet/ \
--model_id=a0 \
--causal=True \
--conv_type="3d" \
--num_classes=600 \
--use_positional_encoding=False \
--checkpoint_path=""
```
Export for TF Lite example:
```shell
python3 export_saved_model.py \
--model_id=a0 \
--causal=True \
--conv_type=2plus1d \
--se_type=2plus3d \
--activation=hard_swish \
--gating_activation=hard_sigmoid \
--use_positional_encoding=False \
--num_classes=600 \
--batch_size=1 \
--num_frames=1 \ # Use a single frame for streaming mode
--image_size=172 \ # Input resolution for the model
--bundle_input_init_states_fn=False \
--checkpoint_path=/path/to/checkpoint \
--export_path=/tmp/movinet_a0_stream
```
To use an exported saved_model, refer to export_saved_model_test.py.
"""
from typing import Optional, Tuple
from absl import app
from absl import flags
import tensorflow as tf
from official.projects.movinet.modeling import movinet
from official.projects.movinet.modeling import movinet_model
flags.DEFINE_string(
'export_path', '/tmp/movinet/',
'Export path to save the saved_model file.')
flags.DEFINE_string(
'model_id', 'a0', 'MoViNet model name.')
flags.DEFINE_bool(
'causal', False, 'Run the model in causal mode.')
flags.DEFINE_string(
'conv_type', '3d',
'3d, 2plus1d, or 3d_2plus1d. 3d configures the network '
'to use the default 3D convolution. 2plus1d uses (2+1)D convolution '
'with Conv2D operations and 2D reshaping (e.g., a 5x3x3 kernel becomes '
'3x3 followed by 5x1 conv). 3d_2plus1d uses (2+1)D convolution with '
'Conv3D and no 2D reshaping (e.g., a 5x3x3 kernel becomes 1x3x3 '
'followed by 5x1x1 conv).')
flags.DEFINE_string(
'se_type', '3d',
'3d, 2d, or 2plus3d. 3d uses the default 3D spatiotemporal global average'
'pooling for squeeze excitation. 2d uses 2D spatial global average pooling '
'on each frame. 2plus3d concatenates both 3D and 2D global average '
'pooling.')
flags.DEFINE_string(
'activation', 'swish',
'The main activation to use across layers.')
flags.DEFINE_string(
'classifier_activation', 'swish',
'The classifier activation to use.')
flags.DEFINE_string(
'gating_activation', 'sigmoid',
'The gating activation to use in squeeze-excitation layers.')
flags.DEFINE_bool(
'use_positional_encoding', False,
'Whether to use positional encoding (only applied when causal=True).')
flags.DEFINE_integer(
'num_classes', 600, 'The number of classes for prediction.')
flags.DEFINE_integer(
'batch_size', None,
'The batch size of the input. Set to None for dynamic input.')
flags.DEFINE_integer(
'num_frames', None,
'The number of frames of the input. Set to None for dynamic input.')
flags.DEFINE_integer(
'image_size', None,
'The resolution of the input. Set to None for dynamic input.')
flags.DEFINE_bool(
'bundle_input_init_states_fn', True,
    'Add init_states as a function signature to the saved model. '
'This is not necessary if the input shape is static (e.g., for TF Lite).')
flags.DEFINE_string(
'checkpoint_path', '',
'Checkpoint path to load. Leave blank for default initialization.')
flags.DEFINE_bool(
'assert_checkpoint_objects_matched',
True,
'Whether to check the checkpoint objects exactly match those of the model.',
)
FLAGS = flags.FLAGS
def export_saved_model(
model: tf.keras.Model,
input_shape: Tuple[int, int, int, int, int],
export_path: str = '/tmp/movinet/',
causal: bool = False,
bundle_input_init_states_fn: bool = True,
checkpoint_path: Optional[str] = None,
assert_checkpoint_objects_matched: bool = True,
) -> None:
"""Exports a MoViNet model to a saved model.
Args:
model: the tf.keras.Model to export.
input_shape: The 5D spatiotemporal input shape of size [batch_size,
num_frames, image_height, image_width, num_channels]. Set the field or a
shape position in the field to None for dynamic input.
export_path: Export path to save the saved_model file.
causal: Run the model in causal mode.
bundle_input_init_states_fn: Add init_states as a function signature to the
saved model. This is not necessary if the input shape is static (e.g., for
TF Lite).
checkpoint_path: Checkpoint path to load. Leave blank to keep the model's
initialization.
assert_checkpoint_objects_matched: Whether to check the checkpoint objects
exactly match those of the model.
"""
# Use dimensions of 1 except the channels to export faster,
# since we only really need the last dimension to build and get the output
# states. These dimensions can be set to `None` once the model is built.
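  # For example (illustrative): input_shape = (None, None, 172, 172, 3)
  # becomes input_shape_concrete = [1, 1, 172, 172, 3] for this build pass.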
input_shape_concrete = [1 if s is None else s for s in input_shape]
model.build(input_shape_concrete)
# Compile model to generate some internal Keras variables.
model.compile()
if checkpoint_path:
checkpoint = tf.train.Checkpoint(model=model)
status = checkpoint.restore(checkpoint_path)
if assert_checkpoint_objects_matched:
status.assert_existing_objects_matched()
if causal:
# Call the model once to get the output states. Call again with `states`
# input to ensure that the inputs with the `states` argument is built
# with the full output state shapes.
input_image = tf.ones(input_shape_concrete)
_, states = model({
**model.init_states(input_shape_concrete), 'image': input_image})
_ = model({**states, 'image': input_image})
# Create a function to explicitly set the names of the outputs
def predict(inputs):
outputs, states = model(inputs)
return {**states, 'logits': outputs}
specs = {
name: tf.TensorSpec(spec.shape, name=name, dtype=spec.dtype)
for name, spec in model.initial_state_specs(
input_shape).items()
}
specs['image'] = tf.TensorSpec(
input_shape, dtype=model.dtype, name='image')
predict_fn = tf.function(predict, jit_compile=True)
predict_fn = predict_fn.get_concrete_function(specs)
init_states_fn = tf.function(model.init_states, jit_compile=True)
init_states_fn = init_states_fn.get_concrete_function(
tf.TensorSpec([5], dtype=tf.int32))
if bundle_input_init_states_fn:
signatures = {'call': predict_fn, 'init_states': init_states_fn}
else:
signatures = predict_fn
tf.keras.models.save_model(
model, export_path, signatures=signatures)
else:
_ = model(tf.ones(input_shape_concrete))
tf.keras.models.save_model(model, export_path)
def build_and_export_saved_model(
export_path: str = '/tmp/movinet/',
model_id: str = 'a0',
causal: bool = False,
conv_type: str = '3d',
se_type: str = '3d',
activation: str = 'swish',
classifier_activation: str = 'swish',
gating_activation: str = 'sigmoid',
use_positional_encoding: bool = False,
num_classes: int = 600,
input_shape: Optional[Tuple[int, int, int, int, int]] = None,
bundle_input_init_states_fn: bool = True,
checkpoint_path: Optional[str] = None,
assert_checkpoint_objects_matched: bool = True,
) -> None:
"""Builds and exports a MoViNet model to a saved model.
Args:
export_path: Export path to save the saved_model file.
model_id: MoViNet model name.
causal: Run the model in causal mode.
conv_type: 3d, 2plus1d, or 3d_2plus1d. 3d configures the network to use the
default 3D convolution. 2plus1d uses (2+1)D convolution with Conv2D
operations and 2D reshaping (e.g., a 5x3x3 kernel becomes 3x3 followed by
5x1 conv). 3d_2plus1d uses (2+1)D convolution with Conv3D and no 2D
reshaping (e.g., a 5x3x3 kernel becomes 1x3x3 followed by 5x1x1 conv).
se_type: 3d, 2d, or 2plus3d. 3d uses the default 3D spatiotemporal global
average pooling for squeeze excitation. 2d uses 2D spatial global average
pooling on each frame. 2plus3d concatenates both 3D and 2D global average
pooling.
activation: The main activation to use across layers.
classifier_activation: The classifier activation to use.
gating_activation: The gating activation to use in squeeze-excitation
layers.
use_positional_encoding: Whether to use positional encoding (only applied
when causal=True).
num_classes: The number of classes for prediction.
input_shape: The 5D spatiotemporal input shape of size [batch_size,
num_frames, image_height, image_width, num_channels]. Set the field or a
shape position in the field to None for dynamic input.
bundle_input_init_states_fn: Add init_states as a function signature to the
saved model. This is not necessary if the input shape is static (e.g., for
TF Lite).
checkpoint_path: Checkpoint path to load. Leave blank for default
initialization.
assert_checkpoint_objects_matched: Whether to check the checkpoint objects
exactly match those of the model.
"""
input_specs = tf.keras.layers.InputSpec(shape=input_shape)
# Override swish activation implementation to remove custom gradients
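  # (tf.nn.swish registers a custom gradient, which can complicate
  # serialization; 'simple_swish' is assumed here to be the plain
  # x * tf.sigmoid(x) form without a custom gradient.)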
if activation == 'swish':
activation = 'simple_swish'
if classifier_activation == 'swish':
classifier_activation = 'simple_swish'
backbone = movinet.Movinet(
model_id=model_id,
causal=causal,
use_positional_encoding=use_positional_encoding,
conv_type=conv_type,
se_type=se_type,
input_specs=input_specs,
activation=activation,
gating_activation=gating_activation,
use_sync_bn=False,
use_external_states=causal)
model = movinet_model.MovinetClassifier(
backbone,
num_classes=num_classes,
output_states=causal,
input_specs=dict(image=input_specs),
activation=classifier_activation)
export_saved_model(
model=model,
input_shape=input_shape,
export_path=export_path,
causal=causal,
bundle_input_init_states_fn=bundle_input_init_states_fn,
checkpoint_path=checkpoint_path,
assert_checkpoint_objects_matched=assert_checkpoint_objects_matched,
)
def main(_) -> None:
input_shape = (
FLAGS.batch_size, FLAGS.num_frames, FLAGS.image_size, FLAGS.image_size, 3)
build_and_export_saved_model(
export_path=FLAGS.export_path,
model_id=FLAGS.model_id,
causal=FLAGS.causal,
conv_type=FLAGS.conv_type,
se_type=FLAGS.se_type,
activation=FLAGS.activation,
classifier_activation=FLAGS.classifier_activation,
gating_activation=FLAGS.gating_activation,
use_positional_encoding=FLAGS.use_positional_encoding,
num_classes=FLAGS.num_classes,
input_shape=input_shape,
bundle_input_init_states_fn=FLAGS.bundle_input_init_states_fn,
checkpoint_path=FLAGS.checkpoint_path,
assert_checkpoint_objects_matched=FLAGS.assert_checkpoint_objects_matched,
)
print(' ----- Done. Saved Model is saved at {}'.format(FLAGS.export_path))
if __name__ == '__main__':
app.run(main)
| 11,694 | 36.009494 | 80 | py |
models | models-master/official/projects/movinet/tools/convert_3d_2plus1d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts '3d_2plus1d' checkpoints into '2plus1d'."""
from absl import app
from absl import flags
import tensorflow as tf
from official.projects.movinet.modeling import movinet
from official.projects.movinet.modeling import movinet_model
flags.DEFINE_string(
'input_checkpoint_path', None,
'Checkpoint path to load.')
flags.DEFINE_string(
'output_checkpoint_path', None,
'Export path to save the saved_model file.')
flags.DEFINE_string(
'model_id', 'a0', 'MoViNet model name.')
flags.DEFINE_string(
'se_type', '2plus3d', 'MoViNet model SE type.')
flags.DEFINE_bool(
'causal', True, 'Run the model in causal mode.')
flags.DEFINE_bool(
'use_positional_encoding', False,
'Whether to use positional encoding (only applied when causal=True).')
flags.DEFINE_integer(
'num_classes', 600, 'The number of classes for prediction.')
flags.DEFINE_bool(
'verify_output', False, 'Verify the output matches between the models.')
FLAGS = flags.FLAGS
def main(_) -> None:
backbone_2plus1d = movinet.Movinet(
model_id=FLAGS.model_id,
causal=FLAGS.causal,
conv_type='2plus1d',
se_type=FLAGS.se_type,
use_positional_encoding=FLAGS.use_positional_encoding)
model_2plus1d = movinet_model.MovinetClassifier(
backbone=backbone_2plus1d,
num_classes=FLAGS.num_classes)
model_2plus1d.build([1, 1, 1, 1, 3])
backbone_3d_2plus1d = movinet.Movinet(
model_id=FLAGS.model_id,
causal=FLAGS.causal,
conv_type='3d_2plus1d',
se_type=FLAGS.se_type,
use_positional_encoding=FLAGS.use_positional_encoding)
model_3d_2plus1d = movinet_model.MovinetClassifier(
backbone=backbone_3d_2plus1d,
num_classes=FLAGS.num_classes)
model_3d_2plus1d.build([1, 1, 1, 1, 3])
checkpoint = tf.train.Checkpoint(model=model_3d_2plus1d)
status = checkpoint.restore(FLAGS.input_checkpoint_path)
status.assert_existing_objects_matched()
# Ensure both models have the same weights
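  # The '3d_2plus1d' variant expresses a (2+1)D kernel as two Conv3D ops with
  # singleton dimensions (e.g., 1x3x3 followed by 5x1x1), while '2plus1d'
  # uses Conv2D kernels. The loop below squeezes the singleton axis and, for
  # depthwise kernels, transposes the last two axes to match the Conv3D vs.
  # DepthwiseConv2D kernel layouts.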
weights = []
for var_2plus1d, var_3d_2plus1d in zip(
model_2plus1d.get_weights(), model_3d_2plus1d.get_weights()):
if var_2plus1d.shape == var_3d_2plus1d.shape:
weights.append(var_3d_2plus1d)
else:
if var_3d_2plus1d.shape[0] == 1:
weight = var_3d_2plus1d[0]
else:
weight = var_3d_2plus1d[:, 0]
if weight.shape[-1] != var_2plus1d.shape[-1]:
# Transpose any depthwise kernels (conv3d --> depthwise_conv2d)
weight = tf.transpose(weight, perm=(0, 1, 3, 2))
weights.append(weight)
model_2plus1d.set_weights(weights)
if FLAGS.verify_output:
inputs = tf.random.uniform([1, 6, 64, 64, 3], dtype=tf.float32)
logits_2plus1d = model_2plus1d(inputs)
logits_3d_2plus1d = model_3d_2plus1d(inputs)
    if tf.reduce_max(tf.abs(logits_2plus1d - logits_3d_2plus1d)) > 1e-5:
raise ValueError('Bad conversion, model outputs do not match.')
save_checkpoint = tf.train.Checkpoint(
model=model_2plus1d, backbone=backbone_2plus1d)
save_checkpoint.save(FLAGS.output_checkpoint_path)
if __name__ == '__main__':
flags.mark_flag_as_required('input_checkpoint_path')
flags.mark_flag_as_required('output_checkpoint_path')
app.run(main)
| 3,809 | 33.954128 | 76 | py |
models | models-master/official/projects/movinet/tools/convert_3d_2plus1d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for convert_3d_2plus1d."""
import os
from absl import flags
import tensorflow as tf
from official.projects.movinet.modeling import movinet
from official.projects.movinet.modeling import movinet_model
from official.projects.movinet.tools import convert_3d_2plus1d
FLAGS = flags.FLAGS
class Convert3d2plus1dTest(tf.test.TestCase):
def test_convert_model(self):
saved_model_path = self.get_temp_dir()
input_checkpoint_path = os.path.join(saved_model_path, 'ckpt-input')
output_checkpoint_path = os.path.join(saved_model_path, 'ckpt')
model_3d_2plus1d = movinet_model.MovinetClassifier(
backbone=movinet.Movinet(
model_id='a0',
conv_type='3d_2plus1d',
se_type='2plus3d'),
num_classes=600)
model_3d_2plus1d.build([1, 1, 1, 1, 3])
save_checkpoint = tf.train.Checkpoint(model=model_3d_2plus1d)
save_checkpoint.save(input_checkpoint_path)
FLAGS.input_checkpoint_path = f'{input_checkpoint_path}-1'
FLAGS.output_checkpoint_path = output_checkpoint_path
FLAGS.model_id = 'a0'
FLAGS.use_positional_encoding = False
FLAGS.num_classes = 600
FLAGS.verify_output = True
convert_3d_2plus1d.main('unused_args')
print(os.listdir(saved_model_path))
self.assertTrue(tf.io.gfile.exists(f'{output_checkpoint_path}-1.index'))
if __name__ == '__main__':
tf.test.main()
| 1,997 | 31.225806 | 76 | py |
models | models-master/official/projects/movinet/tools/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/movinet/tools/quantize_movinet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generates example dataset for post-training quantization.
Example command line to run the script:
```shell
python3 quantize_movinet.py \
--saved_model_dir=${SAVED_MODEL_DIR} \
--saved_model_with_states_dir=${SAVED_MODEL_WITH_STATES_DIR} \
--output_dataset_dir=${OUTPUT_DATASET_DIR} \
--output_tflite=${OUTPUT_TFLITE} \
--quantization_mode='int_float_fallback' \
--save_dataset_to_tfrecords=True
```
"""
import functools
from typing import Any, Callable, Mapping, Optional
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
from official.vision.configs import video_classification as video_classification_configs
from official.vision.tasks import video_classification
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string(
'saved_model_dir', None, 'The saved_model directory.')
flags.DEFINE_string(
'saved_model_with_states_dir', None,
    'The directory of the saved_model with the state signature. '
'The saved_model_with_states is needed in order to get the initial state '
'shape and dtype while saved_model is used for the quantization.')
flags.DEFINE_string(
'output_tflite', '/tmp/output.tflite',
'The output tflite file path.')
flags.DEFINE_integer(
'temporal_stride', 5,
'Temporal stride used to generate input videos.')
flags.DEFINE_integer(
    'num_frames', 50, 'Number of frames in the input videos.')
flags.DEFINE_integer(
    'image_size', 172, 'Frame size of the input videos.')
flags.DEFINE_string(
'quantization_mode', None,
'The quantization mode. Can be one of "float16", "int8",'
'"int_float_fallback" or None.')
flags.DEFINE_integer(
'num_calibration_videos', 100,
'Number of videos to run to generate example datasets.')
flags.DEFINE_integer(
'num_samples_per_video', 3,
    'Number of samples drawn from a single video.')
flags.DEFINE_boolean(
'save_dataset_to_tfrecords', False,
'Whether to save representative dataset to the disk.')
flags.DEFINE_string(
'output_dataset_dir', '/tmp/representative_dataset/',
'The directory to store exported tfrecords.')
flags.DEFINE_integer(
'max_saved_files', 100,
'The maximum number of tfrecord files to save.')
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _build_tf_example(feature):
return tf.train.Example(
features=tf.train.Features(feature=feature)).SerializeToString()
def save_to_tfrecord(input_frame: tf.Tensor,
input_states: Mapping[str, tf.Tensor],
frame_index: int,
predictions: tf.Tensor,
output_states: Mapping[str, tf.Tensor],
groundtruth_label_id: tf.Tensor,
output_dataset_dir: str,
file_index: int):
"""Save results to tfrecord."""
features = {}
features['frame_id'] = _int64_feature([frame_index])
features['groundtruth_label'] = _int64_feature(
groundtruth_label_id.numpy().flatten().tolist())
features['predictions'] = _float_feature(
predictions.numpy().flatten().tolist())
image_string = tf.io.encode_png(
tf.squeeze(tf.cast(input_frame * 255., tf.uint8), axis=[0, 1]))
features['image'] = _bytes_feature(image_string.numpy())
# Input/Output states at time T
for k, v in output_states.items():
dtype = v[0].dtype
if dtype == tf.int32:
features['input/' + k] = _int64_feature(
input_states[k].numpy().flatten().tolist())
features['output/' + k] = _int64_feature(
output_states[k].numpy().flatten().tolist())
elif dtype == tf.float32:
features['input/' + k] = _float_feature(
input_states[k].numpy().flatten().tolist())
features['output/' + k] = _float_feature(
output_states[k].numpy().flatten().tolist())
else:
      raise ValueError(f'Unrecognized dtype: {dtype}')
tfe = _build_tf_example(features)
record_file = '{}/movinet_stream_{:06d}.tfrecords'.format(
output_dataset_dir, file_index)
logging.info('Saving to %s.', record_file)
with tf.io.TFRecordWriter(record_file) as writer:
writer.write(tfe)
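# Illustrative sketch of reading one of the records above back. Only the
# fixed keys are parsed here; the per-state 'input/*' and 'output/*' keys
# vary by model, so parse those with tf.io.VarLenFeature as needed (the
# glob pattern below assumes the default output_dataset_dir):
#   def _parse_example(serialized):
#     features = tf.io.parse_single_example(
#         serialized,
#         {
#             'frame_id': tf.io.FixedLenFeature([1], tf.int64),
#             'groundtruth_label': tf.io.VarLenFeature(tf.int64),
#             'predictions': tf.io.VarLenFeature(tf.float32),
#             'image': tf.io.FixedLenFeature([], tf.string),
#         })
#     image = tf.io.decode_png(features['image'])
#     return image, features['predictions']
#   ds = tf.data.TFRecordDataset(
#       tf.io.gfile.glob('/tmp/representative_dataset/*.tfrecords'))
#   ds = ds.map(_parse_example)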
def get_dataset() -> tf.data.Dataset:
"""Gets dataset source."""
config = video_classification_configs.video_classification_kinetics600()
temporal_stride = FLAGS.temporal_stride
num_frames = FLAGS.num_frames
image_size = FLAGS.image_size
feature_shape = (num_frames, image_size, image_size, 3)
config.task.validation_data.global_batch_size = 1
config.task.validation_data.feature_shape = feature_shape
config.task.validation_data.temporal_stride = temporal_stride
  config.task.validation_data.min_image_size = int(1.125 * image_size)
config.task.validation_data.dtype = 'float32'
config.task.validation_data.drop_remainder = False
task = video_classification.VideoClassificationTask(config.task)
valid_dataset = task.build_inputs(config.task.validation_data)
valid_dataset = valid_dataset.map(lambda x, y: (x['image'], y))
valid_dataset = valid_dataset.prefetch(32)
return valid_dataset
def stateful_representative_dataset_generator(
model: tf.keras.Model,
dataset_iter: Any,
init_states: Mapping[str, tf.Tensor],
save_dataset_to_tfrecords: bool = False,
max_saved_files: int = 100,
output_dataset_dir: Optional[str] = None,
num_samples_per_video: int = 3,
num_calibration_videos: int = 100):
"""Generates sample input data with states.
Args:
model: the inference keras model.
dataset_iter: the dataset source.
init_states: the initial states for the model.
save_dataset_to_tfrecords: whether to save the representative dataset to
tfrecords on disk.
max_saved_files: the max number of saved tfrecords files.
output_dataset_dir: the directory to store the saved tfrecords.
num_samples_per_video: number of randomly sampled frames per video.
num_calibration_videos: number of calibration videos to run.
Yields:
A dictionary of model inputs.
"""
counter = 0
for i in range(num_calibration_videos):
if i % 100 == 0:
      logging.info('Reading representative dataset id %d.', i)
example_input, example_label = next(dataset_iter)
groundtruth_label_id = tf.argmax(example_label, axis=-1)
input_states = init_states
    # Split the video into frames along the temporal dimension.
frames = tf.split(example_input, example_input.shape[1], axis=1)
random_indices = np.random.randint(
low=1, high=len(frames), size=num_samples_per_video)
    # Always include the first frame.
random_indices[0] = 0
random_indices = set(random_indices)
for frame_index, frame in enumerate(frames):
predictions, output_states = model({'image': frame, **input_states})
if frame_index in random_indices:
if save_dataset_to_tfrecords and counter < max_saved_files:
save_to_tfrecord(
input_frame=frame,
input_states=input_states,
frame_index=frame_index,
predictions=predictions,
output_states=output_states,
groundtruth_label_id=groundtruth_label_id,
output_dataset_dir=output_dataset_dir,
file_index=counter)
yield {'image': frame, **input_states}
counter += 1
      # Update states for the next inference step.
input_states = output_states
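# Illustrative wiring of the generator above (quantize_movinet below does the
# same): bind the keyword arguments with functools.partial so the converter
# receives a zero-argument callable:
#   representative_dataset = functools.partial(
#       stateful_representative_dataset_generator,
#       model=model,  # assumed: a keras model wrapping the saved_model
#       dataset_iter=iter(get_dataset()),
#       init_states=init_states)  # assumed: from the init_states signature
#   converter.representative_dataset = representative_dataset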
def get_tflite_converter(
saved_model_dir: str,
quantization_mode: str,
representative_dataset: Optional[Callable[..., Any]] = None
) -> tf.lite.TFLiteConverter:
"""Gets tflite converter."""
converter = tf.lite.TFLiteConverter.from_saved_model(
saved_model_dir=saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
if quantization_mode == 'float16':
logging.info('Using float16 quantization.')
converter.target_spec.supported_types = [tf.float16]
elif quantization_mode == 'int8':
    logging.info('Using full integer quantization.')
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
elif quantization_mode == 'int_float_fallback':
    logging.info('Using integer quantization with floating-point fallback.')
converter.representative_dataset = representative_dataset
else:
logging.info('Using dynamic range quantization.')
return converter
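# Illustrative use of get_tflite_converter: float16 quantization needs no
# representative dataset, so conversion is a two-step call (paths below are
# hypothetical):
#   converter = get_tflite_converter(
#       saved_model_dir='/tmp/movinet_saved_model',
#       quantization_mode='float16')
#   tflite_buffer = converter.convert()
#   with open('/tmp/movinet_fp16.tflite', 'wb') as f:
#     f.write(tflite_buffer)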
def quantize_movinet(dataset_fn):
"""Quantizes Movinet."""
valid_dataset = dataset_fn()
dataset_iter = iter(valid_dataset)
# Load model
encoder = hub.KerasLayer(FLAGS.saved_model_with_states_dir, trainable=False)
inputs = tf.keras.layers.Input(
shape=[1, FLAGS.image_size, FLAGS.image_size, 3],
dtype=tf.float32,
name='image')
# Define the state inputs, which is a dict that maps state names to tensors.
init_states_fn = encoder.resolved_object.signatures['init_states']
state_shapes = {
name: ([s if s > 0 else None for s in state.shape], state.dtype)
for name, state in init_states_fn(
tf.constant([1, 1, FLAGS.image_size, FLAGS.image_size, 3])).items()
}
states_input = {
name: tf.keras.Input(shape[1:], dtype=dtype, name=name)
for name, (shape, dtype) in state_shapes.items()
}
# The inputs to the model are the states and the video
inputs = {**states_input, 'image': inputs}
outputs = encoder(inputs)
model = tf.keras.Model(inputs, outputs, name='movinet_stream')
input_shape = tf.constant(
[1, FLAGS.num_frames, FLAGS.image_size, FLAGS.image_size, 3])
init_states = init_states_fn(input_shape)
  # Configure the representative_dataset_fn.
representative_dataset = functools.partial(
stateful_representative_dataset_generator,
model=model,
dataset_iter=dataset_iter,
init_states=init_states,
save_dataset_to_tfrecords=FLAGS.save_dataset_to_tfrecords,
max_saved_files=FLAGS.max_saved_files,
output_dataset_dir=FLAGS.output_dataset_dir,
num_samples_per_video=FLAGS.num_samples_per_video,
num_calibration_videos=FLAGS.num_calibration_videos)
converter = get_tflite_converter(
saved_model_dir=FLAGS.saved_model_dir,
quantization_mode=FLAGS.quantization_mode,
representative_dataset=representative_dataset)
logging.info('Converting...')
tflite_buffer = converter.convert()
return tflite_buffer
def main(_):
tflite_buffer = quantize_movinet(dataset_fn=get_dataset)
with open(FLAGS.output_tflite, 'wb') as f:
f.write(tflite_buffer)
logging.info('tflite model written to %s', FLAGS.output_tflite)
if __name__ == '__main__':
flags.mark_flag_as_required('saved_model_dir')
flags.mark_flag_as_required('saved_model_with_states_dir')
app.run(main)
| 11,994 | 35.129518 | 88 | py |
models | models-master/official/projects/movinet/configs/movinet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for MoViNet structures.
Reference: "MoViNets: Mobile Video Networks for Efficient Video Recognition"
https://arxiv.org/pdf/2103.11511.pdf
MoViNets are efficient video classification networks that are part of a model
family, ranging from the smallest model, MoViNet-A0, to the largest model,
MoViNet-A6. Each model has various width, depth, input resolution, and input
frame-rate associated with them. See the main paper for more details.
"""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision.configs import backbones_3d
from official.vision.configs import common
from official.vision.configs import video_classification
@dataclasses.dataclass
class Movinet(hyperparams.Config):
"""Backbone config for Base MoViNet."""
model_id: str = 'a0'
causal: bool = False
use_positional_encoding: bool = False
# Choose from ['3d', '2plus1d', '3d_2plus1d']
# 3d: default 3D convolution
# 2plus1d: (2+1)D convolution with Conv2D (2D reshaping)
# 3d_2plus1d: (2+1)D convolution with Conv3D (no 2D reshaping)
conv_type: str = '3d'
# Choose from ['3d', '2d', '2plus3d']
# 3d: default 3D global average pooling.
# 2d: 2D global average pooling.
# 2plus3d: concatenation of 2D and 3D global average pooling.
se_type: str = '3d'
activation: str = 'swish'
gating_activation: str = 'sigmoid'
stochastic_depth_drop_rate: float = 0.2
use_external_states: bool = False
average_pooling_type: str = '3d'
output_states: bool = True
@dataclasses.dataclass
class MovinetA0(Movinet):
"""Backbone config for MoViNet-A0.
Represents the smallest base MoViNet searched by NAS.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
model_id: str = 'a0'
@dataclasses.dataclass
class MovinetA1(Movinet):
"""Backbone config for MoViNet-A1."""
model_id: str = 'a1'
@dataclasses.dataclass
class MovinetA2(Movinet):
"""Backbone config for MoViNet-A2."""
model_id: str = 'a2'
@dataclasses.dataclass
class MovinetA3(Movinet):
"""Backbone config for MoViNet-A3."""
model_id: str = 'a3'
@dataclasses.dataclass
class MovinetA4(Movinet):
"""Backbone config for MoViNet-A4."""
model_id: str = 'a4'
@dataclasses.dataclass
class MovinetA5(Movinet):
"""Backbone config for MoViNet-A5.
Represents the largest base MoViNet searched by NAS.
"""
model_id: str = 'a5'
@dataclasses.dataclass
class MovinetT0(Movinet):
"""Backbone config for MoViNet-T0.
MoViNet-T0 is a smaller version of MoViNet-A0 for even faster processing.
"""
model_id: str = 't0'
@dataclasses.dataclass
class Backbone3D(backbones_3d.Backbone3D):
"""Configuration for backbones.
Attributes:
    type: 'str', type of backbone to be used, one of the fields below.
movinet: movinet backbone config.
"""
type: str = 'movinet'
movinet: Movinet = dataclasses.field(default_factory=Movinet)
@dataclasses.dataclass
class MovinetModel(video_classification.VideoClassificationModel):
"""The MoViNet model config."""
model_type: str = 'movinet'
backbone: Backbone3D = dataclasses.field(default_factory=Backbone3D)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
activation=None, # legacy flag, not used.
norm_momentum=0.99,
norm_epsilon=1e-3,
use_sync_bn=True,
)
)
activation: str = 'swish'
output_states: bool = False
@exp_factory.register_config_factory('movinet_kinetics600')
def movinet_kinetics600() -> cfg.ExperimentConfig:
"""Video classification on Videonet with MoViNet backbone."""
exp = video_classification.video_classification_kinetics600()
exp.task.train_data.dtype = 'bfloat16'
exp.task.validation_data.dtype = 'bfloat16'
model = MovinetModel()
exp.task.model = model
return exp
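# Illustrative usage of the registered factory: retrieve the config by name
# and override fields like any other experiment config:
#   config = exp_factory.get_exp_config('movinet_kinetics600')
#   config.task.model.backbone.movinet.model_id = 'a2'
#   config.task.model.backbone.movinet.causal = True
#   config.validate()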
| 4,538 | 28.666667 | 85 | py |
models | models-master/official/projects/movinet/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/movinet/configs/movinet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for movinet video classification."""
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.movinet.configs import movinet
from official.vision.configs import video_classification as exp_cfg
class MovinetConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('movinet_kinetics600',),)
def test_video_classification_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.VideoClassificationTask)
self.assertIsInstance(config.task.model, movinet.MovinetModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,596 | 36.139535 | 74 | py |
models | models-master/official/projects/movinet/modeling/movinet_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for MoViNets.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.modeling.layers import nn_layers
# Default kernel weight decay that may be overridden
KERNEL_WEIGHT_DECAY = 1.5e-5
def normalize_tuple(value: Union[int, Tuple[int, ...]], size: int, name: str):
"""Transforms a single integer or iterable of integers into an integer tuple.
  Args:
    value: The value to validate and convert. Could be an int, or any
      iterable of ints.
size: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
Returns:
A tuple of `size` integers.
Raises:
ValueError: If something else than an int/long or iterable thereof was
passed.
"""
if isinstance(value, int):
return (value,) * size
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(size) + ' integers. Received: ' + str(value))
if len(value_tuple) != size:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(size) + ' integers. Received: ' + str(value))
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(size) + ' integers. Received: ' + str(value) + ' '
'including element ' + str(single_value) + ' of type' +
' ' + str(type(single_value)))
return value_tuple
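# Illustrative examples of normalize_tuple:
#   normalize_tuple(3, 3, 'kernel_size')       # -> (3, 3, 3)
#   normalize_tuple((1, 3, 3), 3, 'strides')   # -> (1, 3, 3)
#   normalize_tuple((3, 3), 3, 'kernel_size')  # raises ValueError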
@tf.keras.utils.register_keras_serializable(package='Vision')
class Squeeze3D(tf.keras.layers.Layer):
"""Squeeze3D layer to remove singular dimensions."""
def call(self, inputs):
"""Calls the layer with the given inputs."""
return tf.squeeze(inputs, axis=(1, 2, 3))
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileConv2D(tf.keras.layers.Layer):
"""Conv2D layer with extra options to support mobile devices.
Reshapes 5D video tensor inputs to 4D, allowing Conv2D to run across
dimensions (2, 3) or (3, 4). Reshapes tensors back to 5D when returning the
output.
"""
def __init__(
self,
filters: int,
kernel_size: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]] = (1, 1),
padding: str = 'valid',
data_format: Optional[str] = None,
dilation_rate: Union[int, Sequence[int]] = (1, 1),
groups: int = 1,
use_bias: bool = True,
kernel_initializer: str = 'glorot_uniform',
bias_initializer: str = 'zeros',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
activity_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
kernel_constraint: Optional[tf.keras.constraints.Constraint] = None,
bias_constraint: Optional[tf.keras.constraints.Constraint] = None,
use_depthwise: bool = False,
use_temporal: bool = False,
use_buffered_input: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
batch_norm_op: Optional[Any] = None,
activation_op: Optional[Any] = None,
**kwargs): # pylint: disable=g-doc-args
"""Initializes mobile conv2d.
For the majority of arguments, see tf.keras.layers.Conv2D.
Args:
use_depthwise: if True, use DepthwiseConv2D instead of Conv2D
use_temporal: if True, apply Conv2D starting from the temporal dimension
instead of the spatial dimensions.
use_buffered_input: if True, the input is expected to be padded
beforehand. In effect, calling this layer will use 'valid' padding on
the temporal dimension to simulate 'causal' padding.
      batch_norm_op: A callable batch norm layer. If None, no batch
        norm will be applied after the convolution.
      activation_op: A callable activation layer. If None, no
        activation will be applied after the convolution.
**kwargs: keyword arguments to be passed to this layer.
Returns:
      An output tensor of the MobileConv2D operation.
"""
super(MobileConv2D, self).__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._padding = padding
self._data_format = data_format
self._dilation_rate = dilation_rate
self._groups = groups
self._use_bias = use_bias
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activity_regularizer = activity_regularizer
self._kernel_constraint = kernel_constraint
self._bias_constraint = bias_constraint
self._use_depthwise = use_depthwise
self._use_temporal = use_temporal
self._use_buffered_input = use_buffered_input
self._batch_norm_op = batch_norm_op
self._activation_op = activation_op
kernel_size = normalize_tuple(kernel_size, 2, 'kernel_size')
if self._use_temporal and kernel_size[1] > 1:
raise ValueError('Temporal conv with spatial kernel is not supported.')
if use_depthwise:
self._conv = nn_layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
padding=padding,
depth_multiplier=1,
data_format=data_format,
dilation_rate=dilation_rate,
use_bias=use_bias,
depthwise_initializer=kernel_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=kernel_constraint,
bias_constraint=bias_constraint,
use_buffered_input=use_buffered_input)
else:
self._conv = nn_layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
use_buffered_input=use_buffered_input)
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'padding': self._padding,
'data_format': self._data_format,
'dilation_rate': self._dilation_rate,
'groups': self._groups,
'use_bias': self._use_bias,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activity_regularizer': self._activity_regularizer,
'kernel_constraint': self._kernel_constraint,
'bias_constraint': self._bias_constraint,
'use_depthwise': self._use_depthwise,
'use_temporal': self._use_temporal,
'use_buffered_input': self._use_buffered_input,
}
base_config = super(MobileConv2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Calls the layer with the given inputs."""
if self._use_temporal:
input_shape = [
tf.shape(inputs)[0],
tf.shape(inputs)[1],
tf.shape(inputs)[2] * tf.shape(inputs)[3],
inputs.shape[4]]
else:
input_shape = [
tf.shape(inputs)[0] * tf.shape(inputs)[1],
tf.shape(inputs)[2],
tf.shape(inputs)[3],
inputs.shape[4]]
x = tf.reshape(inputs, input_shape)
x = self._conv(x)
if self._batch_norm_op is not None:
x = self._batch_norm_op(x)
if self._activation_op is not None:
x = self._activation_op(x)
if self._use_temporal:
output_shape = [
tf.shape(x)[0],
tf.shape(x)[1],
tf.shape(inputs)[2],
tf.shape(inputs)[3],
x.shape[3]]
else:
output_shape = [
tf.shape(inputs)[0],
tf.shape(inputs)[1],
tf.shape(x)[1],
tf.shape(x)[2],
x.shape[3]]
x = tf.reshape(x, output_shape)
return x
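# Illustrative shape walkthrough for MobileConv2D.call above, assuming an
# input of [batch, frames, height, width, channels] = [2, 8, 56, 56, 64]:
#   - spatial mode (use_temporal=False): reshape to [2 * 8, 56, 56, 64], run
#     the 2D conv over (height, width), then reshape back to 5D.
#   - temporal mode (use_temporal=True): reshape to [2, 8, 56 * 56, 64], run
#     the 2D conv with a (k, 1) kernel over (frames, height * width), then
#     reshape back to 5D.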
@tf.keras.utils.register_keras_serializable(package='Vision')
class ConvBlock(tf.keras.layers.Layer):
"""A Conv followed by optional BatchNorm and Activation."""
def __init__(
self,
filters: int,
kernel_size: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]] = 1,
depthwise: bool = False,
causal: bool = False,
use_bias: bool = False,
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] =
tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY),
use_batch_norm: bool = True,
batch_norm_layer: tf.keras.layers.Layer =
tf.keras.layers.BatchNormalization,
batch_norm_momentum: float = 0.99,
batch_norm_epsilon: float = 1e-3,
use_sync_bn: bool = False,
activation: Optional[Any] = None,
conv_type: str = '3d',
use_buffered_input: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initializes a conv block.
Args:
filters: filters for the conv operation.
kernel_size: kernel size for the conv operation.
strides: strides for the conv operation.
depthwise: if True, use DepthwiseConv2D instead of Conv2D
causal: if True, use causal mode for the conv operation.
use_bias: use bias for the conv operation.
kernel_initializer: kernel initializer for the conv operation.
kernel_regularizer: kernel regularizer for the conv operation.
use_batch_norm: if True, apply batch norm after the conv operation.
batch_norm_layer: class to use for batch norm, if applied.
batch_norm_momentum: momentum of the batch norm operation, if applied.
batch_norm_epsilon: epsilon of the batch norm operation, if applied.
use_sync_bn: if True, use synchronized batch normalization.
activation: activation after the conv and batch norm operations.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
use_buffered_input: if True, the input is expected to be padded
beforehand. In effect, calling this layer will use 'valid' padding on
the temporal dimension to simulate 'causal' padding.
**kwargs: keyword arguments to be passed to this layer.
Returns:
      An output tensor of the ConvBlock operation.
"""
super(ConvBlock, self).__init__(**kwargs)
kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
strides = normalize_tuple(strides, 3, 'strides')
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._depthwise = depthwise
self._causal = causal
self._use_bias = use_bias
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._use_batch_norm = use_batch_norm
self._batch_norm_layer = batch_norm_layer
self._batch_norm_momentum = batch_norm_momentum
self._batch_norm_epsilon = batch_norm_epsilon
self._use_sync_bn = use_sync_bn
self._activation = activation
self._conv_type = conv_type
self._use_buffered_input = use_buffered_input
if activation is not None:
self._activation_layer = tf_utils.get_activation(
activation, use_keras_layer=True)
else:
self._activation_layer = None
self._groups = None
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'depthwise': self._depthwise,
'causal': self._causal,
'use_bias': self._use_bias,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'use_batch_norm': self._use_batch_norm,
'batch_norm_momentum': self._batch_norm_momentum,
'batch_norm_epsilon': self._batch_norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
'conv_type': self._conv_type,
'use_buffered_input': self._use_buffered_input,
}
base_config = super(ConvBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
"""Builds the layer with the given input shape."""
padding = 'causal' if self._causal else 'same'
self._groups = input_shape[-1] if self._depthwise else 1
self._batch_norm = None
self._batch_norm_temporal = None
if self._use_batch_norm:
self._batch_norm = self._batch_norm_layer(
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon,
synchronized=self._use_sync_bn,
name='bn')
if self._conv_type != '3d' and self._kernel_size[0] > 1:
self._batch_norm_temporal = self._batch_norm_layer(
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon,
synchronized=self._use_sync_bn,
name='bn_temporal')
self._conv_temporal = None
if self._conv_type == '3d_2plus1d' and self._kernel_size[0] > 1:
self._conv = nn_layers.Conv3D(
self._filters,
(1, self._kernel_size[1], self._kernel_size[2]),
strides=(1, self._strides[1], self._strides[2]),
padding='same',
groups=self._groups,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_buffered_input=False,
name='conv3d')
self._conv_temporal = nn_layers.Conv3D(
self._filters,
(self._kernel_size[0], 1, 1),
strides=(self._strides[0], 1, 1),
padding=padding,
groups=self._groups,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_buffered_input=self._use_buffered_input,
name='conv3d_temporal')
elif self._conv_type == '2plus1d':
self._conv = MobileConv2D(
self._filters,
(self._kernel_size[1], self._kernel_size[2]),
strides=(self._strides[1], self._strides[2]),
padding='same',
use_depthwise=self._depthwise,
groups=self._groups,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_buffered_input=False,
batch_norm_op=self._batch_norm,
activation_op=self._activation_layer,
name='conv2d')
if self._kernel_size[0] > 1:
self._conv_temporal = MobileConv2D(
self._filters,
(self._kernel_size[0], 1),
strides=(self._strides[0], 1),
padding=padding,
use_temporal=True,
use_depthwise=self._depthwise,
groups=self._groups,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_buffered_input=self._use_buffered_input,
batch_norm_op=self._batch_norm_temporal,
activation_op=self._activation_layer,
name='conv2d_temporal')
else:
self._conv = nn_layers.Conv3D(
self._filters,
self._kernel_size,
strides=self._strides,
padding=padding,
groups=self._groups,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_buffered_input=self._use_buffered_input,
name='conv3d')
super(ConvBlock, self).build(input_shape)
def call(self, inputs):
"""Calls the layer with the given inputs."""
x = inputs
# bn_op and activation_op are folded into the '2plus1d' conv layer so that
# we do not explicitly call them here.
# TODO(lzyuan): clean the conv layers api once the models are re-trained.
x = self._conv(x)
if self._batch_norm is not None and self._conv_type != '2plus1d':
x = self._batch_norm(x)
if self._activation_layer is not None and self._conv_type != '2plus1d':
x = self._activation_layer(x)
if self._conv_temporal is not None:
x = self._conv_temporal(x)
if self._batch_norm_temporal is not None and self._conv_type != '2plus1d':
x = self._batch_norm_temporal(x)
if self._activation_layer is not None and self._conv_type != '2plus1d':
x = self._activation_layer(x)
return x
@tf.keras.utils.register_keras_serializable(package='Vision')
class StreamBuffer(tf.keras.layers.Layer):
"""Stream buffer wrapper which caches activations of previous frames."""
def __init__(self,
buffer_size: int,
state_prefix: Optional[str] = None,
**kwargs):
"""Initializes a stream buffer.
Args:
buffer_size: the number of input frames to cache.
state_prefix: a prefix string to identify states.
**kwargs: keyword arguments to be passed to this layer.
Returns:
      An output tensor of the StreamBuffer operation.
"""
super(StreamBuffer, self).__init__(**kwargs)
state_prefix = state_prefix if state_prefix is not None else ''
self._state_prefix = state_prefix
self._state_name = f'{state_prefix}_stream_buffer'
self._buffer_size = buffer_size
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'buffer_size': self._buffer_size,
'state_prefix': self._state_prefix,
}
base_config = super(StreamBuffer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(
self,
inputs: tf.Tensor,
states: Optional[nn_layers.States] = None,
) -> Tuple[Any, nn_layers.States]:
"""Calls the layer with the given inputs.
Args:
inputs: the input tensor.
states: a dict of states such that, if any of the keys match for this
layer, will overwrite the contents of the buffer(s).
Expected keys include `state_prefix + '_stream_buffer'`.
Returns:
the output tensor and states
"""
states = dict(states) if states is not None else {}
buffer = states.get(self._state_name, None)
# Create the buffer if it does not exist in the states.
# Output buffer shape:
# [batch_size, buffer_size, input_height, input_width, num_channels]
if buffer is None:
shape = tf.shape(inputs)
buffer = tf.zeros(
[shape[0], self._buffer_size, shape[2], shape[3], shape[4]],
dtype=inputs.dtype)
# tf.pad has limited support for tf lite, so use tf.concat instead.
full_inputs = tf.concat([buffer, inputs], axis=1)
# Cache the last b frames of the input where b is the buffer size and f
# is the number of input frames. If b > f, then we will cache the last b - f
# frames from the previous buffer concatenated with the current f input
# frames.
new_buffer = full_inputs[:, -self._buffer_size:]
states[self._state_name] = new_buffer
return full_inputs, states
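# Illustrative streaming usage of StreamBuffer (a minimal sketch): feeding
# chunks sequentially while threading the states dict reproduces the padding
# a single long clip would have seen:
#   buffer = StreamBuffer(buffer_size=2, state_prefix='block0')
#   chunk_a = tf.ones([1, 4, 8, 8, 3])
#   chunk_b = tf.ones([1, 4, 8, 8, 3])
#   padded_a, states = buffer(chunk_a, states={})  # 2 zero frames + 4 frames
#   padded_b, states = buffer(chunk_b, states=states)  # last 2 of chunk_a + 4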
@tf.keras.utils.register_keras_serializable(package='Vision')
class StreamConvBlock(ConvBlock):
"""ConvBlock with StreamBuffer."""
def __init__(
self,
filters: int,
kernel_size: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]] = 1,
depthwise: bool = False,
causal: bool = False,
use_bias: bool = False,
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
.regularizers.L2(KERNEL_WEIGHT_DECAY),
use_batch_norm: bool = True,
batch_norm_layer: tf.keras.layers.Layer =
tf.keras.layers.BatchNormalization,
batch_norm_momentum: float = 0.99,
batch_norm_epsilon: float = 1e-3,
use_sync_bn: bool = False,
activation: Optional[Any] = None,
conv_type: str = '3d',
state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initializes a stream conv block.
Args:
filters: filters for the conv operation.
kernel_size: kernel size for the conv operation.
strides: strides for the conv operation.
depthwise: if True, use DepthwiseConv2D instead of Conv2D
causal: if True, use causal mode for the conv operation.
use_bias: use bias for the conv operation.
kernel_initializer: kernel initializer for the conv operation.
kernel_regularizer: kernel regularizer for the conv operation.
use_batch_norm: if True, apply batch norm after the conv operation.
batch_norm_layer: class to use for batch norm, if applied.
batch_norm_momentum: momentum of the batch norm operation, if applied.
batch_norm_epsilon: epsilon of the batch norm operation, if applied.
use_sync_bn: if True, use synchronized batch normalization.
activation: activation after the conv and batch norm operations.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
state_prefix: a prefix string to identify states.
**kwargs: keyword arguments to be passed to this layer.
Returns:
      An output tensor of the StreamConvBlock operation.
"""
kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
buffer_size = kernel_size[0] - 1
use_buffer = buffer_size > 0 and causal
self._state_prefix = state_prefix
super(StreamConvBlock, self).__init__(
filters,
kernel_size,
strides=strides,
depthwise=depthwise,
causal=causal,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
use_batch_norm=use_batch_norm,
batch_norm_layer=batch_norm_layer,
batch_norm_momentum=batch_norm_momentum,
batch_norm_epsilon=batch_norm_epsilon,
use_sync_bn=use_sync_bn,
activation=activation,
conv_type=conv_type,
use_buffered_input=use_buffer,
**kwargs)
self._stream_buffer = None
if use_buffer:
self._stream_buffer = StreamBuffer(
buffer_size=buffer_size, state_prefix=state_prefix)
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {'state_prefix': self._state_prefix}
base_config = super(StreamConvBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
inputs: tf.Tensor,
states: Optional[nn_layers.States] = None
) -> Tuple[tf.Tensor, nn_layers.States]:
"""Calls the layer with the given inputs.
Args:
inputs: the input tensor.
states: a dict of states such that, if any of the keys match for this
layer, will overwrite the contents of the buffer(s).
Returns:
the output tensor and states
"""
states = dict(states) if states is not None else {}
x = inputs
# If we have no separate temporal conv, use the buffer before the 3D conv.
if self._conv_temporal is None and self._stream_buffer is not None:
x, states = self._stream_buffer(x, states=states)
# bn_op and activation_op are folded into the '2plus1d' conv layer so that
# we do not explicitly call them here.
# TODO(lzyuan): clean the conv layers api once the models are re-trained.
x = self._conv(x)
if self._batch_norm is not None and self._conv_type != '2plus1d':
x = self._batch_norm(x)
if self._activation_layer is not None and self._conv_type != '2plus1d':
x = self._activation_layer(x)
if self._conv_temporal is not None:
if self._stream_buffer is not None:
# If we have a separate temporal conv, use the buffer before the
# 1D conv instead (otherwise, we may waste computation on the 2D conv).
x, states = self._stream_buffer(x, states=states)
x = self._conv_temporal(x)
if self._batch_norm_temporal is not None and self._conv_type != '2plus1d':
x = self._batch_norm_temporal(x)
if self._activation_layer is not None and self._conv_type != '2plus1d':
x = self._activation_layer(x)
return x, states
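# Illustrative per-frame streaming with StreamConvBlock (a minimal sketch,
# assuming inference mode): the states dict carries the temporal buffer
# between calls so frame-by-frame outputs can match full-clip outputs:
#   block = StreamConvBlock(
#       filters=8, kernel_size=(3, 1, 1), causal=True, state_prefix='s0')
#   states = {}
#   for frame in tf.split(tf.ones([1, 4, 8, 8, 3]), 4, axis=1):
#     out, states = block(frame, states=states)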
@tf.keras.utils.register_keras_serializable(package='Vision')
class StreamSqueezeExcitation(tf.keras.layers.Layer):
"""Squeeze and excitation layer with causal mode.
Reference: https://arxiv.org/pdf/1709.01507.pdf
"""
def __init__(
self,
hidden_filters: int,
se_type: str = '3d',
activation: nn_layers.Activation = 'swish',
gating_activation: nn_layers.Activation = 'sigmoid',
causal: bool = False,
conv_type: str = '3d',
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
.regularizers.L2(KERNEL_WEIGHT_DECAY),
use_positional_encoding: bool = False,
state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Implementation for squeeze and excitation.
Args:
hidden_filters: The hidden filters of squeeze excite.
se_type: '3d', '2d', or '2plus3d'. '3d' uses the default 3D
spatiotemporal global average pooling for squeeze excitation. '2d'
uses 2D spatial global average pooling on each frame. '2plus3d'
concatenates both 3D and 2D global average pooling.
activation: name of the activation function.
gating_activation: name of the activation function for gating.
causal: if True, use causal mode in the global average pool.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
kernel_initializer: kernel initializer for the conv operations.
kernel_regularizer: kernel regularizer for the conv operation.
use_positional_encoding: add a positional encoding after the (cumulative)
global average pooling layer.
state_prefix: a prefix string to identify states.
**kwargs: keyword arguments to be passed to this layer.
"""
super(StreamSqueezeExcitation, self).__init__(**kwargs)
self._hidden_filters = hidden_filters
self._se_type = se_type
self._activation = activation
self._gating_activation = gating_activation
self._causal = causal
self._conv_type = conv_type
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._use_positional_encoding = use_positional_encoding
self._state_prefix = state_prefix
self._spatiotemporal_pool = nn_layers.GlobalAveragePool3D(
keepdims=True, causal=causal, state_prefix=state_prefix)
self._spatial_pool = nn_layers.SpatialAveragePool3D(keepdims=True)
self._pos_encoding = None
if use_positional_encoding:
self._pos_encoding = nn_layers.PositionalEncoding(
initializer='zeros', state_prefix=state_prefix)
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'hidden_filters': self._hidden_filters,
'se_type': self._se_type,
'activation': self._activation,
'gating_activation': self._gating_activation,
'causal': self._causal,
'conv_type': self._conv_type,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'use_positional_encoding': self._use_positional_encoding,
'state_prefix': self._state_prefix,
}
base_config = super(StreamSqueezeExcitation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
"""Builds the layer with the given input shape."""
self._se_reduce = ConvBlock(
filters=self._hidden_filters,
kernel_size=1,
causal=self._causal,
use_bias=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_batch_norm=False,
activation=self._activation,
conv_type=self._conv_type,
name='se_reduce')
self._se_expand = ConvBlock(
filters=input_shape[-1],
kernel_size=1,
causal=self._causal,
use_bias=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_batch_norm=False,
activation=self._gating_activation,
conv_type=self._conv_type,
name='se_expand')
super(StreamSqueezeExcitation, self).build(input_shape)
def call(self,
inputs: tf.Tensor,
states: Optional[nn_layers.States] = None
) -> Tuple[tf.Tensor, nn_layers.States]:
"""Calls the layer with the given inputs.
Args:
inputs: the input tensor.
states: a dict of states such that, if any of the keys match for this
layer, will overwrite the contents of the buffer(s).
Returns:
the output tensor and states
"""
states = dict(states) if states is not None else {}
if self._se_type == '3d':
x, states = self._spatiotemporal_pool(
inputs, states=states, output_states=True)
elif self._se_type == '2d':
x = self._spatial_pool(inputs)
elif self._se_type == '2plus3d':
x_space = self._spatial_pool(inputs)
x, states = self._spatiotemporal_pool(
x_space, states=states, output_states=True)
if not self._causal:
x = tf.tile(x, [1, tf.shape(inputs)[1], 1, 1, 1])
x = tf.concat([x, x_space], axis=-1)
else:
raise ValueError('Unknown Squeeze Excitation type {}'.format(
self._se_type))
if self._pos_encoding is not None:
x, states = self._pos_encoding(x, states=states)
x = self._se_reduce(x)
x = self._se_expand(x)
return x * inputs, states
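# Summary of the gating above: with pooled descriptor p(x) (3D, 2D, or their
# concatenation depending on se_type), the layer computes
# gate = gating_activation(W2 * activation(W1 * p(x))) and returns
# gate * inputs, broadcasting the gate over any pooled dimensions.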
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileBottleneck(tf.keras.layers.Layer):
"""A depthwise inverted bottleneck block.
Uses dependency injection to allow flexible definition of different layers
within this block.
"""
def __init__(self,
expansion_layer: tf.keras.layers.Layer,
feature_layer: tf.keras.layers.Layer,
projection_layer: tf.keras.layers.Layer,
attention_layer: Optional[tf.keras.layers.Layer] = None,
skip_layer: Optional[tf.keras.layers.Layer] = None,
stochastic_depth_drop_rate: Optional[float] = None,
**kwargs):
"""Implementation for mobile bottleneck.
Args:
expansion_layer: initial layer used for pointwise expansion.
feature_layer: main layer used for computing 3D features.
projection_layer: layer used for pointwise projection.
attention_layer: optional layer used for attention-like operations (e.g.,
squeeze excite).
skip_layer: optional skip layer used to project the input before summing
with the output for the residual connection.
stochastic_depth_drop_rate: optional drop rate for stochastic depth.
**kwargs: keyword arguments to be passed to this layer.
"""
super(MobileBottleneck, self).__init__(**kwargs)
self._projection_layer = projection_layer
self._attention_layer = attention_layer
self._skip_layer = skip_layer
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._identity = tf.keras.layers.Activation(tf.identity)
self._rezero = nn_layers.Scale(initializer='zeros', name='rezero')
if stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
stochastic_depth_drop_rate, name='stochastic_depth')
else:
self._stochastic_depth = None
self._feature_layer = feature_layer
self._expansion_layer = expansion_layer
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
}
base_config = super(MobileBottleneck, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
inputs: tf.Tensor,
states: Optional[nn_layers.States] = None
) -> Tuple[tf.Tensor, nn_layers.States]:
"""Calls the layer with the given inputs.
Args:
inputs: the input tensor.
states: a dict of states such that, if any of the keys match for this
layer, will overwrite the contents of the buffer(s).
Returns:
the output tensor and states
"""
states = dict(states) if states is not None else {}
x = self._expansion_layer(inputs)
x, states = self._feature_layer(x, states=states)
if self._attention_layer is not None:
x, states = self._attention_layer(x, states=states)
x = self._projection_layer(x)
# Add identity so that the ops are ordered as written. This is useful for,
# e.g., quantization.
x = self._identity(x)
x = self._rezero(x)
if self._stochastic_depth is not None:
x = self._stochastic_depth(x)
if self._skip_layer is not None:
skip = self._skip_layer(inputs)
else:
skip = inputs
return x + skip, states
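# Note on the residual above: the rezero scale is zero-initialized, so each
# MobileBottleneck starts as an identity mapping (output == skip), which
# tends to stabilize early training of deep residual stacks.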
@tf.keras.utils.register_keras_serializable(package='Vision')
class SkipBlock(tf.keras.layers.Layer):
"""Skip block for bottleneck blocks."""
def __init__(
self,
out_filters: int,
downsample: bool = False,
conv_type: str = '3d',
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] =
tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY),
batch_norm_layer: tf.keras.layers.Layer =
tf.keras.layers.BatchNormalization,
batch_norm_momentum: float = 0.99,
batch_norm_epsilon: float = 1e-3, # pytype: disable=annotation-type-mismatch # typed-keras
use_sync_bn: bool = False,
**kwargs):
"""Implementation for skip block.
Args:
out_filters: the number of projected output filters.
downsample: if True, downsamples the input by a factor of 2 by applying
average pooling with a 3x3 kernel size on the spatial dimensions.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
kernel_initializer: kernel initializer for the conv operations.
kernel_regularizer: kernel regularizer for the conv projection.
batch_norm_layer: class to use for batch norm.
batch_norm_momentum: momentum of the batch norm operation.
batch_norm_epsilon: epsilon of the batch norm operation.
use_sync_bn: if True, use synchronized batch normalization.
**kwargs: keyword arguments to be passed to this layer.
"""
super(SkipBlock, self).__init__(**kwargs)
self._out_filters = out_filters
self._downsample = downsample
self._conv_type = conv_type
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._batch_norm_layer = batch_norm_layer
self._batch_norm_momentum = batch_norm_momentum
self._batch_norm_epsilon = batch_norm_epsilon
self._use_sync_bn = use_sync_bn
self._projection = ConvBlock(
filters=self._out_filters,
kernel_size=1,
conv_type=conv_type,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
use_batch_norm=True,
batch_norm_layer=self._batch_norm_layer,
batch_norm_momentum=self._batch_norm_momentum,
batch_norm_epsilon=self._batch_norm_epsilon,
use_sync_bn=self._use_sync_bn,
name='skip_project')
if downsample:
if self._conv_type == '2plus1d':
self._pool = tf.keras.layers.AveragePooling2D(
pool_size=(3, 3),
strides=(2, 2),
padding='same',
name='skip_pool')
else:
self._pool = tf.keras.layers.AveragePooling3D(
pool_size=(1, 3, 3),
strides=(1, 2, 2),
padding='same',
name='skip_pool')
else:
self._pool = None
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'out_filters': self._out_filters,
'downsample': self._downsample,
'conv_type': self._conv_type,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'batch_norm_momentum': self._batch_norm_momentum,
'batch_norm_epsilon': self._batch_norm_epsilon,
'use_sync_bn': self._use_sync_bn
}
base_config = super(SkipBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Calls the layer with the given inputs."""
x = inputs
if self._pool is not None:
if self._conv_type == '2plus1d':
x = tf.reshape(x, [-1, tf.shape(x)[2], tf.shape(x)[3], x.shape[4]])
x = self._pool(x)
if self._conv_type == '2plus1d':
x = tf.reshape(
x,
[tf.shape(inputs)[0], -1, tf.shape(x)[1],
tf.shape(x)[2], x.shape[3]])
return self._projection(x)
@tf.keras.utils.register_keras_serializable(package='Vision')
class MovinetBlock(tf.keras.layers.Layer):
"""A basic block for MoViNets.
Applies a mobile inverted bottleneck with pointwise expansion, 3D depthwise
convolution, 3D squeeze excite, pointwise projection, and residual connection.
"""
def __init__(
self,
out_filters: int,
expand_filters: int,
kernel_size: Union[int, Sequence[int]] = (3, 3, 3),
strides: Union[int, Sequence[int]] = (1, 1, 1),
causal: bool = False,
activation: nn_layers.Activation = 'swish',
gating_activation: nn_layers.Activation = 'sigmoid',
se_ratio: float = 0.25,
stochastic_depth_drop_rate: float = 0.,
conv_type: str = '3d',
se_type: str = '3d',
use_positional_encoding: bool = False,
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
.regularizers.L2(KERNEL_WEIGHT_DECAY),
batch_norm_layer: tf.keras.layers.Layer =
tf.keras.layers.BatchNormalization,
batch_norm_momentum: float = 0.99,
batch_norm_epsilon: float = 1e-3,
use_sync_bn: bool = False,
state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Implementation for MoViNet block.
Args:
out_filters: number of output filters for the final projection.
expand_filters: number of expansion filters after the input.
kernel_size: kernel size of the main depthwise convolution.
strides: strides of the main depthwise convolution.
causal: if True, run the temporal convolutions in causal mode.
activation: activation to use across all conv operations.
gating_activation: gating activation to use in squeeze excitation layers.
se_ratio: squeeze excite filters ratio.
stochastic_depth_drop_rate: optional drop rate for stochastic depth.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
se_type: '3d', '2d', or '2plus3d'. '3d' uses the default 3D
spatiotemporal global average pooling for squeeze excitation. '2d'
uses 2D spatial global average pooling on each frame. '2plus3d'
concatenates both 3D and 2D global average pooling.
use_positional_encoding: add a positional encoding after the (cumulative)
global average pooling layer in the squeeze excite layer.
kernel_initializer: kernel initializer for the conv operations.
kernel_regularizer: kernel regularizer for the conv operations.
batch_norm_layer: class to use for batch norm.
batch_norm_momentum: momentum of the batch norm operation.
batch_norm_epsilon: epsilon of the batch norm operation.
use_sync_bn: if True, use synchronized batch normalization.
state_prefix: a prefix string to identify states.
**kwargs: keyword arguments to be passed to this layer.
"""
super(MovinetBlock, self).__init__(**kwargs)
self._kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
self._strides = normalize_tuple(strides, 3, 'strides')
# Use a multiplier of 2 if concatenating multiple features
se_multiplier = 2 if se_type == '2plus3d' else 1
se_hidden_filters = nn_layers.make_divisible(
se_ratio * expand_filters * se_multiplier, divisor=8)
self._out_filters = out_filters
self._expand_filters = expand_filters
self._causal = causal
self._activation = activation
self._gating_activation = gating_activation
self._se_ratio = se_ratio
self._downsample = any(s > 1 for s in self._strides)
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._conv_type = conv_type
self._se_type = se_type
self._use_positional_encoding = use_positional_encoding
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._batch_norm_layer = batch_norm_layer
self._batch_norm_momentum = batch_norm_momentum
self._batch_norm_epsilon = batch_norm_epsilon
self._use_sync_bn = use_sync_bn
self._state_prefix = state_prefix
self._expansion = ConvBlock(
expand_filters,
(1, 1, 1),
activation=activation,
conv_type=conv_type,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
use_batch_norm=True,
batch_norm_layer=self._batch_norm_layer,
batch_norm_momentum=self._batch_norm_momentum,
batch_norm_epsilon=self._batch_norm_epsilon,
use_sync_bn=self._use_sync_bn,
name='expansion')
self._feature = StreamConvBlock(
expand_filters,
self._kernel_size,
strides=self._strides,
depthwise=True,
causal=self._causal,
activation=activation,
conv_type=conv_type,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
use_batch_norm=True,
batch_norm_layer=self._batch_norm_layer,
batch_norm_momentum=self._batch_norm_momentum,
batch_norm_epsilon=self._batch_norm_epsilon,
use_sync_bn=self._use_sync_bn,
state_prefix=state_prefix,
name='feature')
self._projection = ConvBlock(
out_filters,
(1, 1, 1),
activation=None,
conv_type=conv_type,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
use_batch_norm=True,
batch_norm_layer=self._batch_norm_layer,
batch_norm_momentum=self._batch_norm_momentum,
batch_norm_epsilon=self._batch_norm_epsilon,
use_sync_bn=self._use_sync_bn,
name='projection')
self._attention = None
if se_type != 'none':
self._attention = StreamSqueezeExcitation(
se_hidden_filters,
se_type=se_type,
activation=activation,
gating_activation=gating_activation,
causal=self._causal,
conv_type=conv_type,
use_positional_encoding=use_positional_encoding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
state_prefix=state_prefix,
name='se')
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'out_filters': self._out_filters,
'expand_filters': self._expand_filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'causal': self._causal,
'activation': self._activation,
'gating_activation': self._gating_activation,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'conv_type': self._conv_type,
'se_type': self._se_type,
'use_positional_encoding': self._use_positional_encoding,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'batch_norm_momentum': self._batch_norm_momentum,
'batch_norm_epsilon': self._batch_norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'state_prefix': self._state_prefix,
}
base_config = super(MovinetBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
"""Builds the layer with the given input shape."""
if input_shape[-1] == self._out_filters and not self._downsample:
self._skip = None
else:
self._skip = SkipBlock(
self._out_filters,
downsample=self._downsample,
conv_type=self._conv_type,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
name='skip')
self._mobile_bottleneck = MobileBottleneck(
self._expansion,
self._feature,
self._projection,
attention_layer=self._attention,
skip_layer=self._skip,
stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,
name='bneck')
super(MovinetBlock, self).build(input_shape)
def call(self,
inputs: tf.Tensor,
states: Optional[nn_layers.States] = None
) -> Tuple[tf.Tensor, nn_layers.States]:
"""Calls the layer with the given inputs.
Args:
inputs: the input tensor.
      states: a dict of states; any entry whose key matches a state used by
        this layer overwrites the contents of the corresponding buffer(s).
Returns:
the output tensor and states
"""
states = dict(states) if states is not None else {}
return self._mobile_bottleneck(inputs, states=states)
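# A minimal usage sketch of MovinetBlock (the shapes and filter counts below
# are illustrative assumptions, not part of the original module). With unit
# strides and out_filters equal to the input channel count, the residual skip
# connection is the identity, so the output shape matches the input shape.
def _movinet_block_example():
  block = MovinetBlock(out_filters=8, expand_filters=24)
  x = tf.ones([1, 8, 32, 32, 8])  # [batch, frames, height, width, channels]
  y, states = block(x)  # non-streaming call; states only matter when causal
  assert y.shape == (1, 8, 32, 32, 8)
  return y, states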
@tf.keras.utils.register_keras_serializable(package='Vision')
class Stem(tf.keras.layers.Layer):
"""Stem layer for video networks.
Applies an initial convolution block operation.
"""
def __init__(
self,
out_filters: int,
kernel_size: Union[int, Sequence[int]],
strides: Union[int, Sequence[int]] = (1, 1, 1),
causal: bool = False,
conv_type: str = '3d',
activation: nn_layers.Activation = 'swish',
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
.regularizers.L2(KERNEL_WEIGHT_DECAY),
batch_norm_layer: tf.keras.layers.Layer =
tf.keras.layers.BatchNormalization,
batch_norm_momentum: float = 0.99,
batch_norm_epsilon: float = 1e-3,
use_sync_bn: bool = False,
state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Implementation for video model stem.
Args:
out_filters: number of output filters.
kernel_size: kernel size of the convolution.
strides: strides of the convolution.
causal: if True, run the temporal convolutions in causal mode.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
activation: the input activation name.
kernel_initializer: kernel initializer for the conv operations.
kernel_regularizer: kernel regularizer for the conv operations.
batch_norm_layer: class to use for batch norm.
batch_norm_momentum: momentum of the batch norm operation.
batch_norm_epsilon: epsilon of the batch norm operation.
use_sync_bn: if True, use synchronized batch normalization.
state_prefix: a prefix string to identify states.
**kwargs: keyword arguments to be passed to this layer.
"""
super(Stem, self).__init__(**kwargs)
self._out_filters = out_filters
self._kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
self._strides = normalize_tuple(strides, 3, 'strides')
self._causal = causal
self._conv_type = conv_type
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._batch_norm_layer = batch_norm_layer
self._batch_norm_momentum = batch_norm_momentum
self._batch_norm_epsilon = batch_norm_epsilon
self._use_sync_bn = use_sync_bn
self._state_prefix = state_prefix
self._stem = StreamConvBlock(
filters=self._out_filters,
kernel_size=self._kernel_size,
strides=self._strides,
causal=self._causal,
activation=self._activation,
conv_type=self._conv_type,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_batch_norm=True,
batch_norm_layer=self._batch_norm_layer,
batch_norm_momentum=self._batch_norm_momentum,
batch_norm_epsilon=self._batch_norm_epsilon,
use_sync_bn=self._use_sync_bn,
state_prefix=self._state_prefix,
name='stem')
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'out_filters': self._out_filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'causal': self._causal,
'activation': self._activation,
'conv_type': self._conv_type,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'batch_norm_momentum': self._batch_norm_momentum,
'batch_norm_epsilon': self._batch_norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'state_prefix': self._state_prefix,
}
base_config = super(Stem, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
inputs: tf.Tensor,
states: Optional[nn_layers.States] = None
) -> Tuple[tf.Tensor, nn_layers.States]:
"""Calls the layer with the given inputs.
Args:
inputs: the input tensor.
      states: a dict of states; any entry whose key matches a state used by
        this layer overwrites the contents of the corresponding buffer(s).
Returns:
the output tensor and states
"""
states = dict(states) if states is not None else {}
return self._stem(inputs, states=states)
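# An illustrative sketch of the stem (assumed shapes, not part of the
# original module): with strides of (1, 2, 2) the stem keeps the temporal
# dimension and halves each spatial dimension.
def _stem_example():
  stem = Stem(out_filters=8, kernel_size=(1, 3, 3), strides=(1, 2, 2))
  x = tf.ones([1, 8, 64, 64, 3])
  y, states = stem(x)
  assert y.shape == (1, 8, 32, 32, 8)
  return y, states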
@tf.keras.utils.register_keras_serializable(package='Vision')
class Head(tf.keras.layers.Layer):
"""Head layer for video networks.
Applies pointwise projection and global pooling.
"""
def __init__(
self,
project_filters: int,
conv_type: str = '3d',
activation: nn_layers.Activation = 'swish',
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
.regularizers.L2(KERNEL_WEIGHT_DECAY),
batch_norm_layer: tf.keras.layers.Layer =
tf.keras.layers.BatchNormalization,
batch_norm_momentum: float = 0.99,
batch_norm_epsilon: float = 1e-3,
use_sync_bn: bool = False,
average_pooling_type: str = '3d',
state_prefix: Optional[str] = None, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Implementation for video model head.
Args:
project_filters: number of pointwise projection filters.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
activation: the input activation name.
kernel_initializer: kernel initializer for the conv operations.
kernel_regularizer: kernel regularizer for the conv operations.
batch_norm_layer: class to use for batch norm.
batch_norm_momentum: momentum of the batch norm operation.
batch_norm_epsilon: epsilon of the batch norm operation.
use_sync_bn: if True, use synchronized batch normalization.
average_pooling_type: The average pooling type. Currently supporting
['3d', '2d', 'none'].
state_prefix: a prefix string to identify states.
**kwargs: keyword arguments to be passed to this layer.
"""
super(Head, self).__init__(**kwargs)
self._project_filters = project_filters
self._conv_type = conv_type
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._batch_norm_layer = batch_norm_layer
self._batch_norm_momentum = batch_norm_momentum
self._batch_norm_epsilon = batch_norm_epsilon
self._use_sync_bn = use_sync_bn
self._state_prefix = state_prefix
self._project = ConvBlock(
filters=project_filters,
kernel_size=1,
activation=activation,
conv_type=conv_type,
kernel_regularizer=kernel_regularizer,
use_batch_norm=True,
batch_norm_layer=self._batch_norm_layer,
batch_norm_momentum=self._batch_norm_momentum,
batch_norm_epsilon=self._batch_norm_epsilon,
use_sync_bn=self._use_sync_bn,
name='project')
if average_pooling_type.lower() == '3d':
self._pool = nn_layers.GlobalAveragePool3D(
keepdims=True, causal=False, state_prefix=state_prefix)
elif average_pooling_type.lower() == '2d':
self._pool = nn_layers.SpatialAveragePool3D(keepdims=True)
    elif average_pooling_type.lower() == 'none':
self._pool = None
else:
raise ValueError(
'%s average_pooling_type is not supported.' % average_pooling_type)
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'project_filters': self._project_filters,
'conv_type': self._conv_type,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'batch_norm_momentum': self._batch_norm_momentum,
'batch_norm_epsilon': self._batch_norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'state_prefix': self._state_prefix,
}
base_config = super(Head, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(
self,
inputs: Union[tf.Tensor, Mapping[str, tf.Tensor]],
states: Optional[nn_layers.States] = None,
) -> Tuple[tf.Tensor, nn_layers.States]:
"""Calls the layer with the given inputs.
Args:
inputs: the input tensor or dict of endpoints.
      states: a dict of states; any entry whose key matches a state used by
        this layer overwrites the contents of the corresponding buffer(s).
Returns:
the output tensor and states
"""
states = dict(states) if states is not None else {}
x = self._project(inputs)
if self._pool is not None:
outputs = self._pool(x, states=states, output_states=True)
else:
outputs = (x, states)
return outputs
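# An illustrative sketch of the head (assumed shapes, not part of the
# original module): the pointwise projection widens the channels, and 3D
# average pooling collapses the spatiotemporal dimensions to 1x1x1.
def _head_example():
  head = Head(project_filters=480)
  x = tf.ones([1, 8, 4, 4, 104])
  y, states = head(x)
  assert y.shape == (1, 1, 1, 1, 480)
  return y, states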
@tf.keras.utils.register_keras_serializable(package='Vision')
class ClassifierHead(tf.keras.layers.Layer):
"""Head layer for video networks.
Applies dense projection, dropout, and classifier projection. Expects input
to be pooled vector with shape [batch_size, 1, 1, 1, num_channels]
"""
def __init__(
self,
head_filters: int,
num_classes: int,
dropout_rate: float = 0.,
conv_type: str = '3d',
activation: nn_layers.Activation = 'swish',
output_activation: Optional[nn_layers.Activation] = None,
max_pool_predictions: bool = False,
kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] =
tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY), # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Implementation for video model classifier head.
Args:
head_filters: number of dense head projection filters.
num_classes: number of output classes for the final logits.
dropout_rate: the dropout rate applied to the head projection.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' splits any 3D ops into two sequential 2D ops with their
own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
uses two sequential 3D ops instead.
activation: the input activation name.
output_activation: optional final activation (e.g., 'softmax').
max_pool_predictions: apply temporal softmax pooling to predictions.
Intended for multi-label prediction, where multiple labels are
distributed across the video. Currently only supports single clips.
kernel_initializer: kernel initializer for the conv operations.
kernel_regularizer: kernel regularizer for the conv operations.
**kwargs: keyword arguments to be passed to this layer.
"""
super(ClassifierHead, self).__init__(**kwargs)
self._head_filters = head_filters
self._num_classes = num_classes
self._dropout_rate = dropout_rate
self._conv_type = conv_type
self._activation = activation
self._output_activation = output_activation
self._max_pool_predictions = max_pool_predictions
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._dropout = tf.keras.layers.Dropout(dropout_rate)
self._head = ConvBlock(
filters=head_filters,
kernel_size=1,
activation=activation,
use_bias=True,
use_batch_norm=False,
conv_type=conv_type,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
name='head')
self._classifier = ConvBlock(
filters=num_classes,
kernel_size=1,
kernel_initializer=tf.keras.initializers.random_normal(stddev=0.01),
kernel_regularizer=None,
use_bias=True,
use_batch_norm=False,
conv_type=conv_type,
name='classifier')
self._max_pool = nn_layers.TemporalSoftmaxPool()
self._squeeze = Squeeze3D()
output_activation = output_activation if output_activation else 'linear'
self._cast = tf.keras.layers.Activation(
output_activation, dtype='float32', name='cast')
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'head_filters': self._head_filters,
'num_classes': self._num_classes,
'dropout_rate': self._dropout_rate,
'conv_type': self._conv_type,
'activation': self._activation,
'output_activation': self._output_activation,
'max_pool_predictions': self._max_pool_predictions,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
}
base_config = super(ClassifierHead, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor) -> tf.Tensor:
"""Calls the layer with the given inputs."""
# Input Shape: [batch_size, 1, 1, 1, input_channels]
x = inputs
x = self._head(x)
if self._dropout_rate and self._dropout_rate > 0:
x = self._dropout(x)
x = self._classifier(x)
if self._max_pool_predictions:
x = self._max_pool(x)
x = self._squeeze(x)
x = self._cast(x)
return x
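# An illustrative sketch of the classifier head (assumed shapes and class
# count, not part of the original module): it maps a pooled
# [batch, 1, 1, 1, channels] vector to per-class logits, with Squeeze3D
# removing the singleton frame and spatial axes.
def _classifier_head_example():
  head = ClassifierHead(head_filters=2048, num_classes=600)
  x = tf.ones([1, 1, 1, 1, 480])
  logits = head(x)
  assert logits.shape == (1, 600)
  return logits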
| 62,639 | 37.979465 | 111 | py |
models | models-master/official/projects/movinet/modeling/movinet_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build Movinet for video classification.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
import tensorflow as tf
from official.projects.movinet.configs import movinet as cfg
from official.projects.movinet.modeling import movinet_layers
from official.vision.modeling import backbones
from official.vision.modeling import factory_3d as model_factory
@tf.keras.utils.register_keras_serializable(package='Vision')
class MovinetClassifier(tf.keras.Model):
"""A video classification class builder."""
def __init__(
self,
backbone: tf.keras.Model,
num_classes: int,
input_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None,
activation: str = 'swish',
dropout_rate: float = 0.0,
kernel_initializer: str = 'HeNormal',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
output_states: bool = False,
**kwargs):
"""Movinet initialization function.
Args:
backbone: A 3d backbone network.
num_classes: Number of classes in classification task.
input_specs: Specs of the input tensor.
activation: name of the main activation function.
dropout_rate: Rate for dropout regularization.
kernel_initializer: Kernel initializer for the final dense layer.
kernel_regularizer: Kernel regularizer.
bias_regularizer: Bias regularizer.
output_states: if True, output intermediate states that can be used to run
the model in streaming mode. Inputting the output states of the
previous input clip with the current input clip will utilize a stream
buffer for streaming video.
**kwargs: Keyword arguments to be passed.
"""
if not input_specs:
input_specs = {
'image': tf.keras.layers.InputSpec(shape=[None, None, None, None, 3])
}
self._num_classes = num_classes
self._input_specs = input_specs
self._activation = activation
self._dropout_rate = dropout_rate
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._output_states = output_states
state_specs = None
if backbone.use_external_states:
state_specs = backbone.initial_state_specs(
input_shape=input_specs['image'].shape)
inputs, outputs = self._build_network(
backbone, input_specs, state_specs=state_specs)
super(MovinetClassifier, self).__init__(
inputs=inputs, outputs=outputs, **kwargs)
# Move backbone after super() call so Keras is happy
self._backbone = backbone
def _build_backbone(
self,
backbone: tf.keras.Model,
input_specs: Mapping[str, tf.keras.layers.InputSpec],
state_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None,
) -> Tuple[Mapping[str, Any], Any, Any]:
"""Builds the backbone network and gets states and endpoints.
Args:
backbone: the model backbone.
input_specs: the model input spec to use.
      state_specs: a dict of state specs; any entry whose key matches a state
        used by a layer overwrites the contents of the corresponding buffer(s).
Returns:
inputs: a dict of input specs.
endpoints: a dict of model endpoints.
states: a dict of model states.
"""
state_specs = state_specs if state_specs is not None else {}
states = {
name: tf.keras.Input(shape=spec.shape[1:], dtype=spec.dtype, name=name)
for name, spec in state_specs.items()
}
image = tf.keras.Input(shape=input_specs['image'].shape[1:], name='image')
inputs = {**states, 'image': image}
if backbone.use_external_states:
before_states = states
endpoints, states = backbone(inputs)
after_states = states
new_states = set(after_states) - set(before_states)
if new_states:
raise ValueError(
'Expected input and output states to be the same. Got extra states '
'{}, expected {}'.format(new_states, set(before_states)))
mismatched_shapes = {}
for name in after_states:
before_shape = before_states[name].shape
after_shape = after_states[name].shape
if len(before_shape) != len(after_shape):
mismatched_shapes[name] = (before_shape, after_shape)
continue
for before, after in zip(before_shape, after_shape):
if before is not None and after is not None and before != after:
mismatched_shapes[name] = (before_shape, after_shape)
break
if mismatched_shapes:
raise ValueError(
'Got mismatched input and output state shapes: {}'.format(
mismatched_shapes))
else:
endpoints, states = backbone(inputs)
return inputs, endpoints, states
def _build_network(
self,
backbone: tf.keras.Model,
input_specs: Mapping[str, tf.keras.layers.InputSpec],
state_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None,
) -> Tuple[Mapping[str, tf.keras.Input], Union[Tuple[Mapping[ # pytype: disable=invalid-annotation # typed-keras
str, tf.Tensor], Mapping[str, tf.Tensor]], Mapping[str, tf.Tensor]]]:
"""Builds the model network.
Args:
backbone: the model backbone.
input_specs: the model input spec to use.
      state_specs: a dict of state specs; any entry whose key matches a state
        used by a layer overwrites the contents of the corresponding buffer(s).
Returns:
Inputs and outputs as a tuple. Inputs are expected to be a dict with
base input and states. Outputs are expected to be a dict of endpoints
and (optionally) output states.
"""
inputs, endpoints, states = self._build_backbone(
backbone=backbone, input_specs=input_specs, state_specs=state_specs)
x = endpoints['head']
x = movinet_layers.ClassifierHead(
head_filters=backbone.head_filters,
num_classes=self._num_classes,
dropout_rate=self._dropout_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
conv_type=backbone.conv_type,
activation=self._activation)(
x)
outputs = (x, states) if self._output_states else x
return inputs, outputs
def initial_state_specs(
self, input_shape: Sequence[int]) -> Dict[str, tf.keras.layers.InputSpec]:
return self._backbone.initial_state_specs(input_shape=input_shape)
@tf.function
def init_states(self, input_shape: Sequence[int]) -> Dict[str, tf.Tensor]:
"""Returns initial states for the first call in steaming mode."""
return self._backbone.init_states(input_shape)
@property
def checkpoint_items(self) -> Dict[str, Any]:
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self) -> tf.keras.Model:
"""Returns the backbone of the model."""
return self._backbone
def get_config(self):
config = {
'backbone': self._backbone,
'activation': self._activation,
'num_classes': self._num_classes,
'input_specs': self._input_specs,
'dropout_rate': self._dropout_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'output_states': self._output_states,
}
return config
@classmethod
def from_config(cls, config, custom_objects=None):
# Each InputSpec may need to be deserialized
# This handles the case where we want to load a saved_model loaded with
# `tf.keras.models.load_model`
if config['input_specs']:
for name in config['input_specs']:
if isinstance(config['input_specs'][name], dict):
config['input_specs'][name] = tf.keras.layers.deserialize(
config['input_specs'][name])
return cls(**config)
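# A minimal streaming-inference sketch mirroring the pattern exercised in the
# unit tests. The backbone import, clip shape, and class count below are
# illustrative assumptions rather than part of this module's API.
def _streaming_inference_example():
  from official.projects.movinet.modeling import movinet
  backbone = movinet.Movinet(model_id='a0', causal=True,
                             use_external_states=True)
  model = MovinetClassifier(backbone, num_classes=600, output_states=True)
  clip = tf.ones([1, 8, 172, 172, 3])
  states = model.init_states(tf.shape(clip))
  logits = None
  for frame in tf.split(clip, clip.shape[1], axis=1):
    # Feed one frame at a time; the returned states carry the stream buffers
    # forward to the next call.
    logits, states = model({**states, 'image': frame})
  return logits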
@model_factory.register_model_builder('movinet')
def build_movinet_model(
input_specs: Mapping[str, tf.keras.layers.InputSpec],
model_config: cfg.MovinetModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds movinet model."""
logging.info('Building movinet model with num classes: %s', num_classes)
if l2_regularizer is not None:
logging.info('Building movinet model with regularizer: %s',
l2_regularizer.get_config())
input_specs_dict = {'image': input_specs}
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=model_config.norm_activation,
l2_regularizer=l2_regularizer)
model = MovinetClassifier(
backbone,
num_classes=num_classes,
kernel_regularizer=l2_regularizer,
input_specs=input_specs_dict,
activation=model_config.activation,
dropout_rate=model_config.dropout_rate,
output_states=model_config.output_states)
return model
| 9,800 | 36.551724 | 116 | py |
models | models-master/official/projects/movinet/modeling/movinet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of Mobile Video Networks.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
import dataclasses
import math
from typing import Dict, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.movinet.modeling import movinet_layers
from official.vision.modeling.backbones import factory
# Defines a set of kernel sizes and stride sizes to simplify and shorten
# architecture definitions for configs below.
KernelSize = Tuple[int, int, int]
# K(ab) represents a 3D kernel of size (a, b, b)
K13: KernelSize = (1, 3, 3)
K15: KernelSize = (1, 5, 5)
K33: KernelSize = (3, 3, 3)
K53: KernelSize = (5, 3, 3)
# S(ab) represents a 3D stride of size (a, b, b)
S11: KernelSize = (1, 1, 1)
S12: KernelSize = (1, 2, 2)
S22: KernelSize = (2, 2, 2)
S21: KernelSize = (2, 1, 1)
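# For example, K53 = (5, 3, 3) is a kernel spanning 5 frames with a 3x3
# spatial extent, and S12 = (1, 2, 2) keeps the temporal resolution while
# halving each spatial dimension.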
# Type for a state container (map)
TensorMap = Mapping[str, tf.Tensor]
@dataclasses.dataclass
class BlockSpec:
"""Configuration of a block."""
@dataclasses.dataclass
class StemSpec(BlockSpec):
"""Configuration of a Movinet block."""
filters: int = 0
kernel_size: KernelSize = (0, 0, 0)
strides: KernelSize = (0, 0, 0)
@dataclasses.dataclass
class MovinetBlockSpec(BlockSpec):
"""Configuration of a Movinet block."""
base_filters: int = 0
expand_filters: Sequence[int] = ()
kernel_sizes: Sequence[KernelSize] = ()
strides: Sequence[KernelSize] = ()
@dataclasses.dataclass
class HeadSpec(BlockSpec):
"""Configuration of a Movinet block."""
project_filters: int = 0
head_filters: int = 0
# Block specs specify the architecture of each model
BLOCK_SPECS = {
'a0': (
StemSpec(filters=8, kernel_size=K13, strides=S12),
MovinetBlockSpec(
base_filters=8,
expand_filters=(24,),
kernel_sizes=(K15,),
strides=(S12,)),
MovinetBlockSpec(
base_filters=32,
expand_filters=(80, 80, 80),
kernel_sizes=(K33, K33, K33),
strides=(S12, S11, S11)),
MovinetBlockSpec(
base_filters=56,
expand_filters=(184, 112, 184),
kernel_sizes=(K53, K33, K33),
strides=(S12, S11, S11)),
MovinetBlockSpec(
base_filters=56,
expand_filters=(184, 184, 184, 184),
kernel_sizes=(K53, K33, K33, K33),
strides=(S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=104,
expand_filters=(384, 280, 280, 344),
kernel_sizes=(K53, K15, K15, K15),
strides=(S12, S11, S11, S11)),
HeadSpec(project_filters=480, head_filters=2048),
),
'a1': (
StemSpec(filters=16, kernel_size=K13, strides=S12),
MovinetBlockSpec(
base_filters=16,
expand_filters=(40, 40),
kernel_sizes=(K15, K33),
strides=(S12, S11)),
MovinetBlockSpec(
base_filters=40,
expand_filters=(96, 120, 96, 96),
kernel_sizes=(K33, K33, K33, K33),
strides=(S12, S11, S11, S11)),
MovinetBlockSpec(
base_filters=64,
expand_filters=(216, 128, 216, 168, 216),
kernel_sizes=(K53, K33, K33, K33, K33),
strides=(S12, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=64,
expand_filters=(216, 216, 216, 128, 128, 216),
kernel_sizes=(K53, K33, K33, K33, K15, K33),
strides=(S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=136,
expand_filters=(456, 360, 360, 360, 456, 456, 544),
kernel_sizes=(K53, K15, K15, K15, K15, K33, K13),
strides=(S12, S11, S11, S11, S11, S11, S11)),
HeadSpec(project_filters=600, head_filters=2048),
),
'a2': (
StemSpec(filters=16, kernel_size=K13, strides=S12),
MovinetBlockSpec(
base_filters=16,
expand_filters=(40, 40, 64),
kernel_sizes=(K15, K33, K33),
strides=(S12, S11, S11)),
MovinetBlockSpec(
base_filters=40,
expand_filters=(96, 120, 96, 96, 120),
kernel_sizes=(K33, K33, K33, K33, K33),
strides=(S12, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=72,
expand_filters=(240, 160, 240, 192, 240),
kernel_sizes=(K53, K33, K33, K33, K33),
strides=(S12, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=72,
expand_filters=(240, 240, 240, 240, 144, 240),
kernel_sizes=(K53, K33, K33, K33, K15, K33),
strides=(S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=144,
expand_filters=(480, 384, 384, 480, 480, 480, 576),
kernel_sizes=(K53, K15, K15, K15, K15, K33, K13),
strides=(S12, S11, S11, S11, S11, S11, S11)),
HeadSpec(project_filters=640, head_filters=2048),
),
'a3': (
StemSpec(filters=16, kernel_size=K13, strides=S12),
MovinetBlockSpec(
base_filters=16,
expand_filters=(40, 40, 64, 40),
kernel_sizes=(K15, K33, K33, K33),
strides=(S12, S11, S11, S11)),
MovinetBlockSpec(
base_filters=48,
expand_filters=(112, 144, 112, 112, 144, 144),
kernel_sizes=(K33, K33, K33, K15, K33, K33),
strides=(S12, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=80,
expand_filters=(240, 152, 240, 192, 240),
kernel_sizes=(K53, K33, K33, K33, K33),
strides=(S12, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=88,
expand_filters=(264, 264, 264, 264, 160, 264, 264, 264),
kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33),
strides=(S11, S11, S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=168,
expand_filters=(560, 448, 448, 560, 560, 560, 448, 448, 560, 672),
kernel_sizes=(K53, K15, K15, K15, K15, K33, K15, K15, K33, K13),
strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
HeadSpec(project_filters=744, head_filters=2048),
),
'a4': (
StemSpec(filters=24, kernel_size=K13, strides=S12),
MovinetBlockSpec(
base_filters=24,
expand_filters=(64, 64, 96, 64, 96, 64),
kernel_sizes=(K15, K33, K33, K33, K33, K33),
strides=(S12, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=56,
expand_filters=(168, 168, 136, 136, 168, 168, 168, 136, 136),
kernel_sizes=(K33, K33, K33, K33, K33, K33, K33, K15, K33),
strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=96,
expand_filters=(320, 160, 320, 192, 320, 160, 320, 256, 320),
kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33),
strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=96,
expand_filters=(320, 320, 320, 320, 192, 320, 320, 192, 320, 320),
kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33, K33, K33),
strides=(S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=192,
expand_filters=(640, 512, 512, 640, 640, 640, 512, 512, 640, 768,
640, 640, 768),
kernel_sizes=(K53, K15, K15, K15, K15, K33, K15, K15, K15, K15, K15,
K33, K33),
strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11,
S11)),
HeadSpec(project_filters=856, head_filters=2048),
),
'a5': (
StemSpec(filters=24, kernel_size=K13, strides=S12),
MovinetBlockSpec(
base_filters=24,
expand_filters=(64, 64, 96, 64, 96, 64),
kernel_sizes=(K15, K15, K33, K33, K33, K33),
strides=(S12, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=64,
expand_filters=(192, 152, 152, 152, 192, 192, 192, 152, 152, 192,
192),
kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33, K33,
K33),
strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=112,
expand_filters=(376, 224, 376, 376, 296, 376, 224, 376, 376, 296,
376, 376, 376),
kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33, K33, K33,
K33, K33),
strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11,
S11)),
MovinetBlockSpec(
base_filters=120,
expand_filters=(376, 376, 376, 376, 224, 376, 376, 224, 376, 376,
376),
kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33, K33, K33,
K33),
strides=(S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=224,
expand_filters=(744, 744, 600, 600, 744, 744, 744, 896, 600, 600,
896, 744, 744, 896, 600, 600, 744, 744),
kernel_sizes=(K53, K33, K15, K15, K15, K15, K33, K15, K15, K15, K15,
K15, K33, K15, K15, K15, K15, K33),
strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11,
S11, S11, S11, S11, S11, S11)),
HeadSpec(project_filters=992, head_filters=2048),
),
't0': (
StemSpec(filters=8, kernel_size=K13, strides=S12),
MovinetBlockSpec(
base_filters=8,
expand_filters=(16,),
kernel_sizes=(K15,),
strides=(S12,)),
MovinetBlockSpec(
base_filters=32,
expand_filters=(72, 72),
kernel_sizes=(K33, K15),
strides=(S12, S11)),
MovinetBlockSpec(
base_filters=56,
expand_filters=(112, 112, 112),
kernel_sizes=(K53, K15, K33),
strides=(S12, S11, S11)),
MovinetBlockSpec(
base_filters=56,
expand_filters=(184, 184, 184, 184),
kernel_sizes=(K53, K15, K33, K33),
strides=(S11, S11, S11, S11)),
MovinetBlockSpec(
base_filters=104,
expand_filters=(344, 344, 344, 344),
kernel_sizes=(K53, K15, K15, K33),
strides=(S12, S11, S11, S11)),
HeadSpec(project_filters=240, head_filters=1024),
),
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class Movinet(tf.keras.Model):
"""Class to build Movinet family model.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
def __init__(self,
model_id: str = 'a0',
causal: bool = False,
use_positional_encoding: bool = False,
conv_type: str = '3d',
se_type: str = '3d',
input_specs: Optional[tf.keras.layers.InputSpec] = None,
activation: str = 'swish',
gating_activation: str = 'sigmoid',
use_sync_bn: bool = True,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'HeNormal',
               kernel_regularizer: Optional[
                   tf.keras.regularizers.Regularizer] = None,
               bias_regularizer: Optional[
                   tf.keras.regularizers.Regularizer] = None,
stochastic_depth_drop_rate: float = 0.,
use_external_states: bool = False,
output_states: bool = True,
average_pooling_type: str = '3d',
**kwargs):
"""MoViNet initialization function.
Args:
model_id: name of MoViNet backbone model.
causal: use causal mode, with CausalConv and CausalSE operations.
use_positional_encoding: if True, adds a positional encoding before
temporal convolutions and the cumulative global average pooling
layers.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' configures the network
to use the default 3D convolution. '2plus1d' uses (2+1)D convolution
with Conv2D operations and 2D reshaping (e.g., a 5x3x3 kernel becomes
3x3 followed by 5x1 conv). '3d_2plus1d' uses (2+1)D convolution with
Conv3D and no 2D reshaping (e.g., a 5x3x3 kernel becomes 1x3x3 followed
by 5x1x1 conv).
se_type: '3d', '2d', '2plus3d' or 'none'. '3d' uses the default 3D
spatiotemporal global average pooling for squeeze excitation. '2d'
uses 2D spatial global average pooling on each frame. '2plus3d'
concatenates both 3D and 2D global average pooling.
input_specs: the model input spec to use.
activation: name of the main activation function.
gating_activation: gating activation to use in squeeze excitation layers.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: normalization momentum for the moving average.
norm_epsilon: small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for the
        convolutional layers. Defaults to None.
      bias_regularizer: tf.keras.regularizers.Regularizer object for the
        convolutional layers. Defaults to None.
stochastic_depth_drop_rate: the base rate for stochastic depth.
use_external_states: if True, expects states to be passed as additional
input.
output_states: if True, output intermediate states that can be used to run
the model in streaming mode. Inputting the output states of the
previous input clip with the current input clip will utilize a stream
buffer for streaming video.
average_pooling_type: The average pooling type. Currently supporting
['3d', '2d', 'none'].
**kwargs: keyword arguments to be passed.
"""
block_specs = BLOCK_SPECS[model_id]
if input_specs is None:
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, None, 3])
if conv_type not in ('3d', '2plus1d', '3d_2plus1d'):
raise ValueError('Unknown conv type: {}'.format(conv_type))
if se_type not in ('3d', '2d', '2plus3d', 'none'):
raise ValueError('Unknown squeeze excitation type: {}'.format(se_type))
self._model_id = model_id
self._block_specs = block_specs
self._causal = causal
self._use_positional_encoding = use_positional_encoding
self._conv_type = conv_type
self._se_type = se_type
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._activation = activation
self._gating_activation = gating_activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._norm = tf.keras.layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._use_external_states = use_external_states
self._output_states = output_states
self._average_pooling_type = average_pooling_type
if self._use_external_states and not self._causal:
raise ValueError('External states should be used with causal mode.')
if not isinstance(block_specs[0], StemSpec):
raise ValueError(
'Expected first spec to be StemSpec, got {}'.format(block_specs[0]))
if not isinstance(block_specs[-1], HeadSpec):
raise ValueError(
'Expected final spec to be HeadSpec, got {}'.format(block_specs[-1]))
self._head_filters = block_specs[-1].head_filters
state_specs = None
if use_external_states:
self._set_dtype_policy(input_specs.dtype)
state_specs = self.initial_state_specs(input_specs.shape)
inputs, outputs = self._build_network(input_specs, state_specs=state_specs)
super(Movinet, self).__init__(inputs=inputs, outputs=outputs, **kwargs)
self._state_specs = state_specs
def _build_network(
self,
input_specs: tf.keras.layers.InputSpec,
state_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None,
) -> Tuple[TensorMap, Union[TensorMap, Tuple[TensorMap, TensorMap]]]:
"""Builds the model network.
Args:
input_specs: the model input spec to use.
      state_specs: a dict mapping a state name to the corresponding state spec.
        State names should match those in the `states` input/output dict.
Returns:
Inputs and outputs as a tuple. Inputs are expected to be a dict with
base input and states. Outputs are expected to be a dict of endpoints
and (optional) output states.
"""
state_specs = state_specs if state_specs is not None else {}
image_input = tf.keras.Input(shape=input_specs.shape[1:], name='inputs')
states = {
name: tf.keras.Input(shape=spec.shape[1:], dtype=spec.dtype, name=name)
for name, spec in state_specs.items()
}
inputs = {**states, 'image': image_input}
endpoints = {}
x = image_input
num_layers = sum(
len(block.expand_filters)
for block in self._block_specs
if isinstance(block, MovinetBlockSpec))
stochastic_depth_idx = 1
for block_idx, block in enumerate(self._block_specs):
if isinstance(block, StemSpec):
layer_obj = movinet_layers.Stem(
block.filters,
block.kernel_size,
block.strides,
conv_type=self._conv_type,
causal=self._causal,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
batch_norm_layer=self._norm,
batch_norm_momentum=self._norm_momentum,
batch_norm_epsilon=self._norm_epsilon,
use_sync_bn=self._use_sync_bn,
state_prefix='state_stem',
name='stem')
x, states = layer_obj(x, states=states)
endpoints['stem'] = x
elif isinstance(block, MovinetBlockSpec):
if not (len(block.expand_filters) == len(block.kernel_sizes) ==
len(block.strides)):
raise ValueError(
'Lengths of block parameters differ: {}, {}, {}'.format(
len(block.expand_filters),
len(block.kernel_sizes),
len(block.strides)))
params = list(zip(block.expand_filters,
block.kernel_sizes,
block.strides))
for layer_idx, layer in enumerate(params):
stochastic_depth_drop_rate = (
self._stochastic_depth_drop_rate * stochastic_depth_idx /
num_layers)
expand_filters, kernel_size, strides = layer
name = f'block{block_idx-1}_layer{layer_idx}'
layer_obj = movinet_layers.MovinetBlock(
block.base_filters,
expand_filters,
kernel_size=kernel_size,
strides=strides,
causal=self._causal,
activation=self._activation,
gating_activation=self._gating_activation,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
conv_type=self._conv_type,
se_type=self._se_type,
use_positional_encoding=
self._use_positional_encoding and self._causal,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
batch_norm_layer=self._norm,
batch_norm_momentum=self._norm_momentum,
batch_norm_epsilon=self._norm_epsilon,
use_sync_bn=self._use_sync_bn,
state_prefix=f'state_{name}',
name=name)
x, states = layer_obj(x, states=states)
endpoints[name] = x
stochastic_depth_idx += 1
elif isinstance(block, HeadSpec):
layer_obj = movinet_layers.Head(
project_filters=block.project_filters,
conv_type=self._conv_type,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
batch_norm_layer=self._norm,
batch_norm_momentum=self._norm_momentum,
batch_norm_epsilon=self._norm_epsilon,
use_sync_bn=self._use_sync_bn,
average_pooling_type=self._average_pooling_type,
state_prefix='state_head',
name='head')
x, states = layer_obj(x, states=states)
endpoints['head'] = x
else:
raise ValueError('Unknown block type {}'.format(block))
outputs = (endpoints, states) if self._output_states else endpoints
return inputs, outputs
def _get_initial_state_shapes(
self,
block_specs: Sequence[BlockSpec],
input_shape: Union[Sequence[int], tf.Tensor],
use_positional_encoding: bool = False) -> Dict[str, Sequence[int]]:
"""Generates names and shapes for all input states.
Args:
block_specs: sequence of specs used for creating a model.
input_shape: the expected 5D shape of the image input.
use_positional_encoding: whether the model will use positional encoding.
Returns:
A dict mapping state names to state shapes.
"""
def divide_resolution(shape, num_downsamples):
"""Downsamples the dimension to calculate strided convolution shape."""
if shape is None:
return None
if isinstance(shape, tf.Tensor):
# Avoid using div and ceil to support tf lite
shape = tf.cast(shape, tf.float32)
resolution_divisor = 2 ** num_downsamples
resolution_multiplier = 0.5 ** num_downsamples
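        # Computes ceil(shape / 2**num_downsamples): adding divisor - 1
        # before multiplying by 1/divisor and truncating back to int32
        # rounds the division up without a div or ceil op.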
shape = ((shape + resolution_divisor - 1) * resolution_multiplier)
return tf.cast(shape, tf.int32)
else:
resolution_divisor = 2 ** num_downsamples
return math.ceil(shape / resolution_divisor)
states = {}
num_downsamples = 0
for block_idx, block in enumerate(block_specs):
if isinstance(block, StemSpec):
if block.kernel_size[0] > 1:
states['state_stem_stream_buffer'] = (
input_shape[0],
input_shape[1],
divide_resolution(input_shape[2], num_downsamples),
divide_resolution(input_shape[3], num_downsamples),
block.filters,
)
num_downsamples += 1
elif isinstance(block, MovinetBlockSpec):
block_idx -= 1
params = list(zip(
block.expand_filters,
block.kernel_sizes,
block.strides))
for layer_idx, layer in enumerate(params):
expand_filters, kernel_size, strides = layer
# If we use a 2D kernel, we apply spatial downsampling
# before the buffer.
if (tuple(strides[1:3]) != (1, 1) and
self._conv_type in ['2plus1d', '3d_2plus1d']):
num_downsamples += 1
prefix = f'state_block{block_idx}_layer{layer_idx}'
if kernel_size[0] > 1:
states[f'{prefix}_stream_buffer'] = (
input_shape[0],
kernel_size[0] - 1,
divide_resolution(input_shape[2], num_downsamples),
divide_resolution(input_shape[3], num_downsamples),
expand_filters,
)
if '3d' in self._se_type:
states[f'{prefix}_pool_buffer'] = (
input_shape[0], 1, 1, 1, expand_filters,
)
states[f'{prefix}_pool_frame_count'] = (1,)
if use_positional_encoding:
name = f'{prefix}_pos_enc_frame_count'
states[name] = (1,)
if strides[1] != strides[2]:
raise ValueError('Strides must match in the spatial dimensions, '
'got {}'.format(strides))
# If we use a 3D kernel, we apply spatial downsampling
# after the buffer.
if (tuple(strides[1:3]) != (1, 1) and
self._conv_type not in ['2plus1d', '3d_2plus1d']):
num_downsamples += 1
elif isinstance(block, HeadSpec):
states['state_head_pool_buffer'] = (
input_shape[0], 1, 1, 1, block.project_filters,
)
states['state_head_pool_frame_count'] = (1,)
return states
def _get_state_dtype(self, name: str) -> str:
"""Returns the dtype associated with a state."""
if 'frame_count' in name:
return 'int32'
return self.dtype
def initial_state_specs(
self, input_shape: Sequence[int]) -> Dict[str, tf.keras.layers.InputSpec]:
"""Creates a mapping of state name to InputSpec from the input shape."""
state_shapes = self._get_initial_state_shapes(
self._block_specs,
input_shape,
use_positional_encoding=self._use_positional_encoding)
return {
name: tf.keras.layers.InputSpec(
shape=shape, dtype=self._get_state_dtype(name))
for name, shape in state_shapes.items()
}
def init_states(self, input_shape: Sequence[int]) -> Dict[str, tf.Tensor]:
"""Returns initial states for the first call in steaming mode."""
state_shapes = self._get_initial_state_shapes(
self._block_specs,
input_shape,
use_positional_encoding=self._use_positional_encoding)
states = {
name: tf.zeros(shape, dtype=self._get_state_dtype(name))
for name, shape in state_shapes.items()
}
return states
@property
def use_external_states(self) -> bool:
"""Whether this model is expecting input states as additional input."""
return self._use_external_states
@property
def head_filters(self):
"""The number of filters expected to be in the head classifer layer."""
return self._head_filters
@property
def conv_type(self):
"""The expected convolution type (see __init__ for more details)."""
return self._conv_type
def get_config(self):
config_dict = {
'model_id': self._model_id,
'causal': self._causal,
'use_positional_encoding': self._use_positional_encoding,
'conv_type': self._conv_type,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'use_external_states': self._use_external_states,
'output_states': self._output_states,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
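# A minimal sketch of running the backbone with externally managed states,
# one frame at a time (the clip shape below is an illustrative assumption).
def _movinet_streaming_backbone_example():
  backbone = Movinet(model_id='a0', causal=True, use_external_states=True)
  clip = tf.ones([1, 5, 128, 128, 3])
  states = backbone.init_states(tf.shape(clip))
  endpoints = None
  for frame in tf.split(clip, clip.shape[1], axis=1):
    # Each call consumes and returns the stream-buffer states.
    endpoints, states = backbone({**states, 'image': frame})
  return endpoints['head']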
@factory.register_backbone_builder('movinet')
def build_movinet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds MoViNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
if backbone_type != 'movinet':
raise ValueError(f'Inconsistent backbone type {backbone_type}')
if norm_activation_config.activation is not None:
    logging.warning('norm_activation is not used in MoViNets, but specified: '
                    '%s', norm_activation_config.activation)
    logging.warning('norm_activation is ignored.')
return Movinet(
model_id=backbone_cfg.model_id,
causal=backbone_cfg.causal,
use_positional_encoding=backbone_cfg.use_positional_encoding,
conv_type=backbone_cfg.conv_type,
se_type=backbone_cfg.se_type,
input_specs=input_specs,
activation=backbone_cfg.activation,
gating_activation=backbone_cfg.gating_activation,
output_states=backbone_cfg.output_states,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer,
stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate,
use_external_states=backbone_cfg.use_external_states,
average_pooling_type=backbone_cfg.average_pooling_type)
| 29,503 | 38.816464 | 139 | py |
models | models-master/official/projects/movinet/modeling/movinet_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for movinet_model.py."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.movinet.modeling import movinet
from official.projects.movinet.modeling import movinet_model
class MovinetModelTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(False, True)
def test_movinet_classifier_creation(self, is_training):
"""Test for creation of a Movinet classifier."""
temporal_size = 16
spatial_size = 224
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(
shape=[None, temporal_size, spatial_size, spatial_size, 3])
backbone = movinet.Movinet(model_id='a0', input_specs=input_specs)
num_classes = 1000
model = movinet_model.MovinetClassifier(
backbone=backbone,
num_classes=num_classes,
input_specs={'image': input_specs},
dropout_rate=0.2)
inputs = np.random.rand(2, temporal_size, spatial_size, spatial_size, 3)
logits = model(inputs, training=is_training)
self.assertAllEqual([2, num_classes], logits.shape)
def test_movinet_classifier_stream(self):
"""Test if the classifier can be run in streaming mode."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
)
model = movinet_model.MovinetClassifier(
backbone, num_classes=600, output_states=True)
inputs = tf.ones([1, 8, 172, 172, 3])
init_states = model.init_states(tf.shape(inputs))
expected, _ = model({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = model({**states, 'image': frame})
predicted = output
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
def test_movinet_classifier_stream_pos_enc(self):
"""Test if the classifier can be run in streaming mode with pos encoding."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
use_positional_encoding=True,
)
model = movinet_model.MovinetClassifier(
backbone, num_classes=600, output_states=True)
inputs = tf.ones([1, 8, 172, 172, 3])
init_states = model.init_states(tf.shape(inputs))
expected, _ = model({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = model({**states, 'image': frame})
predicted = output
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
def test_movinet_classifier_stream_pos_enc_2plus1d(self):
"""Test if the model can run in streaming mode with pos encoding, (2+1)D."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
use_positional_encoding=True,
conv_type='2plus1d',
)
model = movinet_model.MovinetClassifier(
backbone, num_classes=600, output_states=True)
inputs = tf.ones([1, 8, 172, 172, 3])
init_states = model.init_states(tf.shape(inputs))
expected, _ = model({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = model({**states, 'image': frame})
predicted = output
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
def test_movinet_classifier_mobile(self):
"""Test if the model can run with mobile parameters."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
conv_type='2plus1d',
se_type='2plus3d',
activation='hard_swish',
gating_activation='hard_sigmoid'
)
model = movinet_model.MovinetClassifier(
backbone, num_classes=600, output_states=True)
inputs = tf.ones([1, 8, 172, 172, 3])
init_states = model.init_states(tf.shape(inputs))
expected, _ = model({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = model({**states, 'image': frame})
predicted = output
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
def test_serialize_deserialize(self):
"""Validate the classification network can be serialized and deserialized."""
backbone = movinet.Movinet(model_id='a0')
model = movinet_model.MovinetClassifier(backbone=backbone, num_classes=1000)
config = model.get_config()
new_model = movinet_model.MovinetClassifier.from_config(config)
# Validate that the config can be forced to JSON.
new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
def test_saved_model_save_load(self):
backbone = movinet.Movinet('a0')
model = movinet_model.MovinetClassifier(
backbone, num_classes=600)
model.build([1, 5, 172, 172, 3])
model.compile(metrics=['acc'])
tf.keras.models.save_model(model, '/tmp/movinet/')
loaded_model = tf.keras.models.load_model('/tmp/movinet/')
output = loaded_model(dict(image=tf.ones([1, 1, 1, 1, 3])))
self.assertAllEqual(output.shape, [1, 600])
@parameterized.parameters(
('a0', 3.126071),
('a1', 4.717912),
('a2', 5.280922),
('a3', 7.443289),
('a4', 11.422727),
('a5', 18.763355),
('t0', 1.740502),
)
def test_movinet_models(self, model_id, expected_params_millions):
"""Test creation of MoViNet family models with states."""
tf.keras.backend.set_image_data_format('channels_last')
model = movinet_model.MovinetClassifier(
backbone=movinet.Movinet(
model_id=model_id,
causal=True),
num_classes=600)
model.build([1, 1, 1, 1, 3])
num_params_millions = model.count_params() / 1e6
self.assertEqual(num_params_millions, expected_params_millions)
def test_movinet_a0_2plus1d(self):
"""Test creation of MoViNet with 2plus1d configuration."""
tf.keras.backend.set_image_data_format('channels_last')
model_2plus1d = movinet_model.MovinetClassifier(
backbone=movinet.Movinet(
model_id='a0',
conv_type='2plus1d'),
num_classes=600)
model_2plus1d.build([1, 1, 1, 1, 3])
model_3d_2plus1d = movinet_model.MovinetClassifier(
backbone=movinet.Movinet(
model_id='a0',
conv_type='3d_2plus1d'),
num_classes=600)
model_3d_2plus1d.build([1, 1, 1, 1, 3])
# Ensure both models have the same weights
weights = []
for var_2plus1d, var_3d_2plus1d in zip(
model_2plus1d.get_weights(), model_3d_2plus1d.get_weights()):
if var_2plus1d.shape == var_3d_2plus1d.shape:
weights.append(var_3d_2plus1d)
else:
if var_3d_2plus1d.shape[0] == 1:
weight = var_3d_2plus1d[0]
else:
weight = var_3d_2plus1d[:, 0]
if weight.shape[-1] != var_2plus1d.shape[-1]:
# Transpose any depthwise kernels (conv3d --> depthwise_conv2d)
weight = tf.transpose(weight, perm=(0, 1, 3, 2))
weights.append(weight)
model_2plus1d.set_weights(weights)
inputs = tf.ones([2, 8, 172, 172, 3], dtype=tf.float32)
logits_2plus1d = model_2plus1d(inputs)
logits_3d_2plus1d = model_3d_2plus1d(inputs)
# Ensure both models have the same output, since the weights are the same
self.assertAllEqual(logits_2plus1d.shape, logits_3d_2plus1d.shape)
self.assertAllClose(logits_2plus1d, logits_3d_2plus1d, 1e-5, 1e-5)
if __name__ == '__main__':
tf.test.main()
| 8,838 | 32.481061 | 81 | py |
models | models-master/official/projects/movinet/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/movinet/modeling/movinet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for movinet.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.movinet.modeling import movinet
class MoViNetTest(parameterized.TestCase, tf.test.TestCase):
def test_network_creation(self):
"""Test creation of MoViNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = movinet.Movinet(
model_id='a0',
causal=True,
)
inputs = tf.keras.Input(shape=(8, 128, 128, 3), batch_size=1)
endpoints, states = network(inputs)
self.assertAllEqual(endpoints['stem'].shape, [1, 8, 64, 64, 8])
self.assertAllEqual(endpoints['block0_layer0'].shape, [1, 8, 32, 32, 8])
self.assertAllEqual(endpoints['block1_layer0'].shape, [1, 8, 16, 16, 32])
self.assertAllEqual(endpoints['block2_layer0'].shape, [1, 8, 8, 8, 56])
self.assertAllEqual(endpoints['block3_layer0'].shape, [1, 8, 8, 8, 56])
self.assertAllEqual(endpoints['block4_layer0'].shape, [1, 8, 4, 4, 104])
self.assertAllEqual(endpoints['head'].shape, [1, 1, 1, 1, 480])
self.assertNotEmpty(states)
def test_network_with_states(self):
"""Test creation of MoViNet family models with states."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
)
inputs = tf.ones([1, 8, 128, 128, 3])
init_states = backbone.init_states(tf.shape(inputs))
endpoints, new_states = backbone({**init_states, 'image': inputs})
self.assertAllEqual(endpoints['stem'].shape, [1, 8, 64, 64, 8])
self.assertAllEqual(endpoints['block0_layer0'].shape, [1, 8, 32, 32, 8])
self.assertAllEqual(endpoints['block1_layer0'].shape, [1, 8, 16, 16, 32])
self.assertAllEqual(endpoints['block2_layer0'].shape, [1, 8, 8, 8, 56])
self.assertAllEqual(endpoints['block3_layer0'].shape, [1, 8, 8, 8, 56])
self.assertAllEqual(endpoints['block4_layer0'].shape, [1, 8, 4, 4, 104])
self.assertAllEqual(endpoints['head'].shape, [1, 1, 1, 1, 480])
self.assertNotEmpty(init_states)
self.assertNotEmpty(new_states)
def test_movinet_stream(self):
"""Test if the backbone can be run in streaming mode."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
)
inputs = tf.ones([1, 5, 128, 128, 3])
init_states = backbone.init_states(tf.shape(inputs))
expected_endpoints, _ = backbone({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = backbone({**states, 'image': frame})
predicted_endpoints = output
predicted = predicted_endpoints['head']
# The expected final output is simply the mean across frames
expected = expected_endpoints['head']
expected = tf.reduce_mean(expected, 1, keepdims=True)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
def test_movinet_stream_nse(self):
"""Test if the backbone can be run in streaming mode w/o SE layer."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
use_external_states=True,
se_type='none',
)
inputs = tf.ones([1, 5, 128, 128, 3])
init_states = backbone.init_states(tf.shape(inputs))
expected_endpoints, _ = backbone({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = backbone({**states, 'image': frame})
predicted_endpoints = output
predicted = predicted_endpoints['head']
# The expected final output is simply the mean across frames
expected = expected_endpoints['head']
expected = tf.reduce_mean(expected, 1, keepdims=True)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
# Check contents in the states dictionary.
state_keys = list(init_states.keys())
self.assertIn('state_head_pool_buffer', state_keys)
self.assertIn('state_head_pool_frame_count', state_keys)
state_keys.remove('state_head_pool_buffer')
state_keys.remove('state_head_pool_frame_count')
# From now on, there are only 'stream_buffer' for the convolutions.
for state_key in state_keys:
self.assertIn(
'stream_buffer', state_key,
msg=f'Expecting stream_buffer only, found {state_key}')
def test_movinet_2plus1d_stream(self):
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
conv_type='2plus1d',
use_external_states=True,
)
inputs = tf.ones([1, 5, 128, 128, 3])
init_states = backbone.init_states(tf.shape(inputs))
expected_endpoints, _ = backbone({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = backbone({**states, 'image': frame})
predicted_endpoints = output
predicted = predicted_endpoints['head']
# The expected final output is simply the mean across frames
expected = expected_endpoints['head']
expected = tf.reduce_mean(expected, 1, keepdims=True)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
def test_movinet_3d_2plus1d_stream(self):
tf.keras.backend.set_image_data_format('channels_last')
backbone = movinet.Movinet(
model_id='a0',
causal=True,
conv_type='3d_2plus1d',
use_external_states=True,
)
inputs = tf.ones([1, 5, 128, 128, 3])
init_states = backbone.init_states(tf.shape(inputs))
expected_endpoints, _ = backbone({**init_states, 'image': inputs})
frames = tf.split(inputs, inputs.shape[1], axis=1)
states = init_states
for frame in frames:
output, states = backbone({**states, 'image': frame})
predicted_endpoints = output
predicted = predicted_endpoints['head']
# The expected final output is simply the mean across frames
expected = expected_endpoints['head']
expected = tf.reduce_mean(expected, 1, keepdims=True)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='a0',
causal=True,
use_positional_encoding=True,
use_external_states=True,
)
network = movinet.Movinet(**kwargs)
# Create another network object from the first object's config.
new_network = movinet.Movinet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
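# Illustrative sketch (not part of the original test file): why the streaming
# tests above compare against a temporal mean. The causal head pools
# cumulatively over time (see the `state_head_pool_*` states), so after the
# last frame its output equals the mean over all frames.
def _expected_final_head_output(head_endpoint):
  """Mean over the frame axis, keeping a singleton time dimension."""
  return tf.reduce_mean(head_endpoint, axis=1, keepdims=True)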
if __name__ == '__main__':
tf.test.main()
| 7,863 | 33.79646 | 79 | py |
models | models-master/official/projects/movinet/modeling/movinet_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for movinet_layers.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.movinet.modeling import movinet_layers
from official.vision.modeling.layers import nn_layers
class MovinetLayersTest(parameterized.TestCase, tf.test.TestCase):
def test_squeeze3d(self):
squeeze = movinet_layers.Squeeze3D()
inputs = tf.ones([5, 1, 1, 1, 3])
predicted = squeeze(inputs)
expected = tf.ones([5, 3])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllEqual(predicted, expected)
def test_mobile_conv2d(self):
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
kernel_initializer='ones',
use_bias=False,
use_depthwise=False,
use_temporal=False,
use_buffered_input=True,
)
inputs = tf.ones([1, 2, 2, 2, 3])
predicted = conv2d(inputs)
expected = tf.constant(
[[[[[12., 12., 12.],
[12., 12., 12.]],
[[12., 12., 12.],
[12., 12., 12.]]],
[[[12., 12., 12.],
[12., 12., 12.]],
[[12., 12., 12.],
[12., 12., 12.]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_mobile_conv2d_bn(self):
batch_norm_op = tf.keras.layers.BatchNormalization(
momentum=0.9,
epsilon=1.,
name='bn')
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
kernel_initializer='ones',
use_bias=False,
use_depthwise=False,
use_temporal=False,
use_buffered_input=True,
batch_norm_op=batch_norm_op,
)
inputs = tf.ones([1, 2, 2, 2, 3])
predicted = conv2d(inputs)
expected = tf.constant(
[[[[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]],
[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]]],
[[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]],
[[8.48528, 8.48528, 8.48528],
[8.48528, 8.48528, 8.48528]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_mobile_conv2d_activation(self):
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
kernel_initializer='ones',
use_bias=False,
use_depthwise=False,
use_temporal=False,
use_buffered_input=True,
activation_op=tf.nn.relu6,
)
inputs = tf.ones([1, 2, 2, 2, 3])
predicted = conv2d(inputs)
expected = tf.constant(
[[[[[6., 6., 6.],
[6., 6., 6.]],
[[6., 6., 6.],
[6., 6., 6.]]],
[[[6., 6., 6.],
[6., 6., 6.]],
[[6., 6., 6.],
[6., 6., 6.]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_mobile_conv2d_temporal(self):
conv2d = movinet_layers.MobileConv2D(
filters=3,
kernel_size=(3, 1),
strides=(1, 1),
padding='causal',
kernel_initializer='ones',
use_bias=False,
use_depthwise=True,
use_temporal=True,
use_buffered_input=True,
)
inputs = tf.ones([1, 2, 2, 1, 3])
paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]]
padded_inputs = tf.pad(inputs, paddings)
predicted = conv2d(padded_inputs)
expected = tf.constant(
[[[[[1., 1., 1.]],
[[1., 1., 1.]]],
[[[2., 2., 2.]],
[[2., 2., 2.]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_stream_buffer(self):
conv3d_stream = nn_layers.Conv3D(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding='causal',
kernel_initializer='ones',
use_bias=False,
use_buffered_input=True,
)
buffer = movinet_layers.StreamBuffer(buffer_size=2)
conv3d = nn_layers.Conv3D(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding='causal',
kernel_initializer='ones',
use_bias=False,
use_buffered_input=False,
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv3d(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = buffer(frame, states=states)
x = conv3d_stream(x)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[12., 12., 12.]]],
[[[24., 24., 24.]]],
[[[36., 36., 36.]]],
[[[36., 36., 36.]]]]])
def test_stream_conv_block_2plus1d(self):
conv_block = movinet_layers.ConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='2plus1d',
)
stream_conv_block = movinet_layers.StreamConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='2plus1d',
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv_block(inputs)
predicted_disabled, _ = stream_conv_block(inputs)
self.assertEqual(predicted_disabled.shape, expected.shape)
self.assertAllClose(predicted_disabled, expected)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = stream_conv_block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[35.9640400, 35.9640400, 35.9640400]]],
[[[71.9280700, 71.9280700, 71.9280700]]],
[[[107.892105, 107.892105, 107.892105]]],
[[[107.892105, 107.892105, 107.892105]]]]])
def test_stream_conv_block_3d_2plus1d(self):
conv_block = movinet_layers.ConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='3d_2plus1d',
)
stream_conv_block = movinet_layers.StreamConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
conv_type='3d_2plus1d',
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv_block(inputs)
predicted_disabled, _ = stream_conv_block(inputs)
self.assertEqual(predicted_disabled.shape, expected.shape)
self.assertAllClose(predicted_disabled, expected)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = stream_conv_block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[35.9640400, 35.9640400, 35.9640400]]],
[[[71.9280700, 71.9280700, 71.9280700]]],
[[[107.892105, 107.892105, 107.892105]]],
[[[107.892105, 107.892105, 107.892105]]]]])
def test_stream_conv_block(self):
conv_block = movinet_layers.ConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
)
stream_conv_block = movinet_layers.StreamConvBlock(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
kernel_initializer='ones',
use_bias=False,
activation='relu',
)
inputs = tf.ones([1, 4, 2, 2, 3])
expected = conv_block(inputs)
predicted_disabled, _ = stream_conv_block(inputs)
self.assertEqual(predicted_disabled.shape, expected.shape)
self.assertAllClose(predicted_disabled, expected)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = stream_conv_block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[11.994005, 11.994005, 11.994005]]],
[[[23.988010, 23.988010, 23.988010]]],
[[[35.982014, 35.982014, 35.982014]]],
[[[35.982014, 35.982014, 35.982014]]]]])
def test_stream_squeeze_excitation(self):
se = movinet_layers.StreamSqueezeExcitation(
3, causal=True, kernel_initializer='ones')
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, _ = se(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = se(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, 1e-5, 1e-5)
self.assertAllClose(
predicted,
[[[[[0.9998109, 0.9998109, 0.9998109]],
[[0.9998109, 0.9998109, 0.9998109]]],
[[[1.9999969, 1.9999969, 1.9999969]],
[[1.9999969, 1.9999969, 1.9999969]]],
[[[3., 3., 3.]],
[[3., 3., 3.]]],
[[[4., 4., 4.]],
[[4., 4., 4.]]]]],
1e-5, 1e-5)
def test_stream_squeeze_excitation_2plus3d(self):
se = movinet_layers.StreamSqueezeExcitation(
3,
se_type='2plus3d',
causal=True,
activation='hard_swish',
gating_activation='hard_sigmoid',
kernel_initializer='ones')
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, _ = se(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = se(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected, atol=1e-4)
self.assertAllClose(
predicted,
[[[[[1., 1., 1.]],
[[1., 1., 1.]]],
[[[2., 2., 2.]],
[[2., 2., 2.]]],
[[[3., 3., 3.]],
[[3., 3., 3.]]],
[[[4., 4., 4.]],
[[4., 4., 4.]]]]],
atol=1e-4)
def test_stream_movinet_block(self):
block = movinet_layers.MovinetBlock(
out_filters=3,
expand_filters=6,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, _ = block(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_stream_movinet_block_none_se(self):
block = movinet_layers.MovinetBlock(
out_filters=3,
expand_filters=6,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
causal=True,
se_type='none',
state_prefix='test',
)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
expected, expected_states = block(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = block(frame, states=states)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllEqual(list(expected_states.keys()), ['test_stream_buffer'])
def test_stream_classifier_head(self):
head = movinet_layers.Head(project_filters=5)
classifier_head = movinet_layers.ClassifierHead(
head_filters=10, num_classes=4)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 1, 3])
x, _ = head(inputs)
expected = classifier_head(x)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, inputs.shape[1] // num_splits, axis=1)
states = {}
for frame in frames:
x, states = head(frame, states=states)
predicted = classifier_head(x)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
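# Illustrative sketch (not part of the original test file): the
# streaming-equivalence check repeated in the tests above, as one reusable
# helper. `layer` is assumed to follow the (inputs, states) -> (outputs,
# states) interface of the streaming layers exercised here.
def _check_streaming_equivalence(layer, inputs, num_splits=2, atol=1e-5):
  expected, _ = layer(inputs)
  states = {}
  chunks = []
  for frame in tf.split(inputs, inputs.shape[1] // num_splits, axis=1):
    x, states = layer(frame, states=states)
    chunks.append(x)
  predicted = tf.concat(chunks, axis=1)
  tf.debugging.assert_near(predicted, expected, atol=atol)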
if __name__ == '__main__':
tf.test.main()
| 14,957 | 28.975952 | 77 | py |
models | models-master/official/projects/token_dropping/experiment_configs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token dropping BERT experiment configurations.
Only pretraining configs are defined here. Checkpoints produced by
token-dropping BERT are directly compatible with the regular BERT, so you can
use the standard BERT experiments for fine-tuning.
"""
# pylint: disable=g-doc-return-or-yield,line-too-long
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.projects.token_dropping import encoder_config
from official.projects.token_dropping import masked_lm
@exp_factory.register_config_factory('token_drop_bert/pretraining')
def token_drop_bert_pretraining() -> cfg.ExperimentConfig:
"""BERT pretraining with token dropping."""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=masked_lm.TokenDropMaskedLMConfig(
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
any=encoder_config.TokenDropBertEncoderConfig(
vocab_size=30522, num_layers=1, token_keep_k=64),
type='any')),
train_data=pretrain_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=cfg.TrainerConfig(
train_steps=1000000,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 1e-4,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
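# Illustrative usage sketch (not part of the original file): once registered,
# the experiment can be looked up by name through the factory. The override
# values below are assumptions for demonstration, not tuned hyperparameters.
def _example_get_config() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('token_drop_bert/pretraining')
  config.task.train_data.global_batch_size = 512
  config.trainer.train_steps = 10000
  return config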
| 2,915 | 38.945205 | 80 | py |
models | models-master/official/projects/token_dropping/masked_lm.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked language task."""
import dataclasses
from typing import Tuple
import tensorflow as tf
from official.core import task_factory
from official.nlp.tasks import masked_lm
@dataclasses.dataclass
class TokenDropMaskedLMConfig(masked_lm.MaskedLMConfig):
"""The model config."""
pass
@task_factory.register_task_cls(TokenDropMaskedLMConfig)
class TokenDropMaskedLMTask(masked_lm.MaskedLMTask):
"""Task object for Mask language modeling."""
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> Tuple[tf.Tensor, tf.Tensor]:
"""Return the final loss, and the masked-lm loss."""
with tf.name_scope('MaskedLMTask/losses'):
metrics = dict([(metric.name, metric) for metric in metrics])
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['mlm_logits'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses *
lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
model_outputs['next_sentence'], dtype=tf.float32)
sentence_loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True))
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss, lm_prediction_losses
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss, lm_prediction_losses = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
model.encoder_network.record_mlm_loss(
mlm_ids=inputs['masked_lm_ids'],
mlm_losses=lm_prediction_losses)
if self.task_config.scale_loss:
        # Scales the loss because the default gradient allreduce performs a
        # sum (not a mean) across replicas inside the optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
if self.task_config.scale_loss:
grads = tape.gradient(scaled_loss, tvars)
else:
grads = tape.gradient(loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = self.inference_step(inputs, model)
loss, _ = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
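# Illustrative sketch (not part of the original file): the weighted masked-LM
# reduction performed in `build_losses` above, spelled out on toy values.
def _example_weighted_mlm_loss() -> tf.Tensor:
  per_position_loss = tf.constant([[0.5, 1.5, 0.0]])  # Cross-entropy per slot.
  label_weights = tf.constant([[1.0, 1.0, 0.0]])      # 1.0 at masked slots.
  numerator = tf.reduce_sum(per_position_loss * label_weights)  # -> 2.0
  denominator = tf.reduce_sum(label_weights)                    # -> 2.0
  return tf.math.divide_no_nan(numerator, denominator)          # -> 1.0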
| 4,551 | 35.416 | 80 | py |
models | models-master/official/projects/token_dropping/encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based BERT encoder network."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, Optional, Union, Tuple
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
_Initializer = Union[str, tf.keras.initializers.Initializer]
_Activation = Union[str, Callable[..., Any]]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class TokenDropBertEncoder(tf.keras.layers.Layer):
"""Bi-directional Transformer-based encoder network with token dropping.
During pretraining, we drop unimportant tokens starting from an intermediate
layer in the model, to make the model focus on important tokens more
efficiently with its limited computational resources. The dropped tokens are
later picked up by the last layer of the model, so that the model still
produces full-length sequences. This approach reduces the pretraining cost of
BERT by 25% while achieving better overall fine-tuning performance on standard
downstream tasks.
Args:
vocab_size: The size of the token vocabulary.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
  token_loss_init_value: The default loss value assigned to a token that has
    never been masked and predicted.
  token_loss_beta: The moving-average factor used when updating the running
    loss value of a token.
  token_keep_k: The number of tokens to keep in the intermediate layers. The
    rest are dropped in those layers.
  token_allow_list: The list of token-ids that should never be dropped. In the
    BERT English vocab, ids 100-103 are the special tokens [UNK], [CLS], [SEP]
    and [MASK]; by default, token_allow_list contains exactly these tokens.
  token_deny_list: The list of token-ids that should always be dropped. In the
    BERT English vocab, token-id 0 is [PAD]; by default, token_deny_list
    contains only [PAD].
  initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
with_dense_inputs: Whether to accept dense embeddings as the input.
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: _Activation = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
token_loss_init_value: float = 10.0,
token_loss_beta: float = 0.995,
token_keep_k: int = 256,
token_allow_list: Tuple[int, ...] = (100, 101, 102, 103),
token_deny_list: Tuple[int, ...] = (0,),
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
with_dense_inputs: bool = False,
**kwargs):
# Pops kwargs that are used in V1 implementation.
if 'dict_outputs' in kwargs:
kwargs.pop('dict_outputs')
if 'return_all_encoder_outputs' in kwargs:
kwargs.pop('return_all_encoder_outputs')
if 'intermediate_size' in kwargs:
inner_dim = kwargs.pop('intermediate_size')
if 'activation' in kwargs:
inner_activation = kwargs.pop('activation')
if 'dropout_rate' in kwargs:
output_dropout = kwargs.pop('dropout_rate')
if 'attention_dropout_rate' in kwargs:
attention_dropout = kwargs.pop('attention_dropout_rate')
super().__init__(**kwargs)
if output_range is not None:
      logging.warning('`output_range` is available as an argument of `call()`. '
                      'The `output_range` __init__ argument is deprecated.')
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=tf_utils.clone_initializer(initializer),
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=tf_utils.clone_initializer(initializer),
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='embedding_projection')
    # In the BERT English vocab, low ids include special tokens such as [PAD],
    # [CLS] and [SEP]. We always want to drop [PAD] and never drop [CLS] or
    # [SEP], so allow-listed ids get a large positive importance (+1e4) and
    # deny-listed ids a large negative one (-1e4).
init_importance = tf.constant(token_loss_init_value, shape=(vocab_size))
if token_allow_list:
init_importance = tf.tensor_scatter_nd_update(
tensor=init_importance,
indices=[[x] for x in token_allow_list],
updates=[1.0e4 for x in token_allow_list])
if token_deny_list:
init_importance = tf.tensor_scatter_nd_update(
tensor=init_importance,
indices=[[x] for x in token_deny_list],
updates=[-1.0e4 for x in token_deny_list])
self._token_importance_embed = layers.TokenImportanceWithMovingAvg(
vocab_size=vocab_size,
init_importance=init_importance,
moving_average_beta=token_loss_beta)
self._token_separator = layers.SelectTopK(top_k=token_keep_k)
self._transformer_layers = []
self._num_layers = num_layers
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
for i in range(num_layers):
layer = layers.TransformerEncoderBlock(
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
kernel_initializer=tf_utils.clone_initializer(initializer),
name='transformer/layer_%d' % i)
self._transformer_layers.append(layer)
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=tf_utils.clone_initializer(initializer),
name='pooler_transform')
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'token_loss_init_value': token_loss_init_value,
'token_loss_beta': token_loss_beta,
'token_keep_k': token_keep_k,
'token_allow_list': token_allow_list,
'token_deny_list': token_deny_list,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'with_dense_inputs': with_dense_inputs,
}
if with_dense_inputs:
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_inputs=tf.keras.Input(
shape=(None, embedding_width), dtype=tf.float32),
dense_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
dense_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
)
else:
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))
def call(self, inputs, output_range: Optional[tf.Tensor] = None):
if isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids')
mask = inputs.get('input_mask')
type_ids = inputs.get('input_type_ids')
dense_inputs = inputs.get('dense_inputs', None)
dense_mask = inputs.get('dense_mask', None)
dense_type_ids = inputs.get('dense_type_ids', None)
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
word_embeddings = self._embedding_layer(word_ids)
if dense_inputs is not None:
# Concat the dense embeddings at sequence end.
word_embeddings = tf.concat([word_embeddings, dense_inputs], axis=1)
type_ids = tf.concat([type_ids, dense_type_ids], axis=1)
mask = tf.concat([mask, dense_mask], axis=1)
    # Absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = word_embeddings + position_embeddings + type_embeddings
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
attention_mask = self._attention_mask_layer(embeddings, mask)
encoder_outputs = []
x = embeddings
# Get token routing.
token_importance = self._token_importance_embed(word_ids)
selected, not_selected = self._token_separator(token_importance)
# For a 12-layer BERT:
    # 1. All tokens first go through 5 transformer layers, then
# 2. Only important tokens go through 1 transformer layer with cross
# attention to unimportant tokens, then
# 3. Only important tokens go through 5 transformer layers without cross
# attention.
# 4. Finally, all tokens go through the last layer.
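    # In general, for num_layers = N: step 1 uses layers [0, N//2 - 1), step 2
    # uses layer N//2 - 1 (with cross attention), step 3 uses layers
    # [N//2, N - 1), and step 4 uses layer N - 1, so each layer runs once.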
# Step 1.
for i, layer in enumerate(self._transformer_layers[:self._num_layers // 2 -
1]):
x = layer([x, attention_mask],
output_range=output_range if i == self._num_layers -
1 else None)
encoder_outputs.append(x)
# Step 2.
# First, separate important and non-important tokens.
x_selected = tf.gather(x, selected, batch_dims=1, axis=1)
mask_selected = tf.gather(mask, selected, batch_dims=1, axis=1)
attention_mask_token_drop = self._attention_mask_layer(
x_selected, mask_selected)
x_not_selected = tf.gather(x, not_selected, batch_dims=1, axis=1)
mask_not_selected = tf.gather(mask, not_selected, batch_dims=1, axis=1)
attention_mask_token_pass = self._attention_mask_layer(
x_selected, tf.concat([mask_selected, mask_not_selected], axis=1))
x_all = tf.concat([x_selected, x_not_selected], axis=1)
# Then, call transformer layer with cross attention.
x_selected = self._transformer_layers[self._num_layers // 2 - 1](
[x_selected, x_all, attention_mask_token_pass],
output_range=output_range if self._num_layers // 2 -
1 == self._num_layers - 1 else None)
encoder_outputs.append(x_selected)
# Step 3.
for i, layer in enumerate(self._transformer_layers[self._num_layers //
2:-1]):
x_selected = layer([x_selected, attention_mask_token_drop],
output_range=output_range if i == self._num_layers - 1
else None)
encoder_outputs.append(x_selected)
# Step 4.
# First, merge important and non-important tokens.
x_not_selected = tf.cast(x_not_selected, dtype=x_selected.dtype)
x = tf.concat([x_selected, x_not_selected], axis=1)
indices = tf.concat([selected, not_selected], axis=1)
reverse_indices = tf.argsort(indices)
x = tf.gather(x, reverse_indices, batch_dims=1, axis=1)
# Then, call transformer layer with all tokens.
x = self._transformer_layers[-1]([x, attention_mask],
output_range=output_range)
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
return dict(
sequence_output=encoder_outputs[-1],
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
def record_mlm_loss(self, mlm_ids: tf.Tensor, mlm_losses: tf.Tensor):
self._token_importance_embed.update_token_importance(
token_ids=mlm_ids, importance=mlm_losses)
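  # `record_mlm_loss` is invoked by the training task after each step, so the
  # per-token importance (a moving average of the masked-LM loss, controlled
  # by `token_loss_beta`) tracks how hard each token currently is to predict.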
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
      logging.warning(warn_string)
return cls(**config)
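# Illustrative usage sketch (not part of the original file): building a small
# encoder and running a single forward pass. All sizes are toy assumptions.
def _example_forward_pass():
  net = TokenDropBertEncoder(
      vocab_size=100,
      hidden_size=32,
      num_attention_heads=2,
      num_layers=4,
      token_keep_k=8,  # Keep the 8 most important tokens in the middle layers.
      token_allow_list=(),
      token_deny_list=())
  data = dict(
      input_word_ids=tf.ones((2, 16), dtype=tf.int32),
      input_mask=tf.ones((2, 16), dtype=tf.int32),
      input_type_ids=tf.zeros((2, 16), dtype=tf.int32))
  outputs = net(data)
  return outputs['sequence_output']  # Shape: [2, 16, 32].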
| 17,125 | 41.708229 | 80 | py |
models | models-master/official/projects/token_dropping/encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer-based bert encoder network."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.modeling.networks import bert_encoder
from official.projects.token_dropping import encoder
class TokenDropBertEncoderTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TokenDropBertEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
def test_dict_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_dict_outputs_all_encoder_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
dict_outputs=True,
token_keep_k=sequence_length,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_dict_outputs_network_creation_with_float16_dtype(self):
hidden_size = 32
sequence_length = 21
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=4,
dict_outputs=True,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
@parameterized.named_parameters(
("all_sequence_encoder", None, 21),
("output_range_encoder", 1, 1),
)
def test_dict_outputs_network_invocation(
self, output_range, out_seq_len):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
dict_outputs=True,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids),
output_range=output_range)
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len)
# Creates a BertEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = encoder.TokenDropBertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
dict_outputs=True,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], sequence_length)
# Creates a BertEncoder with embedding_width != hidden_size
test_network = encoder.TokenDropBertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16,
dict_outputs=True,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
def test_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
self.assertIsInstance(test_network.transformer_layers, list)
self.assertLen(test_network.transformer_layers, 3)
self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
test_network = encoder.TokenDropBertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
inputs = dict(
input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids)
_ = test_network(inputs)
def test_all_encoder_outputs_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
return_all_encoder_outputs=True,
token_keep_k=sequence_length,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
all_encoder_outputs = dict_outputs["encoder_outputs"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertLen(all_encoder_outputs, 3)
for data in all_encoder_outputs:
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, all_encoder_outputs[-1].dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_network_creation_with_float16_dtype(self):
hidden_size = 32
sequence_length = 21
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=4,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
@parameterized.named_parameters(
("all_sequence", None, 21),
("output_range", 1, 1),
)
def test_network_invocation(self, output_range, out_seq_len):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
# Create a small BertEncoder for testing.
test_network = encoder.TokenDropBertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids),
output_range=output_range)
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], out_seq_len)
# Creates a BertEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = encoder.TokenDropBertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], sequence_length)
# Creates a BertEncoder with embedding_width != hidden_size
test_network = encoder.TokenDropBertEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
embedding_width=16,
token_keep_k=2,
token_allow_list=(),
token_deny_list=())
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["sequence_output"]
pooled = dict_outputs["pooled_output"]
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[-1], hidden_size)
self.assertTrue(hasattr(test_network, "_embedding_projection"))
class TokenDropCompatibilityTest(tf.test.TestCase):
def tearDown(self):
super().tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
def test_checkpoint_forward_compatible(self):
batch_size = 3
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
kwargs = dict(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=None)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
data = dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data)
old_net = bert_encoder.BertEncoderV2(**kwargs)
old_net_outputs = old_net(data)
ckpt = tf.train.Checkpoint(net=old_net)
path = ckpt.save(self.get_temp_dir())
new_net = encoder.TokenDropBertEncoder(
token_keep_k=sequence_length,
token_allow_list=(),
token_deny_list=(),
**kwargs)
new_ckpt = tf.train.Checkpoint(net=new_net)
status = new_ckpt.restore(path)
status.assert_existing_objects_matched()
# assert_consumed will fail because the old model has redundant nodes.
new_net_outputs = new_net(data)
self.assertAllEqual(old_net_outputs.keys(), new_net_outputs.keys())
for key in old_net_outputs:
self.assertAllClose(old_net_outputs[key], new_net_outputs[key])
def test_keras_model_checkpoint_forward_compatible(self):
batch_size = 3
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
kwargs = dict(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types,
output_range=None)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
data = dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data)
old_net = bert_encoder.BertEncoderV2(**kwargs)
inputs = old_net.inputs
outputs = old_net(inputs)
old_model = tf.keras.Model(inputs=inputs, outputs=outputs)
old_model_outputs = old_model(data)
ckpt = tf.train.Checkpoint(net=old_model)
path = ckpt.save(self.get_temp_dir())
new_net = encoder.TokenDropBertEncoder(
token_keep_k=sequence_length,
token_allow_list=(),
token_deny_list=(),
**kwargs)
inputs = new_net.inputs
outputs = new_net(inputs)
new_model = tf.keras.Model(inputs=inputs, outputs=outputs)
new_ckpt = tf.train.Checkpoint(net=new_model)
new_ckpt.restore(path)
new_model_outputs = new_model(data)
self.assertAllEqual(old_model_outputs.keys(), new_model_outputs.keys())
for key in old_model_outputs:
self.assertAllClose(old_model_outputs[key], new_model_outputs[key])
if __name__ == "__main__":
tf.test.main()
| 20,527 | 38.401152 | 80 | py |
models | models-master/official/projects/token_dropping/masked_lm_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.masked_lm."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.projects.token_dropping import encoder_config
from official.projects.token_dropping import masked_lm
class MLMTaskTest(tf.test.TestCase):
def test_task(self):
config = masked_lm.TokenDropMaskedLMConfig(
init_checkpoint=self.get_temp_dir(),
scale_loss=True,
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
any=encoder_config.TokenDropBertEncoderConfig(
vocab_size=30522, num_layers=1, token_keep_k=64),
type="any"),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path="dummy",
max_predictions_per_seq=20,
seq_length=128,
global_batch_size=1))
task = masked_lm.TokenDropMaskedLMTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
# Saves a checkpoint.
ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
ckpt.save(config.init_checkpoint)
task.initialize(model)
if __name__ == "__main__":
tf.test.main()
| 2,284 | 34.703125 | 74 | py |
models | models-master/official/projects/token_dropping/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A customized training binary for running token dropping experiments."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.token_dropping import experiment_configs # pylint: disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
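# Example launch (a sketch only: the experiment name, config file, and paths
# below are placeholders, not values defined in this repository):
#
#   python3 -m official.projects.token_dropping.train \
#     --experiment=<registered_experiment_name> \
#     --mode=train_and_eval \
#     --model_dir=/tmp/token_dropping \
#     --config_file=/path/to/experiment.yaml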
| 2,586 | 35.957143 | 96 | py |
models | models-master/official/projects/token_dropping/encoder_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token dropping encoder configuration and instantiation."""
import dataclasses
from typing import Tuple
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.token_dropping import encoder
@dataclasses.dataclass
class TokenDropBertEncoderConfig(encoders.BertEncoderConfig):
token_loss_init_value: float = 10.0
token_loss_beta: float = 0.995
token_keep_k: int = 256
token_allow_list: Tuple[int, ...] = (100, 101, 102, 103)
token_deny_list: Tuple[int, ...] = (0,)
@base_config.bind(TokenDropBertEncoderConfig)
def get_encoder(encoder_cfg: TokenDropBertEncoderConfig):
"""Instantiates 'TokenDropBertEncoder'.
Args:
encoder_cfg: A 'TokenDropBertEncoderConfig'.
Returns:
    An 'encoder.TokenDropBertEncoder' object.
"""
return encoder.TokenDropBertEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
return_all_encoder_outputs=encoder_cfg.return_all_encoder_outputs,
dict_outputs=True,
norm_first=encoder_cfg.norm_first,
token_loss_init_value=encoder_cfg.token_loss_init_value,
token_loss_beta=encoder_cfg.token_loss_beta,
token_keep_k=encoder_cfg.token_keep_k,
token_allow_list=encoder_cfg.token_allow_list,
token_deny_list=encoder_cfg.token_deny_list)
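# A minimal construction sketch (hedged: the config values are illustrative,
# and `get_encoder` is assumed to stay directly callable after the
# `base_config.bind` registration above):
#
#   config = TokenDropBertEncoderConfig(
#       vocab_size=30522, num_layers=2, token_keep_k=128)
#   token_drop_encoder = get_encoder(config)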
| 2,660 | 38.132353 | 74 | py |
models | models-master/official/projects/perceiver/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden training driver, register Perceiver configs."""
from absl import app
from official.common import flags as tfm_flags
from official.nlp import train
# pylint: disable=unused-import
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.tasks import pretrain
from official.projects.perceiver.tasks import sentence_prediction
# pylint: enable=unused-import
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,104 | 35.833333 | 74 | py |
models | models-master/official/projects/perceiver/configs/perceiver.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver configurations."""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.modeling.hyperparams import base_config
from official.nlp.data import pretrain_dataloader
from official.nlp.data import sentence_prediction_dataloader
_SENTENCE_PREDICTION_TRAINER = cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'lamb',
'lamb': {
'weight_decay_rate': 0.01,
'exclude_from_weight_decay': [
'LayerNorm', 'layer_norm', 'bias'
],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3.0e-05,
'end_learning_rate': 0.0,
'decay_steps': 32730,
'power': 1.0,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 200,
'warmup_learning_rate': 0.,
}
}
}))
_MLM_WORDPIECE_TRAINER = cfg.TrainerConfig(
train_steps=500_000,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'lamb',
'lamb': {
'weight_decay_rate': 0.01,
'exclude_from_weight_decay': [
'LayerNorm', 'layer_norm', 'bias'
],
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1.25e-3,
'decay_steps': 500_000,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 1_000,
'warmup_learning_rate': 0.,
}
}
}))
@dataclasses.dataclass
class EncoderConfig(base_config.Config):
"""The perceiver encoder processor configuration."""
_attention_heads = 8
_per_attention_head_last_dim = 32
self_attention_widening_factor: int = 1
self_attention_num_heads: int = _attention_heads
cross_attention_widening_factor: int = 1
cross_attention_num_heads: int = _attention_heads
num_self_attends_per_block: int = 26
num_blocks: int = 1
qk_last_dim: int = _attention_heads * _per_attention_head_last_dim
v_last_dim: int = 1280
dropout_prob: float = 0.0
dropout_attn_prob: float = 0.0
att_init_scale: float = 1.0
dense_init_scale: float = 1.0
norm_epsilon: float = 1e-5
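# With the defaults above, queries and keys project to
# qk_last_dim = 8 heads * 32 dims per head = 256, while values project to
# v_last_dim = 1280, which matches `SequenceEncoderConfig.d_latents` below.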
@dataclasses.dataclass
class DecoderConfig(base_config.Config):
"""The perceiver decoder configuration."""
num_heads: int = 8
_per_attention_head_last_dim = 32
output_last_dim: int = 768
qk_last_dim: int = num_heads * _per_attention_head_last_dim
v_last_dim: int = 768
use_query_residual: bool = False
@dataclasses.dataclass
class PositionalDecoder(base_config.Config):
d_model: int = 768
decoder: DecoderConfig = dataclasses.field(default_factory=DecoderConfig)
position_encoding_intializer_stddev: float = 0.02
output_index_dim: int = 512
d_latents: int = 1280
z_index_dim: int = 256
@dataclasses.dataclass
class ClassificationDecoderConfig(PositionalDecoder):
output_index_dim: int = 1
@dataclasses.dataclass
class MaskedLMDecoderConfig(PositionalDecoder):
output_index_dim: int = 512
@dataclasses.dataclass
class SequenceEncoderConfig(base_config.Config):
"""The perceiver sequence encoder configuration."""
d_model: int = 768
d_latents: int = 1280
z_index_dim: int = 256
max_seq_len: int = 512
vocab_size: int = 30_522
embedding_width: int = 768
embedding_initializer_stddev: float = 0.02
input_position_encoding_intializer_stddev: float = 0.02
z_pos_enc_init_scale: float = 0.02
encoder: EncoderConfig = dataclasses.field(default_factory=EncoderConfig)
@dataclasses.dataclass
class PretrainerConfig(base_config.Config):
"""The pretrainer configuration."""
encoder: SequenceEncoderConfig = dataclasses.field(
default_factory=SequenceEncoderConfig
)
decoder: MaskedLMDecoderConfig = dataclasses.field(
default_factory=MaskedLMDecoderConfig
)
mlm_activation: str = 'gelu'
mlm_initializer_range: float = 0.02
@dataclasses.dataclass
class ClassificationConfig(base_config.Config):
"""The classification configuration."""
num_classes: int = 0
use_encoder_pooler: bool = False
encoder: SequenceEncoderConfig = dataclasses.field(
default_factory=SequenceEncoderConfig
)
decoder: ClassificationDecoderConfig = dataclasses.field(
default_factory=ClassificationDecoderConfig
)
@dataclasses.dataclass
class SentencePredictionConfig(cfg.TaskConfig):
"""The sentence prediction task config."""
model: ClassificationConfig = dataclasses.field(
default_factory=ClassificationConfig
)
hub_module_url: str = ''
init_checkpoint: str = ''
init_cls_pooler: bool = False
metric_type: str = 'accuracy'
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
@dataclasses.dataclass
class PretrainConfig(cfg.TaskConfig):
"""The word piece pretrain task config."""
model: PretrainerConfig = dataclasses.field(default_factory=PretrainerConfig)
init_checkpoint: str = ''
scale_loss: bool = False
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
@exp_factory.register_config_factory('perceiver/word_piece_sentence_prediction')
def perceiver_word_piece_sentence_prediction() -> cfg.ExperimentConfig:
"""Config for perceiver sentence prediction.
Returns:
cfg.ExperimentConfig
References:
Perceiver IO (https://arxiv.org/abs/2107.14795).
"""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=SentencePredictionConfig(
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig()),
trainer=_SENTENCE_PREDICTION_TRAINER,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
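# A hedged lookup sketch for the experiment registered above (the override
# value is illustrative):
#
#   experiment = exp_factory.get_exp_config(
#       'perceiver/word_piece_sentence_prediction')
#   experiment.task.model.num_classes = 2  # e.g. a binary prediction task.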
@exp_factory.register_config_factory(
'perceiver/word_piece_raw_sentence_prediction'
)
def perceiver_word_piece_raw_sentence_prediction() -> cfg.ExperimentConfig:
"""Config for perceiver sentence prediction.
Returns:
cfg.ExperimentConfig
References:
Perceiver IO (https://arxiv.org/abs/2107.14795).
"""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=SentencePredictionConfig(
train_data=sentence_prediction_dataloader.SentencePredictionTextDataConfig(),
validation_data=sentence_prediction_dataloader.SentencePredictionTextDataConfig(),
),
trainer=_SENTENCE_PREDICTION_TRAINER,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
return config
@exp_factory.register_config_factory('perceiver/wordpiece_pretrain')
def perceiver_wordpiece_pretrain() -> cfg.ExperimentConfig:
"""Config for perceiver wordpiece pretrain.
Returns:
cfg.ExperimentConfig
References:
Perceiver IO (https://arxiv.org/abs/2107.14795).
Bert pretraining data
(https://github.com/google-research/bert/blob/master/tokenization.py#L168)
"""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=PretrainConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(
global_batch_size=512,
use_next_sentence_label=False,
use_v2_feature_names=True),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
global_batch_size=512,
is_training=False,
use_next_sentence_label=False,
use_v2_feature_names=True)),
trainer=_MLM_WORDPIECE_TRAINER,
restrictions=[
'task.train_data.is_training != None',
])
return config
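# A hedged usage sketch: importing this module registers the factories above,
# after which an experiment can be looked up by name. The input path is a
# placeholder.
#
#   experiment = exp_factory.get_exp_config('perceiver/wordpiece_pretrain')
#   experiment.task.train_data.input_path = '/path/to/pretrain.tfrecord*'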
| 8,975 | 29.324324 | 92 | py |
models | models-master/official/projects/perceiver/configs/perceiver_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.masked_lm."""
import tensorflow as tf
from official.nlp.data import pretrain_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.projects.perceiver.configs import perceiver
class PerceiverWordPiecePretrainConfigTest(tf.test.TestCase):
def test_word_piece_pretrain_config(self):
config = perceiver.PretrainConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(
global_batch_size=512,
use_next_sentence_label=False,
use_v2_feature_names=True),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
global_batch_size=512,
is_training=False,
use_next_sentence_label=False,
use_v2_feature_names=True))
self.assertIsNotNone(config)
self.assertIsNotNone(config.model)
self.assertFalse(config.scale_loss)
class PerceiverWordPieceSentencePredictionConfigTest(tf.test.TestCase):
def test_word_piece_fine_tune_config(self):
config = perceiver.SentencePredictionConfig(
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig())
self.assertIsNotNone(config)
self.assertIsNotNone(config.model)
self.assertFalse(config.init_cls_pooler)
def test_perceiver_sentence_prediction_returns_valid_learning_rate(self):
experiment_cfg = perceiver.perceiver_word_piece_sentence_prediction()
self.assertIsNotNone(experiment_cfg.trainer.optimizer_config.learning_rate)
class PerceiverWordPieceRawSentencePredictionConfigTest(tf.test.TestCase):
def test_word_piece_raw_sentence_fine_tune_config(self):
config = perceiver.SentencePredictionConfig(
train_data=sentence_prediction_dataloader
.SentencePredictionTextDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionTextDataConfig())
self.assertIsNotNone(config)
self.assertIsNotNone(config.model)
self.assertFalse(config.init_cls_pooler)
def test_perceiver_raw_sentence_prediction_returns_valid_learning_rate(self):
experiment_cfg = perceiver.perceiver_word_piece_raw_sentence_prediction()
self.assertIsNotNone(experiment_cfg.trainer.optimizer_config.learning_rate)
if __name__ == "__main__":
tf.test.main()
| 2,997 | 37.435897 | 79 | py |
models | models-master/official/projects/perceiver/configs/encoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build perceiver sequence encoder."""
from official.projects.perceiver.configs import perceiver as cfg
from official.projects.perceiver.modeling.layers import encoder
from official.projects.perceiver.modeling.networks import sequence_encoder
def build_encoder(
encoder_config: cfg.SequenceEncoderConfig
) -> sequence_encoder.SequenceEncoder:
"""Instantiate a perceiver encoder network from SequenceEncoderConfig.
Args:
encoder_config:
The sequence encoder config, which provides encoder parameters.
Returns:
    A sequence encoder instance.
"""
encoder_ = encoder.Encoder(
**encoder_config.encoder.as_dict())
return sequence_encoder.SequenceEncoder(
encoder=encoder_,
d_model=encoder_config.d_model,
d_latents=encoder_config.d_latents,
z_index_dim=encoder_config.z_index_dim,
max_seq_len=encoder_config.max_seq_len,
vocab_size=encoder_config.vocab_size,
z_pos_enc_init_scale=encoder_config.z_pos_enc_init_scale,
embedding_width=encoder_config.embedding_width,
embedding_initializer_stddev=encoder_config.embedding_initializer_stddev,
input_position_encoding_intializer_stddev=encoder_config
.input_position_encoding_intializer_stddev)
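# A minimal sketch of how `build_encoder` might be used (values are the
# dataclass defaults, not a specific training recipe):
#
#   seq_encoder = build_encoder(cfg.SequenceEncoderConfig())
#   outputs = seq_encoder(seq_encoder.inputs)
#   # outputs['latent_output'] has shape [batch, z_index_dim, d_latents].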
| 1,850 | 37.5625 | 79 | py |
models | models-master/official/projects/perceiver/modeling/networks/positional_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver networks."""
import tensorflow as tf
from official.nlp.modeling import layers
class PositionalDecoder(tf.keras.layers.Layer):
"""Perceiver Positional Decoder Network.
  Creates a position encoding for queries and composes a basic decoder. For
  example, the positional decoder can be used for MLM, classification, or
  regression.
Currently only supports positional decoding.
Use `self.inputs` for inputs.
Attributes:
    inputs: A `Dict[Text, tf.keras.Input]` with `latent_output` and
      `input_mask`. `latent_output` has shape `(z_index_dim, d_latents)` with
      dtype `tf.float32`, and `input_mask` has shape `(None,)` with dtype
      `tf.int32`.
"""
def __init__(self,
decoder,
output_index_dim,
z_index_dim,
d_latents,
d_model,
position_encoding_intializer_stddev=0.02,
name='positional_decoder',
**kwargs):
"""Init.
Args:
decoder:
Instance of perceiver `Decoder`.
output_index_dim:
Sequence length for the query encoding.
z_index_dim:
Latent index dimension.
d_latents:
Latent last dimension.
d_model:
Model last dimension.
position_encoding_intializer_stddev:
`stddev` of `tf.keras.initializers.TruncatedNormal` used for the
learned position embedding table kernel initializer.
name:
Sets the `tf.keras.layers.Layer` name.
**kwargs:
Any keyword arguments to pass through to `tf.keras.layers.Layer`.
"""
super().__init__(**kwargs, name=name)
self._decoder = decoder
self._output_index_dim = output_index_dim
self._z_index_dim = z_index_dim
self._d_latents = d_latents
self._d_model = d_model
self._output_pos_enc = self._create_decoder_query(
position_encoding_intializer_stddev)
self.inputs = dict(
latent_output=tf.keras.Input(
shape=(self._z_index_dim, self._d_latents),
dtype=tf.float32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32))
def _create_decoder_query(self, position_encoding_intializer_stddev):
"""Create the position encoding for the output query."""
return layers.PositionEmbedding(
max_length=self._output_index_dim,
name='decoder_pos_enc',
initializer=tf.keras.initializers.TruncatedNormal(
stddev=position_encoding_intializer_stddev))
def call(self, inputs, training=None):
"""Return decoded output of latent vector.
Uses the positional encoding as query for the decoder and uses the
`latent_output` as key-value for the decoder.
Args:
inputs:
        A `Dict[Text, tf.keras.Input]` with `latent_output` and
        `input_mask`. `latent_output` has shape `(z_index_dim, d_latents)`
        with dtype `tf.float32`, and `input_mask` has shape `(None,)` with
        dtype `tf.int32`.
training:
Flag to indicate training status. Default is `None`. It is passed to
the decoder as is.
Returns:
      `Dict[Text, tf.Tensor]` with the decoded `sequence_output` of the
      latent vector.
"""
if not isinstance(inputs, dict):
raise ValueError(f'Unexpected inputs type to {self.__class__}.')
latent_output = inputs['latent_output']
query_mask = inputs.get('input_mask')
decoder_query = self._output_pos_enc(tf.ones(
(tf.shape(latent_output)[0], self._output_index_dim, self._d_model),
dtype=latent_output.dtype))
z = latent_output
sequence_output = self._decoder(
[decoder_query, z],
query_mask=query_mask,
training=training)
return dict(sequence_output=sequence_output)
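# A wiring sketch (hedged: it assumes a `Decoder` built from `DecoderConfig`,
# mirroring `positional_decoder_test.py`):
#
#   from official.projects.perceiver.configs import perceiver as cfg
#   from official.projects.perceiver.modeling.layers import decoder
#
#   dcfg = cfg.MaskedLMDecoderConfig()
#   pos_decoder = PositionalDecoder(
#       decoder=decoder.Decoder(**dcfg.decoder.as_dict()),
#       output_index_dim=dcfg.output_index_dim,
#       z_index_dim=dcfg.z_index_dim,
#       d_latents=dcfg.d_latents,
#       d_model=dcfg.d_model)
#   outputs = pos_decoder(pos_decoder.inputs)  # dict with 'sequence_output'.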
| 4,361 | 33.078125 | 78 | py |
models | models-master/official/projects/perceiver/modeling/networks/sequence_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sequence_encoder."""
import numpy as np
import tensorflow as tf
from official.projects.perceiver.configs import encoders
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.modeling.layers import encoder
from official.projects.perceiver.modeling.networks import sequence_encoder
class SequenceEncoderTest(tf.test.TestCase):
def _create_small_network(
self,
sequence_length,
z_index_dim,
d_latents,
vocab_size=100):
d_model = 64
num_layers = 2
encoder_cfg = perceiver.EncoderConfig(
v_last_dim=d_latents,
num_self_attends_per_block=num_layers)
sequence_encoder_cfg = perceiver.SequenceEncoderConfig(
d_model=d_model,
d_latents=d_latents,
z_index_dim=z_index_dim,
max_seq_len=sequence_length,
vocab_size=vocab_size,
encoder=encoder_cfg)
return encoders.build_encoder(sequence_encoder_cfg)
def test_dict_outputs_network_creation(self):
sequence_length = 21
z_index_dim = 128
d_latents = 48
test_network = self._create_small_network(
sequence_length=sequence_length,
z_index_dim=z_index_dim,
d_latents=d_latents)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["latent_output"]
expected_data_shape = [None, z_index_dim, d_latents]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
def test_dict_outputs_network_invocation(self):
num_types = 7
vocab_size = 57
sequence_length = 21
z_index_dim = 128
d_latents = 48
test_network = self._create_small_network(
sequence_length=sequence_length,
z_index_dim=z_index_dim,
d_latents=d_latents,
vocab_size=vocab_size)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = test_network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["latent_output"]
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
outputs = model.predict([word_id_data, mask_data, type_id_data])
self.assertEqual(outputs[0].shape[1], d_latents)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
sequence_length = 21
vocab_size = 57
d_model = 64
d_latents = 48
z_index_dim = 128
num_layers = 2
encoder_cfg = perceiver.EncoderConfig(
v_last_dim=d_latents,
num_self_attends_per_block=num_layers)
sequence_encoder_config = perceiver.SequenceEncoderConfig(
d_model=d_model,
d_latents=d_latents,
z_index_dim=z_index_dim,
max_seq_len=sequence_length,
vocab_size=vocab_size,
encoder=encoder_cfg)
encoder_ = encoder.Encoder(
**sequence_encoder_config.encoder.as_dict())
network = sequence_encoder.SequenceEncoder(
encoder=encoder_,
d_model=sequence_encoder_config.d_model,
d_latents=sequence_encoder_config.d_latents,
z_index_dim=sequence_encoder_config.z_index_dim,
max_seq_len=sequence_encoder_config.max_seq_len,
vocab_size=sequence_encoder_config.vocab_size,
z_pos_enc_init_scale=sequence_encoder_config.z_pos_enc_init_scale,
embedding_width=sequence_encoder_config.embedding_width,
embedding_initializer_stddev=sequence_encoder_config
.embedding_initializer_stddev,
input_position_encoding_intializer_stddev=sequence_encoder_config
.input_position_encoding_intializer_stddev)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = network(
dict(input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))
data = dict_outputs["latent_output"]
# Create a model based off of this network:
# model =
_ = tf.keras.Model([word_ids, mask, type_ids], [data])
# TODO(b/222634115) make save work.
# Tests model saving/loading.
# model_path = self.get_temp_dir() + "/model"
# model.save(model_path)
# _ = tf.keras.models.load_model(model_path)
# TODO(b/222634115) add test coverage.
if __name__ == "__main__":
tf.test.main()
| 6,083 | 37.264151 | 80 | py |
models | models-master/official/projects/perceiver/modeling/networks/sequence_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver sequence encoder."""
from typing import Optional, Dict
import tensorflow as tf
from official.nlp.modeling import layers
class SequenceEncoder(tf.keras.layers.Layer):
"""Perceiver encoder for sequences.
  Assumes learned positional encodings for the latent array and the input
  embeddings. Creates an embedding table of size `vocab_size`. It uses the
  perceiver encode processor to encode the input and process the latent
  representation. It can be pretrained with masked LM and reused for
  fine-tuning.
Use `self.inputs` for inputs.
"""
def __init__(self,
encoder: tf.keras.layers.Layer,
d_model: int,
d_latents: int,
z_index_dim: int,
max_seq_len: int,
vocab_size: int,
z_pos_enc_init_scale: float = 0.02,
embedding_width: Optional[int] = None,
embedding_initializer_stddev: float = 0.02,
input_position_encoding_intializer_stddev: float = 0.02,
name: str = 'sequence_encoder',
**kwargs):
"""Init.
Args:
encoder:
Instance of perceiver `Encoder`.
d_model:
Last dimension size of the input and output tensors. e.g.
`[batch_size, max_seq_len, d_model]`.
d_latents:
Last dimension size of the latent tensors. e.g.
`[batch_size, z_index_dim, d_latents]`.
z_index_dim:
Second dimension size of the latent tensors. e.g.
`[batch_size, z_index_dim, d_latents]`.
max_seq_len:
Second dimension size of the input and outputs tensors. e.g.
`[batch_size, max_seq_len, d_model]`.
vocab_size:
Vocabulary size of the embedding table.
z_pos_enc_init_scale:
Latent array's positional encoding's truncated_normal initializer's
`stddev`.
embedding_width:
Embedding dimension of the embedding table.
embedding_initializer_stddev:
`stddev` of `tf.keras.initializers.TruncatedNormal` used for the
embedding table kernel initializer.
input_position_encoding_intializer_stddev:
`stddev` of `tf.keras.initializers.TruncatedNormal` used for the
learned position embedding table kernel initializer.
name:
Sets the `tf.keras.layers.Layer` name.
**kwargs:
Any keyword arguments to pass through to `tf.keras.layers.Layer`.
"""
super().__init__(**kwargs, name=name)
self._embedding_width = embedding_width
self._encoder = encoder
self._d_model = d_model
self._z_index_dim = z_index_dim
self._d_latents = d_latents
if self._embedding_width is None:
self._embedding_width = self._d_model
    # Construct the embedding layer for the sequence vocab.
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=self._embedding_width,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=embedding_initializer_stddev),
name='word_embeddings')
# Construct the input positional encoding layer.
self._input_pos_encoding = layers.PositionEmbedding(
max_length=max_seq_len,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=input_position_encoding_intializer_stddev),
name='input_pos_encoding')
# Construct the latent array initial state.
self._z_pos_enc = layers.PositionEmbedding(
max_length=z_index_dim,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=z_pos_enc_init_scale),
name='z_pos_enc')
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))
def get_embedding_table(self) -> tf.Variable:
"""Get embedding table."""
return self._embedding_layer.embeddings
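  # The table returned here is what a tied-weight MLM head consumes, e.g. a
  # sketch mirroring `pretrainer_test.py`:
  #
  #   masked_lm = layers.MaskedLM(
  #       embedding_table=seq_encoder.get_embedding_table())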
def call(self,
inputs: Dict[str, tf.Tensor],
training: Optional[bool] = None) -> Dict[str, tf.Tensor]:
"""Return encoded and processed latent output of inputs.
Args:
inputs:
Expect inputs to be a dictionary of `input_word_ids` and `input_mask`.
training:
Flag to indicate training status.
Returns:
      `Dict[str, tf.Tensor]` with `latent_output`, the encoded and processed
      latent array.
"""
if not isinstance(inputs, dict):
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
word_ids = inputs['input_word_ids']
input_mask = inputs.get('input_mask')
word_embeddings = self._embedding_layer(word_ids)
pos_encodings = self._input_pos_encoding(word_embeddings)
embeddings = word_embeddings + pos_encodings
tensor_for_shape = tf.ones(
[tf.shape(embeddings)[0], self._z_index_dim, self._d_latents],
dtype=embeddings.dtype)
encoder_query = self._z_pos_enc(tensor_for_shape)
z = self._encoder(
[embeddings, encoder_query], input_mask=input_mask, training=training)
return dict(latent_output=z)
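# A shape sketch (illustrative; `seq_encoder` is assumed built as in
# `sequence_encoder_test.py`):
#
#   ids = tf.ones([2, 21], tf.int32)  # [batch, seq_len]
#   out = seq_encoder(dict(input_word_ids=ids,
#                          input_mask=tf.ones_like(ids),
#                          input_type_ids=tf.zeros_like(ids)))
#   # out['latent_output'] -> [2, z_index_dim, d_latents]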
| 5,706 | 35.350318 | 79 | py |
models | models-master/official/projects/perceiver/modeling/networks/positional_decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for positional_decoder."""
import tensorflow as tf
from official.projects.perceiver.configs import perceiver as cfg
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.networks import positional_decoder
class PositionalDecoderTest(tf.test.TestCase):
def test_dict_outputs_network_creation(self):
sequence_length = 21
z_index_dim = 8
d_model = 64
d_latents = 48
decoder_cfg = cfg.DecoderConfig(
output_last_dim=d_latents,
v_last_dim=d_latents,
num_heads=2)
positional_decoder_cfg = cfg.PositionalDecoder(
decoder=decoder_cfg,
d_model=d_model,
d_latents=d_latents,
output_index_dim=sequence_length,
z_index_dim=z_index_dim)
    decoder_ = decoder.Decoder(**positional_decoder_cfg.decoder.as_dict())
mlm_decoder = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=positional_decoder_cfg.output_index_dim,
z_index_dim=positional_decoder_cfg.z_index_dim,
d_latents=positional_decoder_cfg.d_latents,
d_model=positional_decoder_cfg.d_model)
# Create the inputs (note that the first dimension is implicit).
latent_output = tf.keras.Input(
shape=(z_index_dim, d_latents), dtype=tf.float32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = mlm_decoder(
dict(latent_output=latent_output, input_mask=mask))
data = dict_outputs["sequence_output"]
expected_data_shape = [None, sequence_length, d_model]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
sequence_length = 21
z_index_dim = 8
d_model = 64
d_latents = 48
decoder_cfg = cfg.DecoderConfig(
output_last_dim=d_latents,
v_last_dim=d_latents,
num_heads=2)
positional_decoder_cfg = cfg.PositionalDecoder(
decoder=decoder_cfg,
d_model=d_model,
d_latents=d_latents,
output_index_dim=sequence_length,
z_index_dim=z_index_dim)
    decoder_ = decoder.Decoder(**positional_decoder_cfg.decoder.as_dict())
mlm_decoder = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=positional_decoder_cfg.output_index_dim,
z_index_dim=positional_decoder_cfg.z_index_dim,
d_latents=positional_decoder_cfg.d_latents,
d_model=positional_decoder_cfg.d_model)
# Create the inputs (note that the first dimension is implicit).
latent_output = tf.keras.Input(
shape=(z_index_dim, d_latents), dtype=tf.float32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
dict_outputs = mlm_decoder(
dict(latent_output=latent_output, input_mask=mask))
data = dict_outputs["sequence_output"]
# Create a model based off of this network:
# model =
_ = tf.keras.Model([latent_output, mask], [data])
# TODO(b/222634115) make save work.
# Tests model saving/loading.
# model_path = self.get_temp_dir() + "/model"
# model.save(model_path)
# _ = tf.keras.models.load_model(model_path)
# TODO(b/222634115) add test coverage.
if __name__ == "__main__":
tf.test.main()
| 4,025 | 35.27027 | 76 | py |
models | models-master/official/projects/perceiver/modeling/models/pretrainer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Perceiver pretrainer model."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.projects.perceiver.configs import encoders
from official.projects.perceiver.configs import perceiver as cfg
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.models import pretrainer
from official.projects.perceiver.modeling.networks import positional_decoder
class PretrainerTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product(
(False, True),
(False, True),
))
def test_perceiver_pretrainer(self, use_customized_masked_lm,
has_masked_lm_positions):
"""Validate that the Keras object can be created."""
# Build a transformer network to use within the Perceiver trainer.
vocab_size = 100
sequence_length = 512
d_model = 64
d_latents = 48
num_layers = 2
encoder_cfg = cfg.EncoderConfig(
v_last_dim=d_latents,
num_self_attends_per_block=num_layers)
sequence_encoder_cfg = cfg.SequenceEncoderConfig(
d_model=d_model,
d_latents=d_latents,
vocab_size=vocab_size,
encoder=encoder_cfg)
test_network = encoders.build_encoder(sequence_encoder_cfg)
_ = test_network(test_network.inputs)
    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_mlm_decoder_cfg = cfg.MaskedLMDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
d_latents=d_latents)
decoder_ = decoder.Decoder(
**perceiver_mlm_decoder_cfg.decoder.as_dict())
positional_decoder_ = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=perceiver_mlm_decoder_cfg.output_index_dim,
z_index_dim=perceiver_mlm_decoder_cfg.z_index_dim,
d_latents=perceiver_mlm_decoder_cfg.d_latents,
d_model=perceiver_mlm_decoder_cfg.d_model,
position_encoding_intializer_stddev=perceiver_mlm_decoder_cfg
.position_encoding_intializer_stddev)
if use_customized_masked_lm:
customized_masked_lm = layers.MaskedLM(
embedding_table=test_network.get_embedding_table())
else:
customized_masked_lm = None
# Create a Perceiver trainer with the created network.
perceiver_trainer_model = pretrainer.Pretrainer(
encoder=test_network,
decoder=positional_decoder_,
customized_masked_lm=customized_masked_lm)
num_token_predictions = 20
# Create a set of 2-dimensional inputs (the first dimension is implicit).
inputs = dict(
input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32))
if has_masked_lm_positions:
inputs['masked_lm_positions'] = tf.keras.Input(
shape=(num_token_predictions,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
outputs = perceiver_trainer_model(inputs)
expected_keys = ['sequence_output']
if has_masked_lm_positions:
expected_keys.append('mlm_logits')
self.assertSameElements(outputs.keys(), expected_keys)
# Validate that the outputs are of the expected shape.
expected_lm_shape = [None, num_token_predictions, vocab_size]
if has_masked_lm_positions:
self.assertAllEqual(expected_lm_shape,
outputs['mlm_logits'].shape.as_list())
expected_sequence_output_shape = [None, sequence_length, d_model]
self.assertAllEqual(expected_sequence_output_shape,
outputs['sequence_output'].shape.as_list())
def test_serialize_deserialize(self):
"""Validate that the trainer can be serialized and deserialized."""
vocab_size = 100
d_model = 64
d_latents = 48
num_layers = 2
encoder_cfg = cfg.EncoderConfig(
v_last_dim=d_latents,
num_self_attends_per_block=num_layers)
sequence_encoder_cfg = cfg.SequenceEncoderConfig(
d_model=d_model,
d_latents=d_latents,
vocab_size=vocab_size,
encoder=encoder_cfg)
test_network = encoders.build_encoder(sequence_encoder_cfg)
_ = test_network(test_network.inputs)
    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_mlm_decoder_cfg = cfg.MaskedLMDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
d_latents=d_latents)
decoder_ = decoder.Decoder(
**perceiver_mlm_decoder_cfg.decoder.as_dict())
positional_decoder_ = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=perceiver_mlm_decoder_cfg.output_index_dim,
z_index_dim=perceiver_mlm_decoder_cfg.z_index_dim,
d_latents=perceiver_mlm_decoder_cfg.d_latents,
d_model=perceiver_mlm_decoder_cfg.d_model,
position_encoding_intializer_stddev=perceiver_mlm_decoder_cfg
.position_encoding_intializer_stddev)
# Create a Perceiver trainer with the created network.
perceiver_trainer_model = pretrainer.Pretrainer(
encoder=test_network,
decoder=positional_decoder_)
config = perceiver_trainer_model.get_config()
new_perceiver_trainer_model = pretrainer.Pretrainer.from_config(config)
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(perceiver_trainer_model.get_config(),
new_perceiver_trainer_model.get_config())
# TODO(b/222634115) add test coverage.
if __name__ == '__main__':
tf.test.main()
| 6,423 | 37.933333 | 80 | py |
models | models-master/official/projects/perceiver/modeling/models/classifier_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classifier."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling import layers
from official.projects.perceiver.configs import encoders
from official.projects.perceiver.configs import perceiver as cfg
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.models import classifier
from official.projects.perceiver.modeling.networks import positional_decoder
class ClassifierTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('single_cls', 1), ('3_cls', 3))
def test_perceiver_trainer(self, num_classes):
"""Validate that the Keras object can be created."""
# Build a perceiver sequence encoder network to use within the perceiver
# trainer.
vocab_size = 100
sequence_length = 512
d_model = 64
d_latents = 48
num_layers = 2
encoder_cfg = cfg.EncoderConfig(
v_last_dim=d_latents,
num_self_attends_per_block=num_layers)
sequence_encoder_cfg = cfg.SequenceEncoderConfig(
d_model=d_model,
d_latents=d_latents,
vocab_size=vocab_size,
encoder=encoder_cfg)
test_network = encoders.build_encoder(sequence_encoder_cfg)
    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_classification_decoder_cfg = cfg.ClassificationDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
d_latents=d_latents)
decoder_ = decoder.Decoder(
**perceiver_classification_decoder_cfg.decoder.as_dict())
positional_decoder_ = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=perceiver_classification_decoder_cfg.output_index_dim,
z_index_dim=perceiver_classification_decoder_cfg.z_index_dim,
d_latents=perceiver_classification_decoder_cfg.d_latents,
d_model=perceiver_classification_decoder_cfg.d_model,
position_encoding_intializer_stddev=perceiver_classification_decoder_cfg
.position_encoding_intializer_stddev)
# Create a classifier with the created network.
trainer_model = classifier.Classifier(
network=test_network,
decoder=positional_decoder_,
num_classes=num_classes)
# Create a set of 2-dimensional inputs (the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
# Invoke the trainer model on the inputs. This causes the layer to be built.
cls_outs = trainer_model({
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids})
# Validate that the outputs are of the expected shape.
expected_classification_shape = [None, num_classes]
self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list())
@parameterized.named_parameters(
('single_cls', 1, False),
('2_cls', 2, False),
('single_cls_custom_head', 1, True),
('2_cls_custom_head', 2, True))
def test_perceiver_trainer_tensor_call(self, num_classes, use_custom_head):
"""Validate that the Keras object can be invoked."""
# Build a perceiver sequence encoder network to use within the perceiver
# trainer.
vocab_size = 100
d_model = 64
d_latents = 48
num_layers = 2
encoder_cfg = cfg.EncoderConfig(
v_last_dim=d_latents,
num_self_attends_per_block=num_layers)
sequence_encoder_cfg = cfg.SequenceEncoderConfig(
d_model=d_model,
d_latents=d_latents,
vocab_size=vocab_size,
encoder=encoder_cfg)
test_network = encoders.build_encoder(sequence_encoder_cfg)
    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_classification_decoder_cfg = cfg.ClassificationDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
d_latents=d_latents)
decoder_ = decoder.Decoder(
**perceiver_classification_decoder_cfg.decoder.as_dict())
positional_decoder_ = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=perceiver_classification_decoder_cfg.output_index_dim,
z_index_dim=perceiver_classification_decoder_cfg.z_index_dim,
d_latents=perceiver_classification_decoder_cfg.d_latents,
d_model=perceiver_classification_decoder_cfg.d_model,
position_encoding_intializer_stddev=perceiver_classification_decoder_cfg
.position_encoding_intializer_stddev)
cls_head = layers.GaussianProcessClassificationHead(
inner_dim=0, num_classes=num_classes) if use_custom_head else None
# Create a classifier with the created network.
trainer_model = classifier.Classifier(
network=test_network,
decoder=positional_decoder_,
cls_head=cls_head,
num_classes=num_classes)
# Create a set of 2-dimensional data tensors to feed into the model.
word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)
type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
# Invoke the trainer model on the tensors. In Eager mode, this does the
# actual calculation. (We can't validate the outputs, since the network is
# too complex: this simply ensures we're not hitting runtime errors.)
_ = trainer_model({
'input_word_ids': word_ids,
'input_mask': mask,
'input_type_ids': type_ids})
@parameterized.named_parameters(
('default_cls_head', None),
('sngp_cls_head', layers.GaussianProcessClassificationHead(
inner_dim=0, num_classes=4)))
def test_serialize_deserialize(self, cls_head):
"""Validate that the trainer can be serialized and deserialized."""
del cls_head
vocab_size = 100
d_model = 64
d_latents = 48
num_layers = 2
encoder_cfg = cfg.EncoderConfig(
v_last_dim=d_latents,
num_self_attends_per_block=num_layers)
sequence_encoder_cfg = cfg.SequenceEncoderConfig(
d_model=d_model,
d_latents=d_latents,
vocab_size=vocab_size,
encoder=encoder_cfg)
test_network = encoders.build_encoder(sequence_encoder_cfg)
    decoder_cfg = cfg.DecoderConfig(
        output_last_dim=d_latents,
        v_last_dim=d_latents)
    perceiver_classification_decoder_cfg = cfg.ClassificationDecoderConfig(
        d_model=d_model,
        decoder=decoder_cfg,
d_latents=d_latents)
decoder_ = decoder.Decoder(
**perceiver_classification_decoder_cfg.decoder.as_dict())
positional_decoder_ = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=perceiver_classification_decoder_cfg.output_index_dim,
z_index_dim=perceiver_classification_decoder_cfg.z_index_dim,
d_latents=perceiver_classification_decoder_cfg.d_latents,
d_model=perceiver_classification_decoder_cfg.d_model,
position_encoding_intializer_stddev=perceiver_classification_decoder_cfg
.position_encoding_intializer_stddev)
# Create a classifier with the created network.
trainer_model = classifier.Classifier(
network=test_network,
decoder=positional_decoder_,
num_classes=4)
# Create another trainer via serialization and deserialization.
config = trainer_model.get_config()
new_trainer_model = classifier.Classifier.from_config(config)
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(trainer_model.get_config(),
new_trainer_model.get_config())
# TODO(b/222634115) add test coverage.
if __name__ == '__main__':
tf.test.main()
| 8,478 | 39.184834 | 80 | py |
models | models-master/official/projects/perceiver/modeling/models/classifier.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver classifier."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling import layers
class Classifier(tf.keras.Model):
"""Classifier model based on a shared encoder and optional decoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "Perceiver IO: A General Architecture for Structured
Inputs & Outputs" (https://arxiv.org/abs/2107.14795).
The Classifier allows a user to pass in an encoder stack and an optional
decoder stack (e.g. perceiver decoder), and instantiates a classification
network based on the passed `num_classes` argument. If `num_classes` is set
to 1, a regression network is instantiated.
This is forked from
(https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_classifier.py)
Attributes:
network:
      A perceiver encode and processor transformer network. This network
      should output a sequence output or a latent output. Furthermore, it
      should expose its embedding table via a "get_embedding_table" method.
num_classes:
Number of classes outputted by classification head.
inputs:
A `Dict[str, tf.keras.Input]` with `input_word_ids`, `input_mask`, and
`input_type_ids`. The shapes are all `(None)` with dtype `tf.int32`.
head_name:
Name of the classification head.
classifier:
Classification head layer.
initializer:
`tf.keras.initializers.Initializer` used for classification head layer.
"""
def __init__(self,
network,
num_classes,
decoder=None,
initializer=None,
dropout_rate=0.0,
head_name='glue',
cls_head=None,
name='classifier',
**kwargs):
"""Init.
Args:
network:
        A perceiver encode and processor transformer network. This network
        should output a `sequence_output` or a `latent_output`. Furthermore,
        it should expose its embedding table via a "get_embedding_table"
        method.
num_classes:
Number of classes to predict from the classification network.
decoder:
A perceiver decoder network. This network should accept the
latent output of the encoder and emits logits.
initializer:
The initializer (if any) to use in the classification networks.
Defaults to a Glorot uniform initializer.
dropout_rate:
The dropout probability of the cls head.
head_name:
Name of the classification head.
cls_head:
(Optional) The layer instance to use for the classifier head.
It should take in the output from network and produce the final logits.
        If set, the arguments ('num_classes', 'initializer', 'dropout_rate',
        'head_name') will be ignored.
name:
Sets the `tf.keras.Model` name.
**kwargs:
Any keyword arguments to pass through to `tf.keras.Model`.
"""
super().__init__(name=name, **kwargs)
self._config = {
'network': network,
'decoder': decoder,
'num_classes': num_classes,
'initializer': initializer,
'dropout_rate': dropout_rate,
'head_name': head_name,
'cls_head': cls_head,
'name': name,
}
self.num_classes = num_classes
self.head_name = head_name
self.initializer = initializer
self._decoder = decoder
self._network = network
inputs = self._network.inputs
outputs = self._network(inputs)
if 'sequence_output' not in outputs:
if 'latent_output' in outputs and self._decoder is not None:
decoder_inputs = {
'latent_output': outputs['latent_output'],
'input_mask': inputs['input_mask'],
}
decoder_outputs = self._decoder(decoder_inputs)
sequence_output = decoder_outputs['sequence_output']
else:
raise ValueError('if `sequence_output` is not in encoder output, '
                         '`latent_output` must be in encoder output and '
'decoder must exist.')
else:
sequence_output = outputs['sequence_output']
cls_inputs = sequence_output
if initializer is None:
stddev = 1. / np.sqrt(cls_inputs.shape[-1])
initializer = tf.keras.initializers.TruncatedNormal(stddev=stddev)
if cls_head:
classifier = cls_head
else:
classifier = layers.ClassificationHead(
inner_dim=cls_inputs.shape[-1],
num_classes=num_classes,
initializer=initializer,
dropout_rate=dropout_rate,
name=head_name)
_ = classifier(cls_inputs)
self.inputs = inputs
self._cls_head = cls_head
self._name = name
self.classifier = classifier
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Return perceiver classifier model output tensors in a dict.
Accepts inputs as dictionary of tensors.
Args:
inputs:
A `Dict[str, tf.keras.Input]` with `input_word_ids`, `input_mask`, and
`input_type_ids`. The shapes are all `(None)` with dtype `tf.int32`.
Returns:
`tf.Tensor` classification output.
"""
if not isinstance(inputs, dict):
raise ValueError(f'Unexpected inputs type to {self.__class__}.')
word_ids = inputs['input_word_ids']
input_type_ids = inputs.get('input_type_ids')
input_mask = inputs.get('input_mask')
encoder_inputs = {
'input_word_ids': word_ids,
'input_mask': input_mask,
'input_type_ids': input_type_ids,
}
encoder_outputs = self._network(encoder_inputs)
if 'sequence_output' not in encoder_outputs:
if 'latent_output' in encoder_outputs:
z = encoder_outputs['latent_output']
decoder_inputs = {'latent_output': z, 'input_mask': input_mask}
decoder_output = self._decoder(decoder_inputs)
outputs = dict()
if isinstance(decoder_output, dict):
outputs = decoder_output
else:
          raise ValueError('decoder\'s output should be a dict, '
f'but got {decoder_output}')
else:
        raise ValueError('If `sequence_output` is not in encoder output, '
'`latent_output` must be in encoder output.')
else:
outputs = encoder_outputs
return self.classifier(outputs['sequence_output'])
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self._network, decoder=self._decoder)
if hasattr(self.classifier, 'checkpoint_items'):
for key, item in self.classifier.checkpoint_items.items():
items['.'.join([self.classifier.name, key])] = item
return items
def get_config(self):
"""Return the configuration to set up this object using `from_config`."""
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Initialize object using config from `get_config`.
https://www.tensorflow.org/api_docs/python/tf/keras/models/model_from_config
Args:
config:
Return the configuration to set up this object.
custom_objects:
Optional dictionary mapping names (strings) to custom classes or
functions to be considered during deserialization.
Returns:
A Keras model instance (uncompiled).
"""
return cls(**config)
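# Usage sketch (illustrative, mirroring the unit tests): wires a small
# `Classifier` from a sequence encoder and a positional decoder. The dimension
# and class-count choices below are arbitrary test-sized assumptions.
def _example_build_classifier():
  """Builds a toy `Classifier` the same way the unit tests do."""
  # pylint: disable=g-import-not-at-top
  from official.projects.perceiver.configs import encoders
  from official.projects.perceiver.configs import perceiver as cfg
  from official.projects.perceiver.modeling.layers import decoder
  from official.projects.perceiver.modeling.networks import positional_decoder
  # pylint: enable=g-import-not-at-top
  encoder_cfg = cfg.EncoderConfig(v_last_dim=48, num_self_attends_per_block=2)
  sequence_encoder_cfg = cfg.SequenceEncoderConfig(
      d_model=64, d_latents=48, vocab_size=100, encoder=encoder_cfg)
  network = encoders.build_encoder(sequence_encoder_cfg)
  classification_decoder_cfg = cfg.ClassificationDecoderConfig(
      d_model=64,
      d_latents=48,
      decoder=cfg.DecoderConfig(output_last_dim=48, v_last_dim=48))
  decoder_ = decoder.Decoder(**classification_decoder_cfg.decoder.as_dict())
  positional_decoder_ = positional_decoder.PositionalDecoder(
      decoder=decoder_,
      output_index_dim=classification_decoder_cfg.output_index_dim,
      z_index_dim=classification_decoder_cfg.z_index_dim,
      d_latents=classification_decoder_cfg.d_latents,
      d_model=classification_decoder_cfg.d_model)
  return Classifier(
      network=network, decoder=positional_decoder_, num_classes=4)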
| 8,091 | 34.336245 | 100 | py |
models | models-master/official/projects/perceiver/modeling/models/pretrainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver networks."""
import copy
import tensorflow as tf
from official.nlp.modeling import layers
class Pretrainer(tf.keras.Model):
"""Perceiver Pretrainer.
Adds the masked language model head upon the encoder output. Optionally
incorporates decoder output.
Forked from
(https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_pretrainer.py)
Attributes:
encoder:
      A perceiver encode and processor transformer network. This network
      should output a `sequence_output` or a `latent_output`. Furthermore, it
      should expose its embedding table via a "get_embedding_table" method.
masked_lm:
Masked language model network head for language modeling with encoder
and optionally decoded output.
inputs:
A `Dict[str, tf.keras.Input]` with `input_word_ids`, `input_mask`, and
`input_type_ids`. The shapes are all `(None)` with dtype `tf.int32`.
If `masked_lm_positions` is included, it will run masked language
modeling layer to return sequence of logits.
"""
def __init__(self,
encoder,
decoder=None,
mlm_activation=None,
mlm_initializer='glorot_uniform',
customized_masked_lm=None,
name='pretrainer',
**kwargs):
"""Init.
Args:
encoder:
A perceiver encode and processor transformer network. It should expose
its embedding table via a "get_embedding_table" method. Decoder won't
be used if `sequence_output` is in the output of the encoder.
decoder:
A perceiver decoder network. This parameter is optional. This layer
accepts the latent output of the encoder and emits logits. Decoder must
accept a dictionary of `latent_output` and `input_mask` as inputs. This
will not be used if `sequence_output` is an output from `encoder`.
mlm_activation:
The activation (if any) to use in the masked LM network. If `None`, no
activation will be used.
mlm_initializer:
The initializer (if any) to use in the masked LM. Default
to a Glorot uniform initializer.
customized_masked_lm:
A customized masked_lm layer. If None, will create
a standard layer from `layers.MaskedLM`; if not None, will use the
specified masked_lm layer. Above arguments `mlm_activation` and
`mlm_initializer` will be ignored.
name:
Sets the `tf.keras.Model` name.
**kwargs:
Any keyword arguments to pass through to `tf.keras.Model`.
"""
super().__init__(**kwargs, name=name)
self._config = {
'encoder': encoder,
'decoder': decoder,
'mlm_initializer': mlm_initializer,
'mlm_activation': mlm_activation,
'customized_masked_lm': customized_masked_lm,
'name': name,
}
self._decoder = decoder
self.encoder = encoder
encoder_inputs = self.encoder.inputs
# Makes sure the weights are built.
encoder_outputs = self.encoder(encoder_inputs)
if 'sequence_output' not in encoder_outputs:
if 'latent_output' in encoder_outputs and self._decoder is not None:
decoder_inputs = {
'latent_output': encoder_outputs['latent_output'],
'input_mask': encoder_inputs['input_mask'],
}
decoder_outputs = self._decoder(decoder_inputs)
if 'sequence_output' not in decoder_outputs:
raise ValueError('`sequence_output` must be in decoder output.')
else:
raise ValueError('if `sequence_output` is not in encoder output, '
                         '`latent_output` must be in encoder output and '
'decoder must exist.')
encoder_inputs = copy.copy(self.encoder.inputs)
inputs = dict(encoder_inputs)
if self._decoder is not None:
inputs.update(copy.copy(self._decoder.inputs))
self.masked_lm = customized_masked_lm or layers.MaskedLM(
embedding_table=self.encoder.get_embedding_table(),
activation=mlm_activation,
initializer=mlm_initializer,
name='cls/predictions')
masked_lm_positions = tf.keras.layers.Input(
shape=(None,), name='masked_lm_positions', dtype=tf.int32)
if isinstance(inputs, dict):
inputs['masked_lm_positions'] = masked_lm_positions
else:
raise ValueError(f'Unexpected inputs type to {self.__class__}.')
self.inputs = inputs
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Return perceiver pretrainer model output tensors in a dict.
Accepts inputs as dictionary of tensors.
Args:
inputs:
A `Dict[str, tf.keras.Input]` with `input_word_ids`, `input_mask`, and
`input_type_ids`. The shapes are all `(None)` with dtype `tf.int32`.
If `masked_lm_positions` is included, it will run masked language
modeling layer to return sequence of logits.
Returns:
`Dict[str, tf.Tensor]` with `sequence_output` and optionally
`mlm_logits`.
"""
if not isinstance(inputs, dict):
raise ValueError(f'Unexpected inputs type to {self.__class__}.')
word_ids = inputs['input_word_ids']
input_type_ids = inputs.get('input_type_ids')
input_mask = inputs.get('input_mask')
encoder_inputs = {
'input_word_ids': word_ids,
'input_mask': input_mask,
'input_type_ids': input_type_ids,
}
encoder_outputs = self.encoder(encoder_inputs)
if 'sequence_output' not in encoder_outputs:
if 'latent_output' in encoder_outputs:
z = encoder_outputs['latent_output']
decoder_inputs = {'latent_output': z, 'input_mask': input_mask}
decoder_output = self._decoder(decoder_inputs)
outputs = dict()
if isinstance(decoder_output, dict):
outputs = decoder_output
else:
          raise ValueError('decoder\'s output should be a dict, '
f'but got {decoder_output}')
else:
        raise ValueError('If `sequence_output` is not in encoder output, '
'`latent_output` must be in encoder output.')
else:
outputs = encoder_outputs
sequence_output = outputs['sequence_output']
# Inference may not have masked_lm_positions and mlm_logits is not needed.
if 'masked_lm_positions' in inputs:
masked_lm_positions = inputs['masked_lm_positions']
outputs['mlm_logits'] = self.masked_lm(
sequence_output, masked_positions=masked_lm_positions)
return outputs
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(
encoder=self.encoder,
masked_lm=self.masked_lm,
decoder=self._decoder)
return items
def get_config(self):
"""Return the configuration to set up this object using `from_config`."""
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Initialize object using config from `get_config`.
https://www.tensorflow.org/api_docs/python/tf/keras/models/model_from_config
Args:
config:
Return the configuration to set up this object.
custom_objects:
Optional dictionary mapping names (strings) to custom classes or
functions to be considered during deserialization.
Returns:
A Keras model instance (uncompiled).
"""
return cls(**config)
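# Usage sketch (illustrative, not part of the original module): once built
# (see `tasks/pretrain.py` for a config-driven builder), the pretrainer maps
# token inputs plus `masked_lm_positions` to a dict that includes
# `mlm_logits`. All argument names below are placeholders.
def _example_pretrainer_call(pretrainer_model, word_ids, mask, type_ids,
                             masked_positions):
  """Returns MLM logits of shape [batch, num_masked, vocab_size]."""
  outputs = pretrainer_model({
      'input_word_ids': word_ids,  # [batch, seq_len] int32
      'input_mask': mask,  # [batch, seq_len] int32
      'input_type_ids': type_ids,  # [batch, seq_len] int32
      'masked_lm_positions': masked_positions,  # [batch, num_masked] int32
  })
  return outputs['mlm_logits']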
| 8,096 | 35.972603 | 100 | py |
models | models-master/official/projects/perceiver/modeling/layers/decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoder."""
import numpy as np
import tensorflow as tf
from official.projects.perceiver.modeling.layers import decoder
class PerceiverBasicDecoderTest(tf.test.TestCase):
def test_layer_creation(self):
sequence_length = 80
embedding_width = 800
test_layer = decoder.Decoder(
output_last_dim=embedding_width,
num_heads=8)
    latent_length = 8
    latent_width = 80
    query_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
output_tensor = test_layer((query_input, latent_input))
self.assertEqual(
query_input.shape.as_list(),
output_tensor.shape.as_list())
def test_layer_creation_with_mask(self):
embedding_width = 800
sequence_length = 80
test_layer = decoder.Decoder(
output_last_dim=embedding_width,
num_heads=8)
    latent_length = 8
    latent_width = 80
    query_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
    mask_tensor = tf.keras.Input(
        shape=(sequence_length,),
        dtype=tf.int32)
output_tensor = test_layer(
(query_input, latent_input),
query_mask=mask_tensor)
self.assertEqual(
query_input.shape.as_list(),
output_tensor.shape.as_list())
def test_layer_invocation(self):
embedding_width = 800
sequence_length = 80
test_layer = decoder.Decoder(
output_last_dim=embedding_width,
num_heads=8)
    latent_length = 8
    latent_width = 80
    query_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
    mask_tensor = tf.keras.Input(
        shape=(sequence_length,),
        dtype=tf.int32)
output_tensor = test_layer(
(query_input, latent_input),
query_mask=mask_tensor)
# Create a model from the test layer.
model = tf.keras.Model(
((query_input, latent_input), mask_tensor),
output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
latent_data = 10 * np.random.random_sample(
        (batch_size, latent_length, latent_width))
mask_data = tf.ones((batch_size, sequence_length), dtype=tf.int32)
query_data = tf.ones(
(batch_size, sequence_length, embedding_width),
dtype=tf.float32)
_ = model.predict(((query_data, latent_data), mask_data))
# TODO(b/222634115) Add tests to validate logic and dims.
if __name__ == "__main__":
tf.test.main()
| 3,391 | 31.615385 | 79 | py |
models | models-master/official/projects/perceiver/modeling/layers/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver modeling utils."""
import functools
import tensorflow as tf
def make_cross_attention_mask(query_mask, kv_mask):
"""Compute the outer product between `query_mask` and `kv_mask`."""
# Porting `mask = jax.vmap(jnp.outer)(query_mask, kv_mask)`
return tf.einsum("ab,ac->abc", query_mask, kv_mask)
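# Tiny worked example (illustrative, not part of the original module): a query
# mask of length 2 and a key/value mask of length 3 combine, per batch
# element, into a [batch, q_len, kv_len] attention mask via the outer product.
def _example_cross_attention_mask():
  """Returns [[[1, 1, 0], [0, 0, 0]]], i.e. a mask of shape [1, 2, 3]."""
  query_mask = tf.constant([[1, 0]])  # [batch=1, q_len=2]
  kv_mask = tf.constant([[1, 1, 0]])  # [batch=1, kv_len=3]
  return make_cross_attention_mask(query_mask, kv_mask)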
def build_cross_attention_block_args(
input_shape,
widening_factor=1,
dropout_prob=0.0,
dropout_attn_prob=0.0,
num_heads=8,
att_init_scale=1.0,
dense_init_scale=1.0,
shape_for_attn="kv",
use_query_residual=True,
norm_epsilon=1e-5,
qk_last_dim=None,
v_last_dim=None):
"""Builds cross attention block arguments for `TransformerEncoderBlock`.
Build cross attention block arguments for `TransformerEncoderBlock` used
in Perceiver.
The last dimension of the output of the attention block or `output_last_dim`
of `TransformerEncoderBlocks` is set to the first `input_shape`'s last
dimension.
`diff_q_kv_att_layer_norm` is set to `True`.
`inner_dropout` is set to 0.
`norm_first` is set to `True`.
`inner_activation` is set to gelu.
`kernel_initializer` and `attention_initializer` are both
`tf.keras.initializers.VarianceScaling`.
Args:
input_shape:
Check `input_shape` doc in `_build_transformer_encoder_block_args`.
widening_factor:
Check `widening_factor` doc in `_build_transformer_encoder_block_args`.
dropout_prob:
Check `dropout_prob` doc in `_build_transformer_encoder_block_args`.
dropout_attn_prob:
Check `dropout_attn_prob` doc in `_build_transformer_encoder_block_args`.
num_heads:
Check `num_heads` doc in `_build_transformer_encoder_block_args`.
att_init_scale:
Check `att_init_scale` doc in `_build_transformer_encoder_block_args`.
dense_init_scale:
Check `dense_init_scale` doc in `_build_transformer_encoder_block_args`.
shape_for_attn:
Valid values are `q` or `kv`. This value is used to determine the last
      dimension of the attention score output.
`qk_last_dim` has higher precedence over `shape_for_attn`.
use_query_residual:
Toggle to execute residual connection after attention.
norm_epsilon:
Check `norm_epsilon` doc in `_build_transformer_encoder_block_args`.
qk_last_dim:
When set, determines the last dimension of the attention score output.
When it's `None`, it uses the first `input_shape`'s last dimension as the
last dimension of the attention score output. `qk_last_dim` has higher
precedence over `shape_for_attn`.
v_last_dim:
Check `v_last_dim` doc in `_build_transformer_encoder_block_args`.
Returns:
A `dict` mapping `TransformerEncoderBlock` arguments.
References:
[Perceiver: General Perception with Iterative
Attention](https://arxiv.org/abs/2103.03206)
(https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py)
(https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py)
"""
inputs_q_shape = input_shape[0]
inputs_kv_shape = input_shape[1]
output_last_dim = inputs_q_shape[-1]
if shape_for_attn == "q":
f_qk_last_dim = inputs_q_shape[-1]
elif shape_for_attn == "kv":
f_qk_last_dim = inputs_kv_shape[-1]
else:
raise ValueError(f"Unknown value {shape_for_attn} for "
"shape_for_attention.")
f_v_last_dim = None
if qk_last_dim is not None:
f_qk_last_dim = qk_last_dim
if v_last_dim is not None:
f_v_last_dim = v_last_dim
return _build_transformer_encoder_block_args(
input_shape=input_shape,
widening_factor=widening_factor,
dropout_prob=dropout_prob,
dropout_attn_prob=dropout_attn_prob,
num_heads=num_heads,
att_init_scale=att_init_scale,
dense_init_scale=dense_init_scale,
use_query_residual=use_query_residual,
norm_epsilon=norm_epsilon,
qk_last_dim=f_qk_last_dim,
v_last_dim=f_v_last_dim,
diff_q_kv_att_layer_norm=True,
output_last_dim=output_last_dim)
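# Worked illustration (assumed toy shapes): with the default
# `shape_for_attn="kv"`, the attention score dimension follows the key/value
# input's last dimension while the block output keeps the query's.
def _example_cross_attention_args():
  """Shows how `shape_for_attn` picks the attention score dimension."""
  q_shape, kv_shape = (2, 8, 32), (2, 16, 64)
  args = build_cross_attention_block_args((q_shape, kv_shape), num_heads=8)
  assert args["key_dim"] == 64 // 8  # Follows the kv last dimension.
  assert args["output_last_dim"] == 32  # Follows the query last dimension.
  return args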
def build_self_attention_block_args(
input_shape,
widening_factor=4,
dropout_prob=0.0,
dropout_attn_prob=0.0,
num_heads=8,
att_init_scale=1.0,
dense_init_scale=1.0,
norm_epsilon=1e-5,
qk_last_dim=None,
v_last_dim=None):
"""Builds self attention block arguments for `TransformerEncoderBlock`.
Light wrapper around `_build_transformer_encoder_block_args` with some
assumptions around self attention block. Builds the arguments for
`TransformerEncoderBlock` used in Perceiver.
The last dimension of the output of the attention block or `output_last_dim`
of `TransformerEncoderBlocks` is set using the logic described in the
doc associated with `output_last_dim` in
`_build_transformer_encoder_block_args`.
`diff_q_kv_att_layer_norm` is set to `False`.
`use_query_residual` is set to `True`.
`inner_dropout` is set to 0.
`norm_first` is set to `True`.
`inner_activation` is set to gelu.
`kernel_initializer` and `attention_initializer` are both
`tf.keras.initializers.VarianceScaling`.
Args:
input_shape:
Check `input_shape` doc in `_build_transformer_encoder_block_args`.
widening_factor:
Check `widening_factor` doc in `_build_transformer_encoder_block_args`.
dropout_prob:
Check `dropout_prob` doc in `_build_transformer_encoder_block_args`.
dropout_attn_prob:
Check `dropout_attn_prob` doc in `_build_transformer_encoder_block_args`.
num_heads:
Check `num_heads` doc in `_build_transformer_encoder_block_args`.
att_init_scale:
Check `att_init_scale` doc in `_build_transformer_encoder_block_args`.
dense_init_scale:
Check `dense_init_scale` doc in `_build_transformer_encoder_block_args`.
norm_epsilon:
Check `norm_epsilon` doc in `_build_transformer_encoder_block_args`.
qk_last_dim:
Check `qk_last_dim` doc in `_build_transformer_encoder_block_args`.
v_last_dim:
Check `v_last_dim` doc in `_build_transformer_encoder_block_args`.
Returns:
A `dict` mapping `TransformerEncoderBlock` arguments.
References:
[Perceiver: General Perception with Iterative
Attention](https://arxiv.org/abs/2103.03206)
(https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py)
(https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py)
"""
return _build_transformer_encoder_block_args(
input_shape=input_shape,
widening_factor=widening_factor,
dropout_prob=dropout_prob,
dropout_attn_prob=dropout_attn_prob,
num_heads=num_heads,
att_init_scale=att_init_scale,
dense_init_scale=dense_init_scale,
use_query_residual=True,
norm_epsilon=norm_epsilon,
qk_last_dim=qk_last_dim,
v_last_dim=v_last_dim,
diff_q_kv_att_layer_norm=False,
output_last_dim=None)
def _build_transformer_encoder_block_args(
input_shape,
widening_factor,
dropout_prob,
dropout_attn_prob,
num_heads,
att_init_scale,
dense_init_scale,
use_query_residual,
norm_epsilon,
qk_last_dim,
v_last_dim,
diff_q_kv_att_layer_norm,
output_last_dim):
"""Build arguments for `TransformerEncoderBlock`.
`inner_dropout` is set to 0.
`norm_first` is set to `True`.
`inner_activation` is set to gelu.
`kernel_initializer` and `attention_initializer` are both
`tf.keras.initializers.VarianceScaling`.
Args:
input_shape:
input shape(s). Usually passed through `build` method in
`tf.keras.layers.Layer`.
widening_factor:
Multiplier used to widen on the inner layer of the MLP step within a
transformer attention block.
dropout_prob:
Dropout probability for the post-attention and output dropout.
dropout_attn_prob:
Dropout probability for within the attention layer.
num_heads:
Number of attention heads.
att_init_scale:
Scale for the `tf.keras.initializers.VarianceScaling` used in attention
kernel.
dense_init_scale:
Scale for the `tf.keras.initializers.VarianceScaling` used in MLP kernel.
use_query_residual:
Toggle to execute residual connection after attention.
norm_epsilon:
Epsilon value to initialize normalization layers.
qk_last_dim:
When set, determines the last dimension of the attention score output.
When it's `None`, it uses the first `input_shape`'s last dimension as the
last dimension of the attention score output.
v_last_dim:
When set, determines the value's last dimension in the multi-head
attention.
When it's `None`, it uses the `qk_last_dim` for `inner_dim` and
`value_dim`.
If `qk_last_dim` is `None`, the first input_shape's last dimension is used
as the last dimension of the attention score output.
If `output_last_dim` is `None`, `v_last_dim` is used to set the
`TransformerEncoderBlock`'s output's last dimension.
diff_q_kv_att_layer_norm:
If `True`, create a separate attention layer norm layer for query and
key-value if `norm_first` is `True`. Invalid to set to `True` if
`norm_first` is `False`.
output_last_dim:
      When set, the value determines the last dimension of the output of the
      attention block.
      When it's `None`, it uses, in order of decreasing precedence,
      `v_last_dim`, `qk_last_dim`, and finally the first `input_shape`'s last
      dimension. To clarify, if `v_last_dim` or `qk_last_dim` is `None`, the
      next value in the order of precedence is used.
Returns:
A `dict` mapping `TransformerEncoderBlock` arguments.
References:
[Perceiver: General Perception with Iterative
Attention](https://arxiv.org/abs/2103.03206)
(https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py)
(https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py)
"""
inputs_q_shape = input_shape[0]
  # Q and K must have the same last-dimension size.
# Default to preserving Q's input's shape.
if qk_last_dim is None:
qk_last_dim = inputs_q_shape[-1]
  # V's last-dimension size determines the shape of the QKV-attention output.
  # Default to the same last-dimension size used in the key-query operation.
if v_last_dim is None:
v_last_dim = qk_last_dim
  # Project the output of QKV attention to a desired last-dimension size.
  # Default to the same size as the output of the QKV attention operation.
if output_last_dim is None:
output_last_dim = v_last_dim
assert qk_last_dim % num_heads == 0
assert v_last_dim % num_heads == 0
qk_last_dim_per_head = qk_last_dim // num_heads
v_last_dim_per_head = v_last_dim // num_heads
return {
"num_attention_heads":
num_heads,
"inner_dim":
output_last_dim * widening_factor,
"inner_activation":
functools.partial(tf.keras.activations.gelu, approximate=True),
"kernel_initializer":
tf.keras.initializers.VarianceScaling(scale=dense_init_scale),
"attention_initializer":
tf.keras.initializers.VarianceScaling(scale=att_init_scale),
"norm_first":
True,
"norm_epsilon":
norm_epsilon,
"output_dropout":
dropout_prob,
"attention_dropout":
dropout_attn_prob,
"inner_dropout":
0.0,
"use_query_residual":
use_query_residual,
"value_dim":
v_last_dim_per_head,
"key_dim":
qk_last_dim_per_head,
"output_last_dim":
output_last_dim,
"diff_q_kv_att_layer_norm":
diff_q_kv_att_layer_norm,
}
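# Worked illustration (assumed toy shapes) of the default-precedence logic
# documented above: with `qk_last_dim` and `v_last_dim` unset, both fall back
# to the query input's last dimension, and the output's last dimension falls
# back to `v_last_dim`.
def _example_default_precedence():
  """Shows which dimensions the self-attention args fall back to."""
  args = build_self_attention_block_args(((2, 10, 32),), num_heads=4)
  assert args["key_dim"] == 32 // 4  # qk_last_dim defaulted to 32.
  assert args["value_dim"] == 32 // 4  # v_last_dim defaulted to qk_last_dim.
  assert args["output_last_dim"] == 32  # Output defaulted to v_last_dim.
  return args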
| 12,649 | 34.9375 | 146 | py |
models | models-master/official/projects/perceiver/modeling/layers/encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver encode processor."""
import tensorflow as tf
from official.nlp.modeling import layers
from official.projects.perceiver.modeling.layers import utils
class Encoder(tf.keras.layers.Layer):
"""Perceiver Encoder and Processor(s) layer.
This layer implements the Perceiver Encoder and Processor stack from
"Perceiver: General Perception with Iterative Attention".
(https://arxiv.org/abs/2103.03206)
It uses SelfAttention and CrossAttention modules.
It allows the user to choose the initial latent positional encodings.
References:
[Perceiver: General Perception with Iterative
Attention](https://arxiv.org/abs/2103.03206)
(https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py)
(https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py)
"""
def __init__(self,
self_attention_num_heads=8,
self_attention_widening_factor=1,
cross_attention_num_heads=8,
cross_attention_widening_factor=1,
num_self_attends_per_block=6,
num_blocks=8,
qk_last_dim=None,
v_last_dim=None,
dropout_prob=0.0,
dropout_attn_prob=0.0,
att_init_scale=1.0,
dense_init_scale=1.0,
norm_epsilon=1e-5,
name="encode_processor",
**kwargs):
"""Init.
Args:
self_attention_num_heads:
Number of attention heads in the self-attention transformer block.
self_attention_widening_factor:
Multiplier used to widen on the inner layer of the MLP step within the
self-attention transformer block.
cross_attention_num_heads:
Number of attention heads in the cross-attention transformer block.
cross_attention_widening_factor:
Multiplier used to widen on the inner layer of the MLP step within the
cross-attention transformer block.
num_self_attends_per_block:
Number of different self-attention encoders initialized per latent
perceiver block.
num_blocks:
Number of latent perceiver blocks.
qk_last_dim:
When set, determines the last dimension of the attention score output.
Check `qk_last_dim` doc in `utils.build_cross_attention_block_args` for
more details.
v_last_dim:
        It can impact the last dimension size of the value projection in
        multi-head attention output and `TransformerEncoderBlock`'s output.
For more details, check `v_last_dim` doc in
`utils._build_transformer_encoder_block_args`.
dropout_prob:
Dropout probability for the post-attention and output dropout.
dropout_attn_prob:
Dropout probability for within the attention layer.
att_init_scale:
Scale for the `tf.keras.initializers.VarianceScaling` used in attention
kernel.
dense_init_scale:
Scale for the `tf.keras.initializers.VarianceScaling` used in MLP
kernel.
norm_epsilon:
Epsilon value to initialize normalization layers.
name:
Sets the `tf.keras.layers.Layer` name.
**kwargs:
Any keyword arguments to pass through to `tf.keras.layers.Layer`.
"""
super().__init__(name=name, **kwargs)
self._input_is_1d = True
self._num_self_attends_per_block = num_self_attends_per_block
self._dropout_prob = dropout_prob
self._qk_last_dim = qk_last_dim
self._v_last_dim = v_last_dim
self._norm_epsilon = norm_epsilon
self._dropout_attn_prob = dropout_attn_prob
self._att_init_scale = att_init_scale
self._dense_init_scale = dense_init_scale
self._num_blocks = num_blocks
self._self_attention_widening_factor = self_attention_widening_factor
self._self_attention_num_heads = self_attention_num_heads
self._cross_attention_widening_factor = cross_attention_widening_factor
self._cross_attention_num_heads = cross_attention_num_heads
self._cross_attention_shape_for_attn = "kv"
self._cross_attention_use_query_residual = True
def build(self, input_shape):
embeddings_shape = input_shape[0]
z_shape = input_shape[1]
self._self_attention_encoder_blocks = []
for i in range(self._num_self_attends_per_block):
self._self_attention_encoder_blocks.append(layers.TransformerEncoderBlock(
name=f"self_attention_encoder_{i}",
**utils.build_self_attention_block_args(
(z_shape,),
widening_factor=self._self_attention_widening_factor,
dropout_prob=self._dropout_prob,
dropout_attn_prob=self._dropout_attn_prob,
num_heads=self._self_attention_num_heads,
att_init_scale=self._att_init_scale,
dense_init_scale=self._dense_init_scale,
qk_last_dim=self._qk_last_dim,
v_last_dim=self._v_last_dim,
norm_epsilon=self._norm_epsilon)))
self._cross_attention_encoder_block = layers.TransformerEncoderBlock(
name="cross_attention_encoder",
**utils.build_cross_attention_block_args(
(z_shape, embeddings_shape),
widening_factor=self._cross_attention_widening_factor,
dropout_prob=self._dropout_prob,
dropout_attn_prob=self._dropout_attn_prob,
num_heads=self._cross_attention_num_heads,
att_init_scale=self._att_init_scale,
dense_init_scale=self._dense_init_scale,
shape_for_attn=self._cross_attention_shape_for_attn,
use_query_residual=self._cross_attention_use_query_residual,
norm_epsilon=self._norm_epsilon,
qk_last_dim=self._qk_last_dim,
v_last_dim=self._v_last_dim))
def call(self, inputs, input_mask=None, training=None):
embeddings = inputs[0]
z = inputs[1]
if input_mask is None:
input_mask = tf.ones(tf.shape(embeddings)[:2], dtype=tf.int32)
attention_mask = utils.make_cross_attention_mask(
query_mask=tf.ones(tf.shape(z)[:2], dtype=tf.int32),
kv_mask=input_mask)
z = self._cross_attention_encoder_block(
(z, embeddings, attention_mask),
training=training)
for _ in range(self._num_blocks):
for self_attention_block in self._self_attention_encoder_blocks:
z = self_attention_block(z, training=training)
return z
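# Usage sketch (illustrative shapes, mirroring the unit tests): the layer
# takes a (data embeddings, latent array) pair and returns a tensor with the
# latent array's shape after one cross-attention and `num_blocks` rounds of
# self-attention.
def _example_encoder_usage():
  """Builds and calls a small `Encoder` on dummy inputs."""
  layer = Encoder(num_self_attends_per_block=2, num_blocks=1)
  embeddings = tf.ones((2, 16, 64))  # [batch, seq_len, embedding_width]
  latents = tf.ones((2, 8, 32))  # [batch, latent_len, latent_width]
  mask = tf.ones((2, 16), dtype=tf.int32)
  return layer((embeddings, latents), input_mask=mask)  # Shape [2, 8, 32].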
| 7,101 | 40.532164 | 146 | py |
models | models-master/official/projects/perceiver/modeling/layers/encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for encoder."""
import numpy as np
import tensorflow as tf
from official.projects.perceiver.modeling.layers import encoder
class EncoderTest(tf.test.TestCase):
def test_layer_creation(self):
test_layer = encoder.Encoder(
self_attention_num_heads=8,
cross_attention_num_heads=8)
sequence_length = 80
embedding_width = 800
    latent_length = 8
    latent_width = 80
    data_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
output_tensor = test_layer((data_input, latent_input))
self.assertEqual(
latent_input.shape.as_list(),
output_tensor.shape.as_list())
def test_layer_creation_with_mask(self):
test_layer = encoder.Encoder(
self_attention_num_heads=8,
cross_attention_num_heads=8)
sequence_length = 80
embedding_width = 800
    latent_length = 8
    latent_width = 80
    data_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
    mask_tensor = tf.keras.Input(
        shape=(sequence_length,),
        dtype=tf.int32)
output_tensor = test_layer(
(data_input, latent_input),
input_mask=mask_tensor)
self.assertEqual(
latent_input.shape.as_list(),
output_tensor.shape.as_list())
def test_layer_invocation(self):
test_layer = encoder.Encoder(
self_attention_num_heads=8,
cross_attention_num_heads=8)
sequence_length = 80
embedding_width = 800
    latent_length = 8
    latent_width = 80
    data_input = tf.keras.Input(
        shape=(sequence_length, embedding_width))
    latent_input = tf.keras.Input(
        shape=(latent_length, latent_width))
    mask_tensor = tf.keras.Input(
        shape=(sequence_length,),
        dtype=tf.int32)
output_tensor = test_layer(
(data_input, latent_input),
input_mask=mask_tensor)
# Create a model from the test layer.
model = tf.keras.Model(
((data_input, latent_input), mask_tensor),
output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, embedding_width))
mask_data = tf.ones((batch_size, sequence_length), dtype=tf.int32)
    latent_data = tf.ones((batch_size, latent_length, latent_width),
dtype=tf.float32)
_ = model.predict(((input_data, latent_data), mask_data))
def test_self_attention_widening_factor(self):
last_dim = 160
self_attention_widening_factor = 2
test_layer = encoder.Encoder(
self_attention_widening_factor=self_attention_widening_factor,
v_last_dim=last_dim)
some_sequence_length = 80
some_embedding_width = 800
    some_latent_length = 8
    some_latent_width = last_dim
    data_input = tf.keras.Input(
        shape=(some_sequence_length, some_embedding_width))
    latent_input = tf.keras.Input(
        shape=(some_latent_length, some_latent_width))
    mask_tensor = tf.keras.Input(
        shape=(some_sequence_length,), dtype=tf.int32)
test_layer((data_input, latent_input), input_mask=mask_tensor)
value = test_layer._self_attention_encoder_blocks[
0]._intermediate_dense.get_config()['output_shape'].pop()
self.assertEqual(last_dim * self_attention_widening_factor, value)
def test_cross_attention_widening_factor(self):
last_dim = 160
cross_attention_widening_factor = 2
test_layer = encoder.Encoder(
cross_attention_widening_factor=cross_attention_widening_factor,
v_last_dim=last_dim)
some_sequence_length = 80
some_embedding_width = 800
    some_latent_length = 8
    some_latent_width = last_dim
    data_input = tf.keras.Input(
        shape=(some_sequence_length, some_embedding_width))
    latent_input = tf.keras.Input(
        shape=(some_latent_length, some_latent_width))
    mask_tensor = tf.keras.Input(
        shape=(some_sequence_length,), dtype=tf.int32)
test_layer((data_input, latent_input), input_mask=mask_tensor)
value = test_layer._cross_attention_encoder_block._intermediate_dense.get_config(
)['output_shape'].pop()
self.assertEqual(last_dim * cross_attention_widening_factor, value)
def test_self_attention_num_heads(self):
# TODO(b/222634115) parameterize test.
self_attention_num_heads = 16
test_layer = encoder.Encoder(
self_attention_num_heads=self_attention_num_heads)
some_sequence_length = 80
some_embedding_width = 800
    some_latent_length = 8
    some_latent_width = 64
    data_input = tf.keras.Input(
        shape=(some_sequence_length, some_embedding_width))
    latent_input = tf.keras.Input(
        shape=(some_latent_length, some_latent_width))
    mask_tensor = tf.keras.Input(
        shape=(some_sequence_length,), dtype=tf.int32)
test_layer((data_input, latent_input), input_mask=mask_tensor)
value = test_layer._self_attention_encoder_blocks[
0]._attention_layer.get_config()['num_heads']
self.assertEqual(self_attention_num_heads, value)
def test_cross_attention_num_heads(self):
# TODO(b/222634115) parameterize test.
cross_attention_num_heads = 16
test_layer = encoder.Encoder(
cross_attention_num_heads=cross_attention_num_heads)
some_sequence_length = 80
some_embedding_width = 800
    some_latent_length = 8
    some_latent_width = 64
    data_input = tf.keras.Input(
        shape=(some_sequence_length, some_embedding_width))
    latent_input = tf.keras.Input(
        shape=(some_latent_length, some_latent_width))
    mask_tensor = tf.keras.Input(
        shape=(some_sequence_length,), dtype=tf.int32)
test_layer((data_input, latent_input), input_mask=mask_tensor)
value = test_layer._cross_attention_encoder_block._attention_layer.get_config(
)['num_heads']
self.assertEqual(cross_attention_num_heads, value)
def test_num_self_attends_per_block(self):
# TODO(b/222634115) parameterize test.
num_self_attends_per_block = 3
test_layer = encoder.Encoder(
num_self_attends_per_block=num_self_attends_per_block)
some_sequence_length = 80
some_embedding_width = 800
    some_latent_length = 8
    some_latent_width = 64
    data_input = tf.keras.Input(
        shape=(some_sequence_length, some_embedding_width))
    latent_input = tf.keras.Input(
        shape=(some_latent_length, some_latent_width))
    mask_tensor = tf.keras.Input(
        shape=(some_sequence_length,), dtype=tf.int32)
test_layer((data_input, latent_input), input_mask=mask_tensor)
self.assertLen(
test_layer._self_attention_encoder_blocks,
num_self_attends_per_block)
# TODO(b/222634115) num_blocks
# TODO(b/222634115) qk_last_dim validations
# TODO(b/222634115) v_last_dim validations
# TODO(b/222634115) dropout_prob validation
# TODO(b/222634115) dropout_attn_prob validation
# TODO(b/222634115) att_init_scale validation
# TODO(b/222634115) dense_init_scale validation
# TODO(b/222634115) cross_attention_use_query_residual validation
# (value passed correctly)
# TODO(b/222634115) norm_epsilon
# TODO(b/222634115) check latent dims
# TODO(b/222634115) make cross att mask validation when input_mask is None
# TODO(b/222634115) make cross att mask validation when input_mask is not None
if __name__ == '__main__':
tf.test.main()
| 8,211 | 36.669725 | 85 | py |
models | models-master/official/projects/perceiver/modeling/layers/decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver basic decoder."""
import collections
import tensorflow as tf
from official.nlp.modeling import layers
from official.projects.perceiver.modeling.layers import utils
class Decoder(tf.keras.layers.Layer):
"""Perceiver Decoder layer.
Uses cross attention decoder layer.
This layer implements a Perceiver Decoder from
"Perceiver: General Perception with Iterative Attention".
(https://arxiv.org/abs/2103.03206)
References:
[Attention Is All You Need](https://arxiv.org/abs/1706.03762)
[Perceiver: General Perception with Iterative
Attention](https://arxiv.org/abs/2103.03206)
(https://github.com/deepmind/deepmind-research/blob/master/perceiver/perceiver.py)
(https://github.com/tensorflow/models/blob/871c4e0a393ef4385534bee55354a5df8aa1ccf4/official/nlp/modeling/layers/transformer_encoder_block.py)
"""
def __init__(self,
output_last_dim,
qk_last_dim=None,
v_last_dim=None,
use_query_residual=False,
output_w_init=None,
num_heads=1,
name="decoder",
**kwargs):
"""Init.
Args:
output_last_dim:
Last dim size for output.
qk_last_dim:
When set, determines the last dimension of the attention score output.
Check `qk_last_dim` doc in `utils.build_cross_attention_block_args`.
v_last_dim:
When set, determines the value's last dimension in the multi-head
attention.
Check `v_last_dim` doc in `utils._build_transformer_encoder_block_args`.
use_query_residual:
Toggle to execute residual connection after attention.
output_w_init:
        Output layer kernel initializer.
num_heads:
Number of attention heads for the `TransformerEncoderBlock`.
name:
Sets the `tf.keras.layers.Layer` name.
**kwargs:
Any keyword arguments to pass through to `tf.keras.layers.Layer`.
"""
super().__init__(name=name, **kwargs)
self._output_last_dim = output_last_dim
self._output_w_init = output_w_init
self._use_query_residual = use_query_residual
self._qk_last_dim = qk_last_dim
self._v_last_dim = v_last_dim
self._final_project = False # Make variable if needed
self._num_heads = num_heads
# Omitted `concat_preprocessed_input` for MLM use-case.
def build(self, input_shape):
"""Build layers using `input_shape`.
Args:
input_shape:
Input shape(s) of the layer call.
"""
decoder_query_shape = input_shape[0]
z_shape = input_shape[1]
self._decoding_cross_attn = layers.TransformerEncoderBlock(
**utils.build_cross_attention_block_args(
(decoder_query_shape, z_shape),
widening_factor=1,
dropout_prob=0.0,
num_heads=self._num_heads,
shape_for_attn="kv",
qk_last_dim=self._qk_last_dim,
v_last_dim=self._v_last_dim,
use_query_residual=self._use_query_residual))
def call(self, inputs, training=None, query_mask=None):
"""Return decoded output of latent vector via the query.
Args:
inputs:
Expect inputs to be a tuple of perceiver's decoder query tensor and
latent tensor (z). For the cross attention block, `z` is the key-value
tensor and decoder query is the query tensor.
Latent tensor comes from the self-attention processing blocks and
decoder query comes from users to query for the desired output.
training:
Flag to indicate training status.
query_mask:
mask used to create the attention mask for the query tensor in the
cross attention block.
Returns:
`tf.Tensor` decoded output of latent vector via the query.
"""
if not isinstance(inputs, collections.abc.Sequence):
raise ValueError("`inputs` must be a sequence.")
if len(inputs) != 2:
raise ValueError("`inputs` must have two elements.")
query, z = inputs
# Cross-attention decoding.
# key, value: B x N x K; query: B x M x K
# Attention maps -> B x N x M
# Output -> B x M x K
    # The cross-attention block is constructed lazily in `build`, once the
    # input shapes are known.
if query_mask is None:
attention_mask = None
else:
attention_mask = utils.make_cross_attention_mask(
query_mask=query_mask,
kv_mask=tf.ones(tf.shape(z)[:2], dtype=tf.int32))
output = self._decoding_cross_attn(
(query, z, attention_mask),
training=training)
return output
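# Usage sketch (illustrative shapes, mirroring the unit tests): the decoder
# cross-attends a query shaped like the desired output against the latent
# array and returns a tensor with the query's shape.
def _example_decoder_usage():
  """Builds and calls a small `Decoder` on dummy inputs."""
  layer = Decoder(output_last_dim=64, num_heads=8)
  query = tf.ones((2, 16, 64))  # [batch, output_len, output_last_dim]
  z = tf.ones((2, 8, 32))  # [batch, latent_len, latent_width]
  mask = tf.ones((2, 16), dtype=tf.int32)
  return layer((query, z), query_mask=mask)  # Shape [2, 16, 64].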
| 5,196 | 34.114865 | 146 | py |
models | models-master/official/projects/perceiver/modeling/layers/utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
import tensorflow as tf
from official.projects.perceiver.modeling.layers import utils
class PerceiverUtilsSelfAttentionBlockArgsTest(tf.test.TestCase):
def test_output_last_dim_is_same_as_input_last_dim(self):
q_seq_len = 10
input_last_dim = 30
some_num_heads = 2
some_input_shape = ((2, q_seq_len, input_last_dim),)
args = utils.build_self_attention_block_args(
some_input_shape,
num_heads=some_num_heads)
self.assertEqual(args['output_last_dim'], input_last_dim)
def test_value_dim_is_same_as_input_last_dim_div_num_heads(self):
q_seq_len = 10
input_last_dim = 30
some_num_heads = 2
some_input_shape = ((2, q_seq_len, input_last_dim),)
args = utils.build_self_attention_block_args(
some_input_shape,
num_heads=some_num_heads)
self.assertEqual(args['value_dim'], input_last_dim // some_num_heads)
# TODO(b/222634115) Add tests for `build_self_attention_block_args` for
# better coverage
class PerceiverUtilsCrossAttentionBlockArgsTest(tf.test.TestCase):
  def test_output_last_dim_is_same_as_q_input_last_dim(self):
some_batch_size = 2
q_seq_len = 10
q_input_last_dim = 30
kv_seq_len = 6
kv_input_last_dim = 60
some_num_heads = 2
some_input_shape = (
(some_batch_size, q_seq_len, q_input_last_dim),
(some_batch_size, kv_seq_len, kv_input_last_dim))
args = utils.build_cross_attention_block_args(
some_input_shape,
num_heads=some_num_heads)
self.assertEqual(args['output_last_dim'], q_input_last_dim)
# TODO(b/222634115) Add tests for `build_cross_attention_block_args` for
# better coverage
if __name__ == '__main__':
tf.test.main()
| 2,302 | 30.121622 | 74 | py |
models | models-master/official/projects/perceiver/tasks/pretrain.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for perceiver wordpiece tokenized masked language model (MLM)."""
import tensorflow as tf
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.tasks import masked_lm
from official.projects.perceiver.configs import encoders
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.models import pretrainer
from official.projects.perceiver.modeling.networks import positional_decoder
@task_factory.register_task_cls(perceiver.PretrainConfig)
class PretrainTask(masked_lm.MaskedLMTask):
"""Task for masked language modeling for wordpiece tokenized perceiver."""
def build_model(self, params=None):
"""Creates perceiver pretrainer model architecture.
Args:
params:
The task configuration instance, which can be any of dataclass,
ConfigDict, namedtuple, etc.
Returns:
A model instance.
"""
config = params or self.task_config.model
sequence_encoder_cfg = config.encoder
encoder_network = encoders.build_encoder(sequence_encoder_cfg)
decoder_cfg = config.decoder
    decoder_ = decoder.Decoder(**decoder_cfg.decoder.as_dict())
mlm_decoder = positional_decoder.PositionalDecoder(
decoder=decoder_,
output_index_dim=decoder_cfg.output_index_dim,
z_index_dim=decoder_cfg.z_index_dim,
d_latents=decoder_cfg.d_latents,
d_model=decoder_cfg.d_model,
position_encoding_intializer_stddev=decoder_cfg
.position_encoding_intializer_stddev)
return pretrainer.Pretrainer(
mlm_activation=tf_utils.get_activation(config.mlm_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=config.mlm_initializer_range),
encoder=encoder_network,
decoder=mlm_decoder)
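# Usage sketch (illustrative, mirroring the unit tests): builds the MLM
# pretrainer model directly from a small config. The vocabulary size and block
# count are assumptions copied from the test suite, not production settings.
def _example_build_pretrain_model():
  """Builds the wordpiece MLM pretrainer from a toy config."""
  config = perceiver.PretrainerConfig(
      encoder=perceiver.SequenceEncoderConfig(
          vocab_size=30_522,
          encoder=perceiver.EncoderConfig(num_self_attends_per_block=2)))
  return PretrainTask(None).build_model(config)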
| 2,498 | 39.306452 | 76 | py |
models | models-master/official/projects/perceiver/tasks/sentence_prediction_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.sentence_prediction."""
import functools
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import sentence_prediction
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.tasks import pretrain
from official.projects.perceiver.tasks import sentence_prediction as perceiver_pred
def _create_fake_dataset(output_path, seq_length, num_classes, num_examples):
"""Creates a fake dataset.
Args:
output_path:
output path for the writer to serialize the dataset.
seq_length:
sequence length of the data.
num_classes:
Number of classes in the sentence prediction output. This is used to
determine if the label id feature should be for regression or
classification.
num_examples:
number of fake examples to create.
"""
with tf.io.TFRecordWriter(output_path) as writer:
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(values)))
def create_float_feature(values):
return tf.train.Feature(
float_list=tf.train.FloatList(value=np.ravel(values)))
for i in range(num_examples):
features = {}
      input_ids = np.random.randint(100, size=(seq_length,))
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["example_id"] = create_int_feature([i])
if num_classes == 1:
features["label_ids"] = create_float_feature([np.random.random()])
else:
features["label_ids"] = create_int_feature(
[np.random.random_integers(0, num_classes - 1, size=())])
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._train_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path="dummy", seq_length=128, global_batch_size=1))
def get_model_config(self, num_classes):
return perceiver.ClassificationConfig(
num_classes=num_classes,
encoder=perceiver.SequenceEncoderConfig(
vocab_size=30_522,
encoder=perceiver.EncoderConfig(
num_self_attends_per_block=2)))
def _run_task(self, config):
task = perceiver_pred.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = strategy.distribute_datasets_from_function(
functools.partial(task.build_inputs, config.train_data))
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
# model.save(os.path.join(self.get_temp_dir(), "saved_model"))
# TODO(b/222634115) fix save
return task.validation_step(next(iterator), model, metrics=metrics)
def test_task(self):
# Saves a checkpoint.
pretrain_cfg = perceiver.PretrainerConfig(
encoder=perceiver.SequenceEncoderConfig(
vocab_size=30_522,
encoder=perceiver.EncoderConfig(
num_self_attends_per_block=2)))
pretrain_model = pretrain.PretrainTask(
None).build_model(pretrain_cfg)
# The model variables will be created after the forward call.
_ = pretrain_model(pretrain_model.inputs)
ckpt = tf.train.Checkpoint(
model=pretrain_model, **pretrain_model.checkpoint_items)
init_path = ckpt.save(self.get_temp_dir())
# Creates the task.
config = perceiver.SentencePredictionConfig(
init_checkpoint=init_path,
model=self.get_model_config(num_classes=2),
train_data=self._train_data_config)
task = perceiver_pred.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.initialize(model)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
@parameterized.named_parameters(
{
"testcase_name":
"regression",
"num_classes":
1,
"expected_loss_predicate":
lambda loss: loss > 1.0,
"metric": tf.keras.metrics.MeanSquaredError,
},
{
"testcase_name":
"classification",
"num_classes":
2,
"expected_loss_predicate":
lambda loss: loss < 1.0,
"metric": tf.keras.metrics.SparseCategoricalAccuracy
},
)
def test_metrics_and_losses(self, num_classes, expected_loss_predicate,
metric):
config = perceiver.SentencePredictionConfig(
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(num_classes),
train_data=self._train_data_config)
task = perceiver_pred.SentencePredictionTask(config)
model = task.build_model()
metrics = task.build_metrics()
self.assertIsInstance(metrics[0], metric)
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
logs = task.validation_step(next(iterator), model, metrics=metrics)
loss = logs["loss"].numpy()
self.assertTrue(expected_loss_predicate(loss))
@parameterized.named_parameters(
{
"testcase_name": "matthews_corrcoef",
"num_classes": 2,
"metric_type": "matthews_corrcoef"
}, {
"testcase_name": "pearson_spearman_corr",
"num_classes": 1,
"metric_type": "pearson_spearman_corr"
})
def test_np_metrics(self, metric_type, num_classes):
config = perceiver.SentencePredictionConfig(
metric_type=metric_type,
init_checkpoint=self.get_temp_dir(),
model=self.get_model_config(num_classes),
train_data=self._train_data_config)
task = perceiver_pred.SentencePredictionTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
strategy = tf.distribute.get_strategy()
distributed_outputs = strategy.run(
functools.partial(task.validation_step, model=model),
args=(next(iterator),))
outputs = tf.nest.map_structure(strategy.experimental_local_results,
distributed_outputs)
aggregated = task.aggregate_logs(step_outputs=outputs)
aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
self.assertIn(metric_type, task.reduce_aggregated_logs(aggregated))
def test_np_metrics_cola_partial_batch(self):
train_data_path = os.path.join(self.get_temp_dir(), "train.tf_record")
num_examples = 5
global_batch_size = 8
seq_length = 16
_create_fake_dataset(
train_data_path,
seq_length=seq_length,
num_classes=2,
num_examples=num_examples)
train_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=train_data_path,
seq_length=seq_length,
is_training=True,
label_type="int",
global_batch_size=global_batch_size,
drop_remainder=False,
include_example_id=True))
config = perceiver.SentencePredictionConfig(
metric_type="matthews_corrcoef",
model=self.get_model_config(2),
train_data=train_data_config)
outputs = self._run_task(config)
self.assertEqual(outputs["sentence_prediction"].shape.as_list(), [8, 1])
@parameterized.named_parameters(
{
"testcase_name": "classification",
"num_classes": 5,
}, {
"testcase_name": "regression",
"num_classes": 1,
})
def test_prediction(self, num_classes):
config = perceiver.SentencePredictionConfig(
model=self.get_model_config(num_classes=num_classes),
train_data=self._train_data_config)
task = perceiver_pred.SentencePredictionTask(config)
model = task.build_model()
test_data_path = os.path.join(self.get_temp_dir(), "test.tf_record")
seq_length = 16
num_examples = 100
_create_fake_dataset(
test_data_path,
seq_length=seq_length,
num_classes=num_classes,
num_examples=num_examples)
test_data_config = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=test_data_path,
seq_length=seq_length,
is_training=False,
label_type="int" if num_classes > 1 else "float",
global_batch_size=16,
drop_remainder=False,
include_example_id=True))
predictions = sentence_prediction.predict(task, test_data_config, model)
self.assertLen(predictions, num_examples)
for prediction in predictions:
self.assertEqual(prediction.dtype,
tf.int64 if num_classes > 1 else tf.float32)
if __name__ == "__main__":
tf.test.main()
| 10,241 | 35.448399 | 83 | py |
models | models-master/official/projects/perceiver/tasks/sentence_prediction.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sentence prediction (classification) task."""
from official.core import task_factory
from official.nlp.tasks import sentence_prediction
from official.projects.perceiver.configs import encoders
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.modeling.layers import decoder
from official.projects.perceiver.modeling.models import classifier
from official.projects.perceiver.modeling.networks import positional_decoder
@task_factory.register_task_cls(perceiver.SentencePredictionConfig)
class SentencePredictionTask(sentence_prediction.SentencePredictionTask):
"""Task object for sentence_prediction.
  Note: kept similar to nlp.tasks.sentence_prediction.py to allow a potential
  future merge.
"""
def build_model(self):
"""Creates perceiver classification model architecture.
Returns:
A model instance.
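    Example (a minimal, hedged sketch; the default-constructed config below is
    a placeholder and would normally come from an experiment factory):
    >>> config = perceiver.SentencePredictionConfig()
    >>> task = SentencePredictionTask(config)
    >>> model = task.build_model()  # a classifier.Classifier instance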
"""
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
decoder_config = self.task_config.model.decoder
decoder_ = decoder.Decoder(decoder_config.decoder.as_dict())
classification_decoder = positional_decoder.PositionalDecoder(
decoder=decoder_,
d_model=decoder_config.d_model,
output_index_dim=decoder_config.output_index_dim,
z_index_dim=decoder_config.z_index_dim,
d_latents=decoder_config.d_latents,
position_encoding_intializer_stddev=decoder_config
.position_encoding_intializer_stddev)
return classifier.Classifier(
network=encoder_network,
decoder=classification_decoder,
num_classes=self.task_config.model.num_classes)
| 2,230 | 39.563636 | 78 | py |
models | models-master/official/projects/perceiver/tasks/pretrain_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.masked_lm."""
import tensorflow as tf
import tensorflow_datasets as tfds
from official.nlp.data import pretrain_dataloader
from official.projects.perceiver.configs import perceiver
from official.projects.perceiver.tasks import pretrain as tasks
_NUM_EXAMPLES = 10
def _gen_fn():
word_ids = tf.constant([1, 1], dtype=tf.int32)
mask = tf.constant([1, 1], dtype=tf.int32)
lm_mask = tf.constant([1, 1], dtype=tf.int32)
return {
'file_name': 'test',
'masked_lm_positions': lm_mask,
'input_word_ids': word_ids,
'input_mask': mask,
}
def _as_dataset(self, *args, **kwargs):
del args
del kwargs
return tf.data.Dataset.from_generator(
lambda: (_gen_fn() for i in range(_NUM_EXAMPLES)),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
def _fake_build_inputs(self, params, input_context=None): # pylint: disable=unused-argument
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class PretrainTaskTest(tf.test.TestCase):
def setUp(self):
super().setUp()
tasks.PretrainTask.build_inputs = _fake_build_inputs
def test_task(self):
config = perceiver.PretrainConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
global_batch_size=512,
use_next_sentence_label=False,
use_v2_feature_names=True),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
global_batch_size=512,
is_training=False,
use_next_sentence_label=False,
use_v2_feature_names=True))
task = tasks.PretrainTask(config)
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
task.validation_step(next(iterator), model, metrics=metrics)
# Saves a checkpoint.
_ = tf.train.Checkpoint(model=model, **model.checkpoint_items)
# ckpt.save(config.init_checkpoint)
# TODO(b/222634115) fix ckpt.save
task.initialize(model)
def test_train_step(self):
config = perceiver.PretrainConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
global_batch_size=512,
use_next_sentence_label=False,
use_v2_feature_names=True),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
input_path='dummy',
global_batch_size=512,
is_training=False,
use_next_sentence_label=False,
use_v2_feature_names=True))
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = tasks.PretrainTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
metrics = task.build_metrics()
iterator = iter(dataset)
opt_cfg = perceiver._MLM_WORDPIECE_TRAINER.optimizer_config
optimizer = tasks.PretrainTask.create_optimizer(opt_cfg)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
# TODO(b/222634115) add test coverage.
if __name__ == '__main__':
tf.test.main()
| 4,425 | 32.278195 | 92 | py |
models | models-master/official/projects/mosaic/mosaic_tasks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task definition for image semantic segmentation with MOSAIC models."""
from absl import logging
import tensorflow as tf
from official.core import task_factory
from official.projects.mosaic.configs import mosaic_config
from official.projects.mosaic.modeling import mosaic_model
from official.vision.tasks import semantic_segmentation as seg_tasks
@task_factory.register_task_cls(mosaic_config.MosaicSemanticSegmentationTask)
class MosaicSemanticSegmentationTask(seg_tasks.SemanticSegmentationTask):
"""A task for semantic segmentation using MOSAIC model."""
  # Note: `build_model` is overridden to add a `training` flag that indicates
  # whether the model is built for training or for eval. This ensures the
  # model is initialized with the proper `input_shape` when it is trained and
  # evaluated with different input shapes, e.g. trained with cropping but
  # evaluated at the original shape.
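  # Concretely, the dummy initialization input below comes from
  # `train_data.crop_size` (when set) if `training=True`, and from
  # `validation_data.output_size` otherwise.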
def build_model(self, training: bool = True) -> tf.keras.Model:
"""Builds MOSAIC segmentation model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
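    # For example, l2_weight_decay=1e-4 becomes tf.keras.regularizers.l2(5e-5).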
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = mosaic_model.build_mosaic_segmentation_model(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
# Note: Create a dummy input and call model instance to initialize.
# This ensures all the layers are built; otherwise some layers may be
# missing from the model and cannot be associated with variables from
# a loaded checkpoint. The input size is determined by whether the model
# is built for performing training or eval.
if training:
input_size = self.task_config.train_data.output_size
crop_size = self.task_config.train_data.crop_size
if crop_size:
input_size = crop_size
else:
input_size = self.task_config.validation_data.output_size
if len(self.task_config.model.input_size) == 3:
input_channel = self.task_config.model.input_size[-1]
else:
input_channel = 3
dummy_input = tf.ones(shape=[1] + input_size + [input_channel])
model(dummy_input)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'neck' in self.task_config.init_checkpoint_modules:
ckpt_items.update(neck=model.neck)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
| 4,303 | 40.786408 | 78 | py |
models | models-master/official/projects/mosaic/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration on MOSAIC project."""
# pylint: disable=unused-import
from official.projects.mosaic import mosaic_tasks
from official.projects.mosaic.configs import mosaic_config
from official.projects.mosaic.modeling import mosaic_model
from official.projects.mosaic.qat.configs import mosaic_config as mosaic_qat_config
from official.projects.mosaic.qat.tasks import mosaic_tasks as mosaic_qat_tasks
| 1,037 | 46.181818 | 83 | py |
models | models-master/official/projects/mosaic/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training driver for MOSAIC models."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import base_trainer
from official.core import config_definitions
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# Import MOSAIC libraries to register the model into tf.vision
# model garden factory.
# pylint: disable=unused-import
from official.projects.mosaic import mosaic_tasks
from official.projects.mosaic import registry_imports as mosaic_registry_imports
from official.vision import registry_imports
# pylint: enable=unused-import
FLAGS = flags.FLAGS
# Note: we build a custom trainer here because of the customized `build_model`
# method in `MosaicSemanticSegmentationTask`.
def _build_mosaic_trainer(params: config_definitions.ExperimentConfig,
task: mosaic_tasks.MosaicSemanticSegmentationTask,
model_dir: str, train: bool,
evaluate: bool) -> base_trainer.Trainer:
"""Creates custom trainer."""
checkpoint_exporter = train_lib.maybe_create_best_ckpt_exporter(
params, model_dir)
model = task.build_model(train)
optimizer = train_utils.create_optimizer(task, params)
trainer = base_trainer.Trainer(
params,
task,
model=model,
optimizer=optimizer,
train=train,
evaluate=evaluate,
checkpoint_exporter=checkpoint_exporter)
return trainer
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
  # Sets the mixed precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can significantly speed up models by using float16 on GPUs and bfloat16 on
  # TPUs. loss_scale takes effect only when the dtype is float16.
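  # For example, setting `runtime.mixed_precision_dtype: 'mixed_bfloat16'` in
  # the experiment config computes in bfloat16 while keeping variables float32.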
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
mosaic_trainer = _build_mosaic_trainer(
task=task,
params=params,
model_dir=model_dir,
train='train' in FLAGS.mode,
evaluate='eval' in FLAGS.mode)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir,
trainer=mosaic_trainer)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
app.run(main)
| 3,932 | 36.457143 | 80 | py |
models | models-master/official/projects/mosaic/mosaic_tasks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mosaic task."""
# pylint: disable=unused-import
import os
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.mosaic import mosaic_tasks
from official.projects.mosaic.configs import mosaic_config as exp_cfg
from official.vision.dataloaders import tfexample_utils
class MosaicTaskTest(parameterized.TestCase, tf.test.TestCase):
def _create_test_tfrecord(self, tfrecord_file, example, num_samples):
examples = [example] * num_samples
tfexample_utils.dump_to_tfrecord(
record_file=tfrecord_file, tf_examples=examples)
@parameterized.parameters(
('mosaic_mnv35_cityscapes', True),
('mosaic_mnv35_cityscapes', False),
)
def test_semantic_segmentation_task(self, test_config, is_training):
"""Tests mosaic task for training and eval using toy configs."""
input_image_size = [1024, 2048]
test_tfrecord_file = os.path.join(self.get_temp_dir(), 'seg_test.tfrecord')
example = tfexample_utils.create_segmentation_test_example(
image_height=input_image_size[0],
image_width=input_image_size[1],
image_channel=3)
self._create_test_tfrecord(
tfrecord_file=test_tfrecord_file, example=example, num_samples=10)
config = exp_factory.get_exp_config(test_config)
# Modify config to suit local testing
config.task.model.input_size = [None, None, 3]
config.trainer.steps_per_loop = 1
config.task.train_data.global_batch_size = 1
config.task.validation_data.global_batch_size = 1
config.task.train_data.output_size = [1024, 2048]
config.task.validation_data.output_size = [1024, 2048]
config.task.train_data.crop_size = [512, 512]
config.task.train_data.shuffle_buffer_size = 2
config.task.validation_data.shuffle_buffer_size = 2
config.task.validation_data.input_path = test_tfrecord_file
config.task.train_data.input_path = test_tfrecord_file
config.train_steps = 1
config.task.model.num_classes = 256
config.task.model.head.num_classes = 256
config.task.model.head.decoder_projected_filters = [256, 256]
task = mosaic_tasks.MosaicSemanticSegmentationTask(config.task)
model = task.build_model(training=is_training)
metrics = task.build_metrics(training=is_training)
strategy = tf.distribute.get_strategy()
    data_config = (config.task.train_data if is_training
                   else config.task.validation_data)
dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
data_config)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if is_training:
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
else:
logs = task.validation_step(next(iterator), model, metrics=metrics)
self.assertIn('loss', logs)
if __name__ == '__main__':
tf.test.main()
| 3,732 | 39.576087 | 88 | py |
models | models-master/official/projects/mosaic/qat/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE = XX
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.mosaic import registry_imports # pylint: disable=unused-import
from official.projects.mosaic.configs import mosaic_config
from official.projects.mosaic.qat.serving import export_module
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
_EXPERIMENT = flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
_EXPORT_DIR = flags.DEFINE_string('export_dir', None, 'The export directory.')
_CHECKPOINT_PATH = flags.DEFINE_string('checkpoint_path', None,
'Checkpoint path.')
_CONFIG_FILE = flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', None, 'The batch size.')
_IMAGE_TYPE = flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example` and `tflite`.')
_INPUT_IMAGE_SIZE = flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
_EXPORT_CHECKPOINT_SUBDIR = flags.DEFINE_string(
'export_checkpoint_subdir', 'checkpoint',
'The subdirectory for checkpoints.')
_EXPORT_SAVED_MODEL_SUBDIR = flags.DEFINE_string(
'export_saved_model_subdir', 'saved_model',
'The subdirectory for saved model.')
_LOG_MODEL_FLOPS_AND_PARAMS = flags.DEFINE_bool(
'log_model_flops_and_params', False,
'If true, logs model flops and parameters.')
_INPUT_NAME = flags.DEFINE_string(
'input_name', None,
    'Input tensor name in signature def. Defaults to None, which '
    'produces the input tensor name `inputs`.')
def main(_):
params = exp_factory.get_exp_config(_EXPERIMENT.value)
for config_file in _CONFIG_FILE.value or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if _PARAMS_OVERRIDE.value:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=True)
params.validate()
params.lock()
input_image_size = [int(x) for x in _INPUT_IMAGE_SIZE.value.split(',')]
if isinstance(params.task, mosaic_config.MosaicSemanticSegmentationTask):
export_module_cls = export_module.MosaicModule
else:
raise TypeError(f'Export module for {type(params.task)} is not supported.')
module = export_module_cls(
params=params,
batch_size=_BATCH_SIZE.value,
input_image_size=input_image_size,
input_type=_IMAGE_TYPE.value,
num_channels=3)
export_saved_model_lib.export_inference_graph(
input_type=_IMAGE_TYPE.value,
batch_size=_BATCH_SIZE.value,
input_image_size=input_image_size,
params=params,
checkpoint_path=_CHECKPOINT_PATH.value,
export_dir=_EXPORT_DIR.value,
export_checkpoint_subdir=_EXPORT_CHECKPOINT_SUBDIR.value,
export_saved_model_subdir=_EXPORT_SAVED_MODEL_SUBDIR.value,
export_module=module,
log_model_flops_and_params=_LOG_MODEL_FLOPS_AND_PARAMS.value,
input_name=_INPUT_NAME.value)
if __name__ == '__main__':
app.run(main)
| 5,064 | 36.798507 | 86 | py |
models | models-master/official/projects/mosaic/qat/serving/export_module.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export modules for QAT model serving/inference."""
import tensorflow as tf
from official.projects.mosaic.modeling import mosaic_model
from official.projects.mosaic.qat.modeling import factory as qat_factory
from official.vision.serving import semantic_segmentation
class MosaicModule(semantic_segmentation.SegmentationModule):
"""MOSAIC Module."""
def _build_model(self) -> tf.keras.Model:
input_specs = tf.keras.layers.InputSpec(shape=[1] +
self._input_image_size + [3])
model = mosaic_model.build_mosaic_segmentation_model(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
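    # One dummy forward pass builds all variables before optionally adding QAT.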
dummy_input = tf.ones(shape=input_specs.shape)
model(dummy_input)
# Check whether "quantization" is in task config to support both
# `quantized` and `non-quantized` version of Mosaic.
if hasattr(self.params.task, "quantization"):
return qat_factory.build_qat_mosaic_model(
model, self.params.task.quantization, input_specs)
return model
| 1,685 | 37.318182 | 74 | py |
models | models-master/official/projects/mosaic/qat/serving/export_tflite.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to convert a saved model to TFLite model for the QAT model."""
from absl import app
from official.projects.mosaic import registry_imports # pylint: disable=unused-import
from official.vision.serving import export_tflite
if __name__ == '__main__':
app.run(export_tflite.main)
| 901 | 35.08 | 86 | py |
models | models-master/official/projects/mosaic/qat/configs/mosaic_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mosaic configuration definition."""
import dataclasses
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.mosaic.configs import mosaic_config
from official.projects.qat.vision.configs import common
@dataclasses.dataclass
class MosaicSemanticSegmentationTask(
mosaic_config.MosaicSemanticSegmentationTask):
quantization: Optional[common.Quantization] = None
@exp_factory.register_config_factory('mosaic_mnv35_cityscapes_qat')
def mosaic_mnv35_cityscapes() -> cfg.ExperimentConfig:
"""Experiment configuration of image segmentation task with QAT."""
config = mosaic_config.mosaic_mnv35_cityscapes()
task = MosaicSemanticSegmentationTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
config.task = task
return config
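# A minimal retrieval sketch (hedged; the name must match the registration
# decorator above):
#   config = exp_factory.get_exp_config('mosaic_mnv35_cityscapes_qat')
#   assert config.task.quantization is not None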
| 1,469 | 36.692308 | 74 | py |
models | models-master/official/projects/mosaic/qat/configs/mosaic_config_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mosaic."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.mosaic.configs import mosaic_config as exp_cfg
from official.projects.mosaic.qat.configs import mosaic_config as qat_exp_cfg
from official.projects.qat.vision.configs import common
class MosaicConfigTest(tf.test.TestCase, parameterized.TestCase):
def test_mosaic_configs(self):
config = exp_factory.get_exp_config('mosaic_mnv35_cityscapes_qat')
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task,
qat_exp_cfg.MosaicSemanticSegmentationTask)
self.assertIsInstance(config.task.model,
exp_cfg.MosaicSemanticSegmentationModel)
self.assertIsInstance(config.task.quantization, common.Quantization)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,790 | 37.934783 | 77 | py |
models | models-master/official/projects/mosaic/qat/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.mosaic.modeling import mosaic_blocks
from official.projects.mosaic.modeling import mosaic_head
from official.projects.mosaic.modeling import mosaic_model
from official.projects.mosaic.qat.modeling import factory as qat_factory
from official.projects.qat.vision.configs import common
from official.vision.modeling import backbones
from official.vision.modeling.heads import segmentation_heads
class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, [4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(128, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(128, [1, 4, 8], [3, 2], ['sum_merge', 'sum_merge']),
(128, [1, 4, 8], [3, 2], ['concat_merge', 'concat_merge']),
(512, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']),
(256, [4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(256, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(256, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']),
)
def test_mosaic_segmentation_model(self, input_size, pyramid_pool_bin_nums,
decoder_input_levels,
decoder_stage_merge_styles):
"""Test for building and calling of a MOSAIC segmentation network."""
num_classes = 32
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.MobileNet(model_id='MobileNetMultiAVGSeg')
encoder_input_level = 4
# Create a regular FP32 MOSAIC model.
neck = mosaic_blocks.MosaicEncoderBlock(
encoder_input_level=encoder_input_level,
branch_filter_depths=[64, 64],
conv_kernel_sizes=[3, 5],
pyramid_pool_bin_nums=pyramid_pool_bin_nums)
head = mosaic_head.MosaicDecoderHead(
num_classes=num_classes,
decoder_input_levels=decoder_input_levels,
decoder_stage_merge_styles=decoder_stage_merge_styles,
decoder_filters=[64, 64],
decoder_projected_filters=[32, 32])
mask_scoring_head = segmentation_heads.MaskScoring(
num_classes=num_classes,
num_convs=1,
num_filters=32,
fc_dims=128,
num_fcs=2,
fc_input_size=[8, 8],
)
model = mosaic_model.MosaicSegmentationModel(
backbone=backbone,
head=head,
neck=neck,
mask_scoring_head=mask_scoring_head,
)
inputs = np.random.rand(2, input_size, input_size, 3)
input_specs = tf.keras.layers.InputSpec(shape=inputs.shape)
expected_outputs = model(inputs)
# Create a quantized MOSAIC model from the regular FP32 model instance.
quantization_config = common.Quantization()
quantized_model = qat_factory.build_qat_mosaic_model(
model=model,
quantization=quantization_config,
input_specs=input_specs)
actual_output = quantized_model(inputs)
self.assertAllEqual(actual_output['logits'].numpy().shape,
expected_outputs['logits'].numpy().shape)
if __name__ == '__main__':
tf.test.main()
| 3,796 | 37.353535 | 77 | py |
models | models-master/official/projects/mosaic/qat/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
# Import libraries
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.mosaic.modeling import mosaic_blocks
from official.projects.mosaic.modeling import mosaic_head
from official.projects.mosaic.modeling import mosaic_model
from official.projects.mosaic.qat.modeling.heads import mosaic_head as qat_mosaic_head
from official.projects.mosaic.qat.modeling.layers import nn_blocks as qat_nn_blocks
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.modeling.layers import nn_layers as qat_nn_layers
from official.projects.qat.vision.quantization import helper
from official.projects.qat.vision.quantization import schemes
def build_qat_mosaic_model(
model: tf.keras.Model,
quantization: common.Quantization,
input_specs: tf.keras.layers.InputSpec) -> tf.keras.Model:
"""Applies quantization aware training for mosaic segmentation model.
Args:
model: The model applying quantization aware training.
quantization: The Quantization config.
input_specs: The shape specifications of input tensor.
Returns:
    The model with optimization techniques applied.
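  Example (a hedged sketch; `fp32_model` is assumed to be a prebuilt
  MosaicSegmentationModel):
  >>> specs = tf.keras.layers.InputSpec(shape=[1, 512, 512, 3])
  >>> qat_model = build_qat_mosaic_model(
  ...     fp32_model, common.Quantization(), specs)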
"""
original_checkpoint = quantization.pretrained_original_checkpoint
if original_checkpoint is not None:
ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
status = ckpt.read(original_checkpoint)
status.expect_partial().assert_existing_objects_matched()
scope_dict = {
'L2': tf.keras.regularizers.l2,
}
model.use_legacy_config = True # Ensures old Keras serialization format
# Apply QAT to backbone (a tf.keras.Model) first, and then neck and head.
with tfmot.quantization.keras.quantize_scope(scope_dict):
annotated_backbone = tfmot.quantization.keras.quantize_annotate_model(
model.backbone)
optimized_backbone = tfmot.quantization.keras.quantize_apply(
annotated_backbone, scheme=schemes.Default8BitQuantizeScheme())
# Check for valid encoder and head.
if not isinstance(model.head, mosaic_head.MosaicDecoderHead):
raise ValueError('Only support MosaicDecoderHead for head.')
if not isinstance(model.neck, mosaic_blocks.MosaicEncoderBlock):
raise ValueError('Only support MosaicEncoderBlock for encoder.')
head = qat_mosaic_head.MosaicDecoderHeadQuantized.from_config(
model.head.get_config())
neck = qat_nn_blocks.MosaicEncoderBlockQuantized.from_config(
model.neck.get_config())
mask_scoring_head = None
if model.mask_scoring_head is not None:
mask_scoring_head = qat_nn_layers.MaskScoringQuantized.from_config(
model.mask_scoring_head.get_config()
)
optimized_model = mosaic_model.MosaicSegmentationModel(
backbone=optimized_backbone,
head=head,
neck=neck,
mask_scoring_head=mask_scoring_head,
)
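  # One forward pass on zeros builds every layer before the weights are copied.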
  dummy_input = tf.zeros([1] + list(input_specs.shape[1:]))
  optimized_model(dummy_input, training=True)
helper.copy_original_weights(model.head, optimized_model.head)
helper.copy_original_weights(model.neck, optimized_model.neck)
if model.mask_scoring_head is not None:
helper.copy_original_weights(
model.mask_scoring_head, optimized_model.mask_scoring_head
)
return optimized_model
| 3,920 | 38.21 | 86 | py |
models | models-master/official/projects/mosaic/qat/modeling/layers/nn_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn_blocks."""
from typing import Any, Iterable, Tuple
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.mosaic.qat.modeling.layers import nn_blocks
def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]:
"""Returns the combinations of end-to-end tests to run."""
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
)
class NNBlocksTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(nn_blocks.MultiKernelGroupConvBlockQuantized, [32, 64]),
(nn_blocks.MultiKernelGroupConvBlockQuantized, [64, 128]),
)
def test_multi_kernel_grouped_convolution_block_creation(
self, block_fn, output_filter_depths):
input_size = 32
inputs = tf.keras.Input(shape=(input_size, input_size, 16), batch_size=1)
block = block_fn(
output_filter_depths=output_filter_depths, kernel_sizes=[3, 3])
features = block(inputs)
self.assertAllEqual([1, input_size, input_size,
sum(output_filter_depths)], features.shape.as_list())
@parameterized.parameters(
(nn_blocks.MosaicEncoderBlockQuantized, [32, 64], [3, 3], [2, 2]),
(nn_blocks.MosaicEncoderBlockQuantized, [64, 128], [3, 1], [2, 4]),
(nn_blocks.MosaicEncoderBlockQuantized, [128, 256], [1, 1], [1, 1]),
(nn_blocks.MosaicEncoderBlockQuantized, [128, 256], [3, 3], [4, 4]),
)
def test_mosaic_encoder_block_creation(self, block_fn, branch_filter_depths,
conv_kernel_sizes,
pyramid_pool_bin_nums):
input_size = 128
in_filters = 24
inputs = tf.keras.Input(
shape=(input_size, input_size, in_filters), batch_size=1)
block = block_fn(
branch_filter_depths=branch_filter_depths,
conv_kernel_sizes=conv_kernel_sizes,
pyramid_pool_bin_nums=pyramid_pool_bin_nums)
features = block(inputs)
self.assertAllEqual([1, input_size, input_size,
sum(branch_filter_depths)], features.shape.as_list())
@parameterized.parameters(
(nn_blocks.DecoderSumMergeBlockQuantized, 32, [128, 64]),
(nn_blocks.DecoderSumMergeBlockQuantized, 16, [32, 32]),
)
def test_decoder_sum_merge_block_creation(self, block_fn,
decoder_projected_depth,
output_size):
inputs = (tf.keras.Input(shape=(64, 64, 128), batch_size=1),
tf.keras.Input(shape=(16, 16, 256), batch_size=1))
block = block_fn(
decoder_projected_depth=decoder_projected_depth,
output_size=output_size)
features = block(inputs)
self.assertAllEqual(
[1, output_size[0], output_size[1], decoder_projected_depth],
features.shape.as_list())
@parameterized.parameters(
(nn_blocks.DecoderConcatMergeBlockQuantized, 64, 32, [128, 64]),
(nn_blocks.DecoderConcatMergeBlockQuantized, 256, 16, [32, 32]),
)
def test_decoder_concat_merge_block_creation(self, block_fn,
decoder_internal_depth,
decoder_projected_depth,
output_size):
inputs = (tf.keras.Input(shape=(64, 64, 128), batch_size=1),
tf.keras.Input(shape=(16, 16, 256), batch_size=1))
block = block_fn(
decoder_internal_depth=decoder_internal_depth,
decoder_projected_depth=decoder_projected_depth,
output_size=output_size)
features = block(inputs)
self.assertAllEqual(
[1, output_size[0], output_size[1], decoder_projected_depth],
features.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 4,687 | 37.743802 | 78 | py |
models | models-master/official/projects/mosaic/qat/modeling/layers/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains quantized neural blocks for the QAT."""
from typing import Dict, Tuple, Union
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.mosaic.modeling import mosaic_blocks
from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultiKernelGroupConvBlockQuantized(mosaic_blocks.MultiKernelGroupConvBlock
):
"""A quantized multi-kernel grouped convolution block.
This block is used in the segmentation neck introduced in MOSAIC.
Reference:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
Context](https://arxiv.org/pdf/2112.11623.pdf)
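  Example (a hedged sketch mirroring the unit tests; shapes are illustrative):
  >>> block = MultiKernelGroupConvBlockQuantized(
  ...     output_filter_depths=[32, 64], kernel_sizes=[3, 3])
  >>> block(tf.ones([1, 32, 32, 16])).shape  # -> [1, 32, 32, 96]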
"""
def build(self, input_shape: tf.TensorShape) -> None:
"""Builds the block with the given input shape."""
input_channels = input_shape[self._group_split_axis]
if input_channels % self._num_groups != 0:
      raise ValueError('The number of input channels must be divisible by '
                       'the number of groups for an even group split.')
# Override the activation and bn with their quantized version.
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._use_sync_bn else tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
self._bn_op = helper.norm_by_activation(
self._activation, norm_with_quantize, norm_no_quantize)
self._conv_branches = []
if self._use_depthwise_convolution:
for i, conv_kernel_size in enumerate(self._kernel_sizes):
depthwise_conv = helper.DepthwiseConv2DQuantized(
kernel_size=(conv_kernel_size, conv_kernel_size),
depth_multiplier=1,
padding='same',
depthwise_regularizer=self._kernel_regularizer,
depthwise_initializer=self._kernel_initializer,
use_bias=False,
activation=helper.NoOpActivation())
# Add BN->RELU after depthwise convolution.
batchnorm_op_depthwise = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation_depthwise = self._activation_fn
feature_conv = helper.Conv2DQuantized(
filters=self._output_filter_depths[i],
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=helper.NoOpActivation(),
use_bias=False)
batchnorm_op = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
        # Use a plain list because the current QAT API does not support a
        # sequential model inside a tf.keras.Sequential block, e.g.
        # conv_branch = tf.keras.Sequential([depthwise_conv, feature_conv,
        # batchnorm_op]).
conv_branch = [
depthwise_conv,
batchnorm_op_depthwise,
activation_depthwise,
feature_conv,
batchnorm_op,
]
self._conv_branches.append(conv_branch)
else:
for i, conv_kernel_size in enumerate(self._kernel_sizes):
norm_conv = helper.Conv2DQuantized(
filters=self._output_filter_depths[i],
kernel_size=(conv_kernel_size, conv_kernel_size),
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
activation=helper.NoOpActivation(),
use_bias=False)
batchnorm_op = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
conv_branch = [norm_conv, batchnorm_op]
self._conv_branches.append(conv_branch)
self._concat_groups = helper.ConcatenateQuantized(
axis=self._group_split_axis)
@tf.keras.utils.register_keras_serializable(package='Vision')
class MosaicEncoderBlockQuantized(mosaic_blocks.MosaicEncoderBlock):
"""Implements the encoder module/block of MOSAIC model.
Spatial Pyramid Pooling and Multi-kernel Conv layer
SpatialPyramidPoolingMultiKernelConv
References:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
context](https://arxiv.org/pdf/2112.11623.pdf)
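  Example (a hedged sketch mirroring the unit tests):
  >>> block = MosaicEncoderBlockQuantized(
  ...     branch_filter_depths=[32, 64], conv_kernel_sizes=[3, 3],
  ...     pyramid_pool_bin_nums=[2, 2])
  >>> block(tf.ones([1, 128, 128, 24])).shape  # -> [1, 128, 128, 96]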
"""
def build(
self, input_shape: Union[tf.TensorShape, Dict[str,
tf.TensorShape]]) -> None:
"""Builds this MOSAIC encoder block with the given single input shape."""
input_shape = (
input_shape[self._encoder_input_level]
if isinstance(input_shape, dict) else input_shape)
self._data_format = tf.keras.backend.image_data_format()
if self._data_format == 'channels_last':
height = input_shape[1]
width = input_shape[2]
else:
height = input_shape[2]
width = input_shape[3]
self._global_pool_branch = None
self._spatial_pyramid = []
# Override the activation and bn with their quantized version.
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._use_sync_bn else tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
self._bn_op = helper.norm_by_activation(
self._activation, norm_with_quantize, norm_no_quantize)
for pyramid_pool_bin_num in self._pyramid_pool_bin_nums:
if pyramid_pool_bin_num == 1:
global_pool = helper.GlobalAveragePooling2DQuantized(
data_format=self._data_format, keepdims=True)
global_projection = helper.Conv2DQuantized(
filters=max(self._branch_filter_depths),
kernel_size=(1, 1),
padding='same',
activation=helper.NoOpActivation(),
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
use_bias=False)
batch_norm_global_branch = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
# Use list manually instead of tf.keras.Sequential([])
self._global_pool_branch = [
global_pool,
global_projection,
batch_norm_global_branch,
]
else:
if height < pyramid_pool_bin_num or width < pyramid_pool_bin_num:
          raise ValueError('The number of pooling bins must not be larger '
                           'than the input size.')
        assert pyramid_pool_bin_num >= 2, (
            'Except for the global pooling, the number of bins in pyramid '
            'pooling must be at least two.')
pool_height, stride_height = self._get_bin_pool_kernel_and_stride(
height, pyramid_pool_bin_num)
pool_width, stride_width = self._get_bin_pool_kernel_and_stride(
width, pyramid_pool_bin_num)
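        # Illustration (hedged): a 128-wide input with 2 bins typically pools
        # with 64-wide windows at stride 64 when the size divides evenly.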
bin_pool_level = helper.AveragePooling2DQuantized(
pool_size=(pool_height, pool_width),
strides=(stride_height, stride_width),
padding='valid',
data_format=self._data_format)
self._spatial_pyramid.append(bin_pool_level)
# Grouped multi-kernel Convolution.
self._multi_kernel_group_conv = MultiKernelGroupConvBlockQuantized(
output_filter_depths=self._branch_filter_depths,
kernel_sizes=self._conv_kernel_sizes,
use_sync_bn=self._use_sync_bn,
batchnorm_momentum=self._batchnorm_momentum,
batchnorm_epsilon=self._batchnorm_epsilon,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_depthwise_convolution=self._use_depthwise_convolution)
# Encoder's final 1x1 feature projection.
# Considering the relatively large #channels merged before projection,
# enlarge the projection #channels to the sum of the filter depths of
# branches.
self._output_channels = sum(self._branch_filter_depths)
# Use list manually instead of tf.keras.Sequential([]).
self._encoder_projection = [
helper.Conv2DQuantized(
filters=self._output_channels,
kernel_size=(1, 1),
padding='same',
activation=helper.NoOpActivation(),
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_bias=False),
self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon),
]
# Use the TF2 default feature alignment rule for bilinear resizing.
self._upsample = helper.ResizingQuantized(
height,
width,
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
self._concat_layer = helper.ConcatenateQuantized(axis=self._channel_axis)
@tf.keras.utils.register_keras_serializable(package='Vision')
class DecoderSumMergeBlockQuantized(mosaic_blocks.DecoderSumMergeBlock):
"""Implements the decoder feature sum merge block of MOSAIC model.
This block is used in the decoder of segmentation head introduced in MOSAIC.
It essentially merges a high-resolution feature map of a low semantic level
and a low-resolution feature map of a higher semantic level by 'Sum-Merge'.
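  Example (a hedged sketch mirroring the unit tests; the input is the
  (low_res, high_res) pair of feature maps):
  >>> block = DecoderSumMergeBlockQuantized(
  ...     decoder_projected_depth=32, output_size=(128, 64))
  >>> y = block((tf.ones([1, 64, 64, 128]), tf.ones([1, 16, 16, 256])))
  >>> y.shape  # -> [1, 128, 64, 32]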
"""
def build(
self,
input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None:
"""Builds the block with the given input shape."""
    # Assume same-level backbone features are concatenated before input.
low_res_input_shape = input_shape[0]
high_res_input_shape = input_shape[1]
low_res_channels = low_res_input_shape[self._channel_axis]
high_res_channels = high_res_input_shape[self._channel_axis]
# Override the activation and bn with their quantized version.
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._use_sync_bn else tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
self._bn_op = helper.norm_by_activation(
self._activation, norm_with_quantize, norm_no_quantize)
if low_res_channels != self._decoder_projected_depth:
low_res_feature_conv = helper.Conv2DQuantized(
filters=self._decoder_projected_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=helper.NoOpActivation(),
use_bias=False)
batchnorm_op = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
self._low_res_branch = [
low_res_feature_conv,
batchnorm_op,
]
if high_res_channels != self._decoder_projected_depth:
high_res_feature_conv = helper.Conv2DQuantized(
filters=self._decoder_projected_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=helper.NoOpActivation(),
use_bias=False)
batchnorm_op_high = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
self._high_res_branch = [
high_res_feature_conv,
batchnorm_op_high,
]
# Resize feature maps.
if tf.keras.backend.image_data_format() == 'channels_last':
low_res_height = low_res_input_shape[1]
low_res_width = low_res_input_shape[2]
high_res_height = high_res_input_shape[1]
high_res_width = high_res_input_shape[2]
else:
low_res_height = low_res_input_shape[2]
low_res_width = low_res_input_shape[3]
high_res_height = high_res_input_shape[2]
high_res_width = high_res_input_shape[3]
if (self._output_size[0] == 0 or self._output_size[1] == 0):
self._output_size = (high_res_height, high_res_width)
if (low_res_height != self._output_size[0] or
low_res_width != self._output_size[1]):
self._upsample_low_res = helper.ResizingQuantized(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
if (high_res_height != self._output_size[0] or
high_res_width != self._output_size[1]):
self._upsample_high_res = helper.ResizingQuantized(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
self._add_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Add(), configs.Default8BitQuantizeConfig([], [], True))
@tf.keras.utils.register_keras_serializable(package='Vision')
class DecoderConcatMergeBlockQuantized(mosaic_blocks.DecoderConcatMergeBlock):
"""Implements the decoder feature concat merge block of MOSAIC model.
This block is used in the decoder of segmentation head introduced in MOSAIC.
It essentially merges a high-resolution feature map of a low semantic level
and a low-resolution feature of a higher semantic level by 'Concat-Merge'.
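  Example (a hedged sketch mirroring the unit tests):
  >>> block = DecoderConcatMergeBlockQuantized(
  ...     decoder_internal_depth=64, decoder_projected_depth=32,
  ...     output_size=(128, 64))
  >>> y = block((tf.ones([1, 64, 64, 128]), tf.ones([1, 16, 16, 256])))
  >>> y.shape  # -> [1, 128, 64, 32]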
"""
def build(
self,
input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None:
"""Builds this block with the given input shape."""
    # Assume same-level backbone features are concatenated before input.
low_res_input_shape = input_shape[0]
high_res_input_shape = input_shape[1]
# Set up resizing feature maps before concat.
if tf.keras.backend.image_data_format() == 'channels_last':
low_res_height = low_res_input_shape[1]
low_res_width = low_res_input_shape[2]
high_res_height = high_res_input_shape[1]
high_res_width = high_res_input_shape[2]
else:
low_res_height = low_res_input_shape[2]
low_res_width = low_res_input_shape[3]
high_res_height = high_res_input_shape[2]
high_res_width = high_res_input_shape[3]
self._concat_layer = helper.ConcatenateQuantized(axis=self._channel_axis)
# Override the activation and bn with their quantized version.
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
norm_layer = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._use_sync_bn else tf.keras.layers.BatchNormalization)
norm_with_quantize = helper.BatchNormalizationQuantized(norm_layer)
norm_no_quantize = helper.BatchNormalizationNoQuantized(norm_layer)
self._bn_op = helper.norm_by_activation(
self._activation, norm_with_quantize, norm_no_quantize)
if (self._output_size[0] == 0 or self._output_size[1] == 0):
self._output_size = (high_res_height, high_res_width)
if (low_res_height != self._output_size[0] or
low_res_width != self._output_size[1]):
self._upsample_low_res = helper.ResizingQuantized(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
if (high_res_height != self._output_size[0] or
high_res_width != self._output_size[1]):
self._upsample_high_res = helper.ResizingQuantized(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
    # Set up a 3-layer separable convolution block, i.e.
    # 1x1->BN->RELU + Depthwise->BN->RELU + 1x1->BN->RELU.
initial_feature_conv = helper.Conv2DQuantized(
filters=self._decoder_internal_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=helper.NoOpActivation(),
use_bias=False)
batchnorm_op1 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation1 = self._activation_fn
depthwise_conv = helper.DepthwiseConv2DQuantized(
kernel_size=(3, 3),
depth_multiplier=1,
padding='same',
depthwise_regularizer=self._kernel_regularizer,
depthwise_initializer=self._kernel_initializer,
use_bias=False,
activation=helper.NoOpActivation())
batchnorm_op2 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation2 = self._activation_fn
project_feature_conv = helper.Conv2DQuantized(
filters=self._decoder_projected_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=helper.NoOpActivation(),
use_bias=False)
batchnorm_op3 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation3 = self._activation_fn
self._feature_fusion_block = [
initial_feature_conv,
batchnorm_op1,
activation1,
depthwise_conv,
batchnorm_op2,
activation2,
project_feature_conv,
batchnorm_op3,
activation3,
]
self._concat_layer = helper.ConcatenateQuantized(axis=self._channel_axis)
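# A minimal usage sketch of the concat-merge block above (illustrative only;
# the shapes mirror the DecoderConcatMergeBlock test in mosaic_blocks_test.py):
#
#   block = DecoderConcatMergeBlockQuantized(
#       decoder_internal_depth=64,
#       decoder_projected_depth=32,
#       output_size=(64, 64))
#   low_res = tf.ones([1, 32, 32, 128])
#   high_res = tf.ones([1, 64, 64, 192])
#   outputs = block([low_res, high_res])  # -> shape [1, 64, 64, 32]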
| 19,266 | 41.910913 | 80 | py |
models | models-master/official/projects/mosaic/qat/modeling/heads/mosaic_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of segmentation head of the MOSAIC model."""
from typing import List, Optional
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.mosaic.modeling import mosaic_head
from official.projects.mosaic.qat.modeling.layers import nn_blocks
from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper
@tf.keras.utils.register_keras_serializable(package='Vision')
class MosaicDecoderHeadQuantized(mosaic_head.MosaicDecoderHead):
"""Creates a quantized MOSAIC decoder in segmentation head.
Reference:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
Context](https://arxiv.org/pdf/2112.11623.pdf)
"""
def __init__(
self,
num_classes: int,
decoder_input_levels: Optional[List[str]] = None,
decoder_stage_merge_styles: Optional[List[str]] = None,
decoder_filters: Optional[List[int]] = None,
decoder_projected_filters: Optional[List[int]] = None,
encoder_end_level: Optional[int] = 4,
use_additional_classifier_layer: bool = False,
classifier_kernel_size: int = 1,
activation: str = 'relu',
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
kernel_initializer: str = 'GlorotUniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a MOSAIC segmentation head.
Args:
      num_classes: An `int` number of mask classification categories. The
        number of classes does not include the background class.
decoder_input_levels: A list of `str` specifying additional
input levels from the backbone outputs for mask refinement in decoder.
decoder_stage_merge_styles: A list of `str` specifying the merge style at
each stage of the decoder, merge styles can be 'concat_merge' or
'sum_merge'.
      decoder_filters: A list of integers specifying the number of channels
        used at each decoder stage. Note: this only has an effect if the
        decoder merge style is 'concat_merge'.
decoder_projected_filters: A list of integers specifying the number of
projected channels at the end of each decoder stage.
encoder_end_level: An optional integer specifying the output level of the
encoder stage, which is used if the input from the encoder to the
decoder head is a dictionary.
use_additional_classifier_layer: A `bool` specifying whether to use an
additional classifier layer or not. It must be True if the final decoder
projected filters does not match the `num_classes`.
classifier_kernel_size: An `int` number to specify the kernel size of the
classifier layer.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
batchnorm_momentum: A `float` of normalization momentum for the moving
average.
batchnorm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: Kernel initializer for conv layers. Defaults to
`glorot_uniform`.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(
num_classes=num_classes,
decoder_input_levels=decoder_input_levels,
decoder_stage_merge_styles=decoder_stage_merge_styles,
decoder_filters=decoder_filters,
decoder_projected_filters=decoder_projected_filters,
encoder_end_level=encoder_end_level,
use_additional_classifier_layer=use_additional_classifier_layer,
classifier_kernel_size=classifier_kernel_size,
activation=activation,
use_sync_bn=use_sync_bn,
batchnorm_momentum=batchnorm_momentum,
batchnorm_epsilon=batchnorm_epsilon,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
interpolation=interpolation,
bias_regularizer=bias_regularizer,
**kwargs)
# Assuming decoder_input_levels and the following lists are sorted and
# follow the same order.
if decoder_input_levels is None:
decoder_input_levels = ['3', '2']
if decoder_stage_merge_styles is None:
decoder_stage_merge_styles = ['concat_merge', 'sum_merge']
if decoder_filters is None:
decoder_filters = [64, 64]
if decoder_projected_filters is None:
decoder_projected_filters = [32, 32]
self._decoder_input_levels = decoder_input_levels
self._decoder_stage_merge_styles = decoder_stage_merge_styles
self._decoder_filters = decoder_filters
self._decoder_projected_filters = decoder_projected_filters
if (len(decoder_input_levels) != len(decoder_stage_merge_styles) or
len(decoder_input_levels) != len(decoder_filters) or
len(decoder_input_levels) != len(decoder_projected_filters)):
raise ValueError('The number of Decoder inputs and settings must match.')
self._merge_stages = []
for (stage_merge_style, decoder_filter,
decoder_projected_filter) in zip(decoder_stage_merge_styles,
decoder_filters,
decoder_projected_filters):
if stage_merge_style == 'concat_merge':
concat_merge_stage = nn_blocks.DecoderConcatMergeBlockQuantized(
decoder_internal_depth=decoder_filter,
decoder_projected_depth=decoder_projected_filter,
output_size=(0, 0),
use_sync_bn=use_sync_bn,
batchnorm_momentum=batchnorm_momentum,
batchnorm_epsilon=batchnorm_epsilon,
activation=activation,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
interpolation=interpolation)
self._merge_stages.append(concat_merge_stage)
elif stage_merge_style == 'sum_merge':
sum_merge_stage = nn_blocks.DecoderSumMergeBlockQuantized(
decoder_projected_depth=decoder_projected_filter,
output_size=(0, 0),
use_sync_bn=use_sync_bn,
batchnorm_momentum=batchnorm_momentum,
batchnorm_epsilon=batchnorm_epsilon,
activation=activation,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
interpolation=interpolation)
self._merge_stages.append(sum_merge_stage)
else:
raise ValueError(
'A stage merge style in MOSAIC Decoder can only be concat_merge '
'or sum_merge.')
    # Concat merge or sum merge does not require an additional classifier layer
    # unless the final decoder projected filter does not match num_classes.
final_decoder_projected_filter = decoder_projected_filters[-1]
if (final_decoder_projected_filter != num_classes and
not use_additional_classifier_layer):
      raise ValueError(
          'An additional classifier layer is needed if the final decoder '
          'projected filters do not match num_classes!')
self._use_additional_classifier_layer = use_additional_classifier_layer
if use_additional_classifier_layer:
# This additional classification layer uses different kernel
# initializers and bias compared to earlier blocks.
self._pixelwise_classifier = helper.Conv2DQuantized(
name='pixelwise_classifier',
filters=num_classes,
kernel_size=classifier_kernel_size,
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation=helper.NoOpActivation(),
use_bias=True)
self._activation_fn = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(activation, use_keras_layer=True),
configs.Default8BitActivationQuantizeConfig())
self._config_dict = {
'num_classes': num_classes,
'decoder_input_levels': decoder_input_levels,
'decoder_stage_merge_styles': decoder_stage_merge_styles,
'decoder_filters': decoder_filters,
'decoder_projected_filters': decoder_projected_filters,
'encoder_end_level': encoder_end_level,
'use_additional_classifier_layer': use_additional_classifier_layer,
'classifier_kernel_size': classifier_kernel_size,
'activation': activation,
'use_sync_bn': use_sync_bn,
'batchnorm_momentum': batchnorm_momentum,
'batchnorm_epsilon': batchnorm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'interpolation': interpolation,
'bias_regularizer': bias_regularizer
}
| 10,060 | 46.457547 | 80 | py |
models | models-master/official/projects/mosaic/qat/modeling/heads/mosaic_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mosaic_head."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.mosaic.qat.modeling.heads import mosaic_head
class MosaicBlocksTest(parameterized.TestCase, tf.test.TestCase):
def test_mosaic_head(self):
decoder_head = mosaic_head.MosaicDecoderHeadQuantized(
num_classes=32,
decoder_input_levels=['3', '2'],
decoder_stage_merge_styles=['concat_merge', 'sum_merge'],
decoder_filters=[64, 64],
decoder_projected_filters=[32, 32])
inputs = [
tf.ones([1, 32, 32, 128]), {
'2': tf.ones([1, 128, 128, 64]),
'3': tf.ones([1, 64, 64, 192])
}
]
outputs = decoder_head(inputs)
self.assertAllEqual(outputs.shape, [1, 128, 128, 32])
def test_mosaic_head_3laterals(self):
decoder_head = mosaic_head.MosaicDecoderHeadQuantized(
num_classes=32,
decoder_input_levels=['3', '2', '1'],
decoder_stage_merge_styles=[
'concat_merge', 'concat_merge', 'sum_merge'
],
decoder_filters=[64, 64, 64],
decoder_projected_filters=[32, 32, 32])
inputs = [
tf.ones([1, 32, 32, 128]), {
'1': tf.ones([1, 256, 256, 64]),
'2': tf.ones([1, 128, 128, 64]),
'3': tf.ones([1, 64, 64, 192])
}
]
outputs = decoder_head(inputs)
self.assertAllEqual(outputs.shape, [1, 256, 256, 32])
if __name__ == '__main__':
tf.test.main()
| 2,106 | 31.921875 | 74 | py |
models | models-master/official/projects/mosaic/qat/tasks/mosaic_tasks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation task definition."""
import tensorflow as tf
from official.core import task_factory
from official.projects.mosaic import mosaic_tasks
from official.projects.mosaic.qat.configs import mosaic_config as exp_cfg
from official.projects.mosaic.qat.modeling import factory
@task_factory.register_task_cls(exp_cfg.MosaicSemanticSegmentationTask)
class MosaicSemanticSegmentationTask(mosaic_tasks.MosaicSemanticSegmentationTask
):
"""A task for semantic segmentation with QAT."""
def build_model(self, training=True) -> tf.keras.Model:
"""Builds semantic segmentation model with QAT."""
model = super().build_model(training)
if training:
input_size = self.task_config.train_data.output_size
crop_size = self.task_config.train_data.crop_size
if crop_size:
input_size = crop_size
else:
input_size = self.task_config.validation_data.output_size
input_specs = tf.keras.layers.InputSpec(shape=[None] + input_size + [3])
if self.task_config.quantization:
model = factory.build_qat_mosaic_model(
model, self.task_config.quantization, input_specs)
return model
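# A minimal usage sketch (illustrative only; mirrors the accompanying task
# test, which fetches the registered 'mosaic_mnv35_cityscapes_qat' experiment
# via official.core.exp_factory):
#
#   config = exp_factory.get_exp_config('mosaic_mnv35_cityscapes_qat')
#   task = MosaicSemanticSegmentationTask(config.task)
#   model = task.build_model(training=True)
#   # The model is QAT-wrapped only when config.task.quantization is set.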
| 1,797 | 39.863636 | 80 | py |
models | models-master/official/projects/mosaic/qat/tasks/mosaic_tasks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mosaic task."""
# pylint: disable=unused-import
import os
from absl.testing import parameterized
import orbit
import tensorflow as tf
from official import vision
from official.core import exp_factory
from official.modeling import optimization
from official.projects.mosaic.configs import mosaic_config as exp_cfg
from official.projects.mosaic.qat.tasks import mosaic_tasks
from official.vision.dataloaders import tfexample_utils
class MosaicSemanticSegmentationTask(parameterized.TestCase, tf.test.TestCase):
def _create_test_tfrecord(self, tfrecord_file, example, num_samples):
examples = [example] * num_samples
tfexample_utils.dump_to_tfrecord(
record_file=tfrecord_file, tf_examples=examples)
@parameterized.parameters(
('mosaic_mnv35_cityscapes_qat', True),
('mosaic_mnv35_cityscapes_qat', False),
)
def test_semantic_segmentation_task(self, test_config, is_training):
"""Semantic segmentation task test for training and val using toy configs."""
input_image_size = [1024, 2048]
test_tfrecord_file = os.path.join(self.get_temp_dir(), 'seg_test.tfrecord')
example = tfexample_utils.create_segmentation_test_example(
image_height=input_image_size[0],
image_width=input_image_size[1],
image_channel=3)
self._create_test_tfrecord(
tfrecord_file=test_tfrecord_file, example=example, num_samples=2)
config = exp_factory.get_exp_config(test_config)
    # Modify config to suit local testing.
config.task.model.input_size = [None, None, 3]
config.trainer.steps_per_loop = 1
config.task.train_data.global_batch_size = 1
config.task.validation_data.global_batch_size = 1
config.task.train_data.output_size = [1024, 2048]
config.task.validation_data.output_size = [1024, 2048]
config.task.train_data.crop_size = [512, 512]
config.task.train_data.shuffle_buffer_size = 2
config.task.validation_data.shuffle_buffer_size = 2
config.task.validation_data.input_path = test_tfrecord_file
config.task.train_data.input_path = test_tfrecord_file
config.train_steps = 1
config.task.model.num_classes = 256
config.task.model.head.num_classes = 256
config.task.model.head.decoder_projected_filters = [256, 256]
task = mosaic_tasks.MosaicSemanticSegmentationTask(config.task)
model = task.build_model(is_training)
metrics = task.build_metrics(training=is_training)
strategy = tf.distribute.get_strategy()
    data_config = (
        config.task.train_data if is_training else config.task.validation_data)
dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
data_config)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if is_training:
task.train_step(next(iterator), model, optimizer, metrics=metrics)
else:
task.validation_step(next(iterator), model, metrics=metrics)
if __name__ == '__main__':
tf.test.main()
| 3,723 | 39.923077 | 88 | py |
models | models-master/official/projects/mosaic/configs/mosaic_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration definition for Semantic Segmentation with MOSAIC."""
import dataclasses
import os
from typing import List, Optional, Union
import numpy as np
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import backbones
from official.vision.configs import common
from official.vision.configs import semantic_segmentation as seg_cfg
@dataclasses.dataclass
class MosaicDecoderHead(hyperparams.Config):
"""MOSAIC decoder head config for Segmentation."""
num_classes: int = 19
decoder_input_levels: List[str] = dataclasses.field(default_factory=list)
decoder_stage_merge_styles: List[str] = dataclasses.field(
default_factory=list)
decoder_filters: List[int] = dataclasses.field(default_factory=list)
decoder_projected_filters: List[int] = dataclasses.field(default_factory=list)
encoder_end_level: int = 4
use_additional_classifier_layer: bool = False
classifier_kernel_size: int = 1
activation: str = 'relu'
kernel_initializer: str = 'glorot_uniform'
interpolation: str = 'bilinear'
@dataclasses.dataclass
class MosaicEncoderNeck(hyperparams.Config):
"""MOSAIC encoder neck config for segmentation."""
encoder_input_level: Union[str, int] = '4'
branch_filter_depths: List[int] = dataclasses.field(default_factory=list)
conv_kernel_sizes: List[int] = dataclasses.field(default_factory=list)
pyramid_pool_bin_nums: List[int] = dataclasses.field(default_factory=list)
activation: str = 'relu'
dropout_rate: float = 0.1
kernel_initializer: str = 'glorot_uniform'
interpolation: str = 'bilinear'
use_depthwise_convolution: bool = True
@dataclasses.dataclass
class MosaicSemanticSegmentationModel(hyperparams.Config):
"""MOSAIC semantic segmentation model config."""
num_classes: int = 19
input_size: List[int] = dataclasses.field(default_factory=list)
head: MosaicDecoderHead = dataclasses.field(default_factory=MosaicDecoderHead)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='mobilenet', mobilenet=backbones.MobileNet()
)
)
neck: MosaicEncoderNeck = dataclasses.field(default_factory=MosaicEncoderNeck)
mask_scoring_head: Optional[seg_cfg.MaskScoringHead] = None
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
use_sync_bn=True, norm_momentum=0.99, norm_epsilon=0.001
)
)
@dataclasses.dataclass
class MosaicSemanticSegmentationTask(seg_cfg.SemanticSegmentationTask):
"""The config for MOSAIC segmentation task."""
model: MosaicSemanticSegmentationModel = dataclasses.field(
default_factory=MosaicSemanticSegmentationModel
)
train_data: seg_cfg.DataConfig = dataclasses.field(
default_factory=lambda: seg_cfg.DataConfig(is_training=True)
)
validation_data: seg_cfg.DataConfig = dataclasses.field(
default_factory=lambda: seg_cfg.DataConfig(is_training=False)
)
losses: seg_cfg.Losses = dataclasses.field(default_factory=seg_cfg.Losses)
evaluation: seg_cfg.Evaluation = dataclasses.field(
default_factory=seg_cfg.Evaluation
)
train_input_partition_dims: List[int] = dataclasses.field(
default_factory=list)
eval_input_partition_dims: List[int] = dataclasses.field(
default_factory=list)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or neck.
export_config: seg_cfg.ExportConfig = dataclasses.field(
default_factory=seg_cfg.ExportConfig
)
# Cityscapes Dataset (Download and process the dataset yourself)
CITYSCAPES_TRAIN_EXAMPLES = 2975
CITYSCAPES_VAL_EXAMPLES = 500
CITYSCAPES_INPUT_PATH_BASE = 'cityscapes/tfrecord'
@exp_factory.register_config_factory('mosaic_mnv35_cityscapes')
def mosaic_mnv35_cityscapes() -> cfg.ExperimentConfig:
"""Instantiates an experiment configuration of image segmentation task.
  This image segmentation experiment is conducted on the Cityscapes dataset.
  The model architecture is a MOSAIC encoder-decoder. The default backbone
  network is a mobilenet variant called Mobilenet_v3.5-MultiAvg, on top of
  which the MOSAIC encoder-decoder can be deployed. All detailed configurations
  can be overridden by a .yaml file provided by the user to launch the
  experiments. Please refer
to .yaml examples in the path of ../configs/experiments/.
Returns:
A particular instance of cfg.ExperimentConfig for MOSAIC model based
image semantic segmentation task.
"""
train_batch_size = 16
eval_batch_size = 16
steps_per_epoch = CITYSCAPES_TRAIN_EXAMPLES // train_batch_size
output_stride = 16
  backbone_output_level = int(np.log2(output_stride))
config = cfg.ExperimentConfig(
task=MosaicSemanticSegmentationTask(
model=MosaicSemanticSegmentationModel(
# Cityscapes uses only 19 semantic classes for train/evaluation.
# The void (background) class is ignored in train and evaluation.
num_classes=19,
input_size=[None, None, 3],
backbone=backbones.Backbone(
type='mobilenet',
mobilenet=backbones.MobileNet(
model_id='MobileNetMultiAVGSeg',
output_intermediate_endpoints=True,
output_stride=output_stride)),
neck=MosaicEncoderNeck(
encoder_input_level=backbone_output_level,
branch_filter_depths=[64, 64],
conv_kernel_sizes=[3, 5],
pyramid_pool_bin_nums=[1, 4, 8, 16], # paper default
activation='relu',
dropout_rate=0.1,
kernel_initializer='glorot_uniform',
interpolation='bilinear',
use_depthwise_convolution=True),
head=MosaicDecoderHead(
num_classes=19,
decoder_input_levels=['3/depthwise', '2/depthwise'],
decoder_stage_merge_styles=['concat_merge', 'sum_merge'],
decoder_filters=[64, 64],
decoder_projected_filters=[19, 19],
encoder_end_level=backbone_output_level,
use_additional_classifier_layer=False,
classifier_kernel_size=1,
activation='relu',
kernel_initializer='glorot_uniform',
interpolation='bilinear'),
norm_activation=common.NormActivation(
activation='relu',
norm_momentum=0.99,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=seg_cfg.Losses(l2_weight_decay=4e-5),
train_data=seg_cfg.DataConfig(
input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE,
'train_fine**'),
crop_size=[1024, 2048],
output_size=[1024, 2048],
is_training=True,
global_batch_size=train_batch_size,
aug_scale_min=0.5,
aug_scale_max=2.0),
validation_data=seg_cfg.DataConfig(
input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE, 'val_fine*'),
output_size=[1024, 2048],
is_training=False,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=True,
drop_remainder=False),
# Imagenet pre-trained Mobilenet_v3.5-MultiAvg checkpoint.
init_checkpoint='gs://tf_model_garden/vision/mobilenet/v3.5multiavg_seg_float/',
init_checkpoint_modules='backbone'),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=100000,
validation_steps=CITYSCAPES_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
best_checkpoint_eval_metric='mean_iou',
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_metric_comp='higher',
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.1,
'decay_steps': 100000,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
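# A minimal usage sketch (illustrative only): the factory above is registered
# under 'mosaic_mnv35_cityscapes', so the experiment config can be fetched and
# selectively overridden before training, e.g.:
#
#   config = exp_factory.get_exp_config('mosaic_mnv35_cityscapes')
#   config.task.train_data.global_batch_size = 8  # Hypothetical override.
#   config.validate()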
| 9,857 | 40.594937 | 90 | py |
models | models-master/official/projects/mosaic/modeling/mosaic_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mosaic_blocks."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.mosaic.modeling import mosaic_blocks
class MosaicBlocksTest(parameterized.TestCase, tf.test.TestCase):
def test_multi_kernel_group_conv_block(self):
block = mosaic_blocks.MultiKernelGroupConvBlock([64, 64], [3, 5])
inputs = tf.ones([1, 4, 4, 448])
outputs = block(inputs)
self.assertAllEqual(outputs.shape, [1, 4, 4, 128])
def test_mosaic_encoder_block(self):
block = mosaic_blocks.MosaicEncoderBlock(
encoder_input_level=4,
branch_filter_depths=[64, 64],
conv_kernel_sizes=[3, 5],
pyramid_pool_bin_nums=[1, 4, 8, 16])
inputs = tf.ones([1, 32, 32, 448])
outputs = block(inputs)
self.assertAllEqual(outputs.shape, [1, 32, 32, 128])
def test_mosaic_encoder_block_odd_input_overlap_pool(self):
block = mosaic_blocks.MosaicEncoderBlock(
encoder_input_level=4,
branch_filter_depths=[64, 64],
conv_kernel_sizes=[3, 5],
pyramid_pool_bin_nums=[1, 4, 8, 16])
inputs = tf.ones([1, 31, 31, 448])
outputs = block(inputs)
self.assertAllEqual(outputs.shape, [1, 31, 31, 128])
def test_mosaic_encoder_non_separable_block(self):
block = mosaic_blocks.MosaicEncoderBlock(
encoder_input_level=4,
branch_filter_depths=[64, 64],
conv_kernel_sizes=[3, 5],
pyramid_pool_bin_nums=[1, 4, 8, 16],
use_depthwise_convolution=False)
inputs = tf.ones([1, 32, 32, 448])
outputs = block(inputs)
self.assertAllEqual(outputs.shape, [1, 32, 32, 128])
def test_mosaic_decoder_concat_merge_block(self):
concat_merge_block = mosaic_blocks.DecoderConcatMergeBlock(64, 32, [64, 64])
inputs = [tf.ones([1, 32, 32, 128]), tf.ones([1, 64, 64, 192])]
outputs = concat_merge_block(inputs)
self.assertAllEqual(outputs.shape, [1, 64, 64, 32])
def test_mosaic_decoder_concat_merge_block_default_output_size(self):
concat_merge_block = mosaic_blocks.DecoderConcatMergeBlock(64, 32)
inputs = [tf.ones([1, 32, 32, 128]), tf.ones([1, 64, 64, 192])]
outputs = concat_merge_block(inputs)
self.assertAllEqual(outputs.shape, [1, 64, 64, 32])
def test_mosaic_decoder_concat_merge_block_default_output_size_4x(self):
concat_merge_block = mosaic_blocks.DecoderConcatMergeBlock(64, 32)
inputs = [tf.ones([1, 32, 32, 128]), tf.ones([1, 128, 128, 192])]
outputs = concat_merge_block(inputs)
self.assertAllEqual(outputs.shape, [1, 128, 128, 32])
def test_mosaic_decoder_concat_merge_block_default_output_size_4x_rec(self):
concat_merge_block = mosaic_blocks.DecoderConcatMergeBlock(64, 32)
inputs = [tf.ones([1, 32, 64, 128]), tf.ones([1, 128, 256, 64])]
outputs = concat_merge_block(inputs)
self.assertAllEqual(outputs.shape, [1, 128, 256, 32])
def test_mosaic_decoder_sum_merge_block(self):
concat_merge_block = mosaic_blocks.DecoderSumMergeBlock(32, [128, 128])
inputs = [tf.ones([1, 64, 64, 32]), tf.ones([1, 128, 128, 64])]
outputs = concat_merge_block(inputs)
self.assertAllEqual(outputs.shape, [1, 128, 128, 32])
def test_mosaic_decoder_sum_merge_block_default_output_size(self):
concat_merge_block = mosaic_blocks.DecoderSumMergeBlock(32)
inputs = [tf.ones([1, 64, 64, 32]), tf.ones([1, 128, 128, 64])]
outputs = concat_merge_block(inputs)
self.assertAllEqual(outputs.shape, [1, 128, 128, 32])
if __name__ == '__main__':
tf.test.main()
| 4,128 | 39.881188 | 80 | py |
models | models-master/official/projects/mosaic/modeling/mosaic_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of segmentation head of the MOSAIC model."""
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.mosaic.modeling import mosaic_blocks
@tf.keras.utils.register_keras_serializable(package='Vision')
class MosaicDecoderHead(tf.keras.layers.Layer):
"""Creates a MOSAIC decoder in segmentation head.
Reference:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
Context](https://arxiv.org/pdf/2112.11623.pdf)
"""
def __init__(
self,
num_classes: int,
decoder_input_levels: Optional[List[str]] = None,
decoder_stage_merge_styles: Optional[List[str]] = None,
decoder_filters: Optional[List[int]] = None,
decoder_projected_filters: Optional[List[int]] = None,
encoder_end_level: Optional[int] = 4,
use_additional_classifier_layer: bool = False,
classifier_kernel_size: int = 1,
activation: str = 'relu',
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
kernel_initializer: str = 'GlorotUniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a MOSAIC segmentation head.
Args:
      num_classes: An `int` number of mask classification categories. The
        number of classes does not include the background class.
decoder_input_levels: A list of `str` specifying additional
input levels from the backbone outputs for mask refinement in decoder.
decoder_stage_merge_styles: A list of `str` specifying the merge style at
each stage of the decoder, merge styles can be 'concat_merge' or
'sum_merge'.
      decoder_filters: A list of integers specifying the number of channels
        used at each decoder stage. Note: this only has an effect if the
        decoder merge style is 'concat_merge'.
decoder_projected_filters: A list of integers specifying the number of
projected channels at the end of each decoder stage.
encoder_end_level: An optional integer specifying the output level of the
encoder stage, which is used if the input from the encoder to the
decoder head is a dictionary.
use_additional_classifier_layer: A `bool` specifying whether to use an
additional classifier layer or not. It must be True if the final decoder
projected filters does not match the `num_classes`.
classifier_kernel_size: An `int` number to specify the kernel size of the
classifier layer.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
batchnorm_momentum: A `float` of normalization momentum for the moving
average.
batchnorm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: Kernel initializer for conv layers. Defaults to
`glorot_uniform`.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(MosaicDecoderHead, self).__init__(**kwargs)
    # Assuming 'decoder_input_levels' are sorted in descending order and the
    # other settings are listed in the same order as 'decoder_input_levels'.
if decoder_input_levels is None:
decoder_input_levels = ['3', '2']
if decoder_stage_merge_styles is None:
decoder_stage_merge_styles = ['concat_merge', 'sum_merge']
if decoder_filters is None:
decoder_filters = [64, 64]
if decoder_projected_filters is None:
decoder_projected_filters = [32, 32]
self._decoder_input_levels = decoder_input_levels
self._decoder_stage_merge_styles = decoder_stage_merge_styles
self._decoder_filters = decoder_filters
self._decoder_projected_filters = decoder_projected_filters
if (len(decoder_input_levels) != len(decoder_stage_merge_styles) or
len(decoder_input_levels) != len(decoder_filters) or
len(decoder_input_levels) != len(decoder_projected_filters)):
raise ValueError('The number of Decoder inputs and settings must match.')
self._merge_stages = []
for (stage_merge_style, decoder_filter,
decoder_projected_filter) in zip(decoder_stage_merge_styles,
decoder_filters,
decoder_projected_filters):
if stage_merge_style == 'concat_merge':
concat_merge_stage = mosaic_blocks.DecoderConcatMergeBlock(
decoder_internal_depth=decoder_filter,
decoder_projected_depth=decoder_projected_filter,
output_size=(0, 0),
use_sync_bn=use_sync_bn,
batchnorm_momentum=batchnorm_momentum,
batchnorm_epsilon=batchnorm_epsilon,
activation=activation,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
interpolation=interpolation)
self._merge_stages.append(concat_merge_stage)
elif stage_merge_style == 'sum_merge':
sum_merge_stage = mosaic_blocks.DecoderSumMergeBlock(
decoder_projected_depth=decoder_projected_filter,
output_size=(0, 0),
use_sync_bn=use_sync_bn,
batchnorm_momentum=batchnorm_momentum,
batchnorm_epsilon=batchnorm_epsilon,
activation=activation,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
interpolation=interpolation)
self._merge_stages.append(sum_merge_stage)
else:
raise ValueError(
'A stage merge style in MOSAIC Decoder can only be concat_merge '
'or sum_merge.')
    # Concat merge or sum merge does not require an additional classifier layer
    # unless the final decoder projected filter does not match num_classes.
final_decoder_projected_filter = decoder_projected_filters[-1]
if (final_decoder_projected_filter != num_classes and
not use_additional_classifier_layer):
      raise ValueError(
          'An additional classifier layer is needed if the final decoder '
          'projected filters do not match num_classes!')
self._use_additional_classifier_layer = use_additional_classifier_layer
if use_additional_classifier_layer:
# This additional classification layer uses different kernel
# initializers and bias compared to earlier blocks.
self._pixelwise_classifier = tf.keras.layers.Conv2D(
name='pixelwise_classifier',
filters=num_classes,
kernel_size=classifier_kernel_size,
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=True)
self._activation_fn = tf_utils.get_activation(activation)
self._config_dict = {
'num_classes': num_classes,
'decoder_input_levels': decoder_input_levels,
'decoder_stage_merge_styles': decoder_stage_merge_styles,
'decoder_filters': decoder_filters,
'decoder_projected_filters': decoder_projected_filters,
'encoder_end_level': encoder_end_level,
'use_additional_classifier_layer': use_additional_classifier_layer,
'classifier_kernel_size': classifier_kernel_size,
'activation': activation,
'use_sync_bn': use_sync_bn,
'batchnorm_momentum': batchnorm_momentum,
'batchnorm_epsilon': batchnorm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'interpolation': interpolation,
'bias_regularizer': bias_regularizer
}
def call(self,
inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]],
training: Optional[bool] = None) -> tf.Tensor:
"""Forward pass of the segmentation head.
It supports a tuple of 2 elements. Each element is a tensor or a tensor
    dictionary. The first element holds the final (low-resolution) encoder
    endpoints, and the second holds the higher-resolution backbone endpoints.
When inputs are tensors, they are from a single level of feature maps.
When inputs are dictionaries, they contain multiple levels of feature maps,
    where the key is the level/index of a feature map.
    Note: 'level' denotes the number of 2x downsamplings, as defined in the
    backbone.
Args:
inputs: A tuple of 2 elements, each element can either be a tensor
representing feature maps or 1 dictionary of tensors:
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors.
The first is encoder endpoints, and the second is backbone endpoints.
training: a `Boolean` indicating whether it is in `training` mode.
Returns:
segmentation mask prediction logits: A `tf.Tensor` representing the
output logits before the final segmentation mask.
"""
encoder_outputs = inputs[0]
backbone_outputs = inputs[1]
y = encoder_outputs[str(
self._config_dict['encoder_end_level'])] if isinstance(
encoder_outputs, dict) else encoder_outputs
if isinstance(backbone_outputs, dict):
for level, merge_stage in zip(
self._decoder_input_levels, self._merge_stages):
x = backbone_outputs[str(level)]
y = merge_stage([y, x], training=training)
else:
x = backbone_outputs
y = self._merge_stages[0]([y, x], training=training)
if self._use_additional_classifier_layer:
y = self._pixelwise_classifier(y)
y = self._activation_fn(y)
return y
def get_config(self) -> Dict[str, Any]:
"""Returns a config dictionary for initialization from serialization."""
base_config = super().get_config()
base_config.update(self._config_dict)
return base_config
@classmethod
def from_config(cls, config: Dict[str, Any]):
return cls(**config)
| 11,284 | 45.440329 | 80 | py |
models | models-master/official/projects/mosaic/modeling/mosaic_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mosaic_head."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.mosaic.modeling import mosaic_head
class MosaicBlocksTest(parameterized.TestCase, tf.test.TestCase):
def test_mosaic_head(self):
decoder_head = mosaic_head.MosaicDecoderHead(
num_classes=32,
decoder_input_levels=['3', '2'],
decoder_stage_merge_styles=['concat_merge', 'sum_merge'],
decoder_filters=[64, 64],
decoder_projected_filters=[32, 32])
inputs = [
tf.ones([1, 32, 32, 128]), {
'2': tf.ones([1, 128, 128, 64]),
'3': tf.ones([1, 64, 64, 192])
}
]
outputs = decoder_head(inputs)
self.assertAllEqual(outputs.shape, [1, 128, 128, 32])
def test_mosaic_head_3laterals(self):
decoder_head = mosaic_head.MosaicDecoderHead(
num_classes=32,
decoder_input_levels=[3, 2, 1],
decoder_stage_merge_styles=[
'concat_merge', 'concat_merge', 'sum_merge'
],
decoder_filters=[64, 64, 64],
decoder_projected_filters=[32, 32, 32])
inputs = [
tf.ones([1, 32, 32, 128]), {
'1': tf.ones([1, 256, 256, 64]),
'2': tf.ones([1, 128, 128, 64]),
'3': tf.ones([1, 64, 64, 192])
}
]
outputs = decoder_head(inputs)
self.assertAllEqual(outputs.shape, [1, 256, 256, 32])
if __name__ == '__main__':
tf.test.main()
| 2,072 | 31.390625 | 74 | py |
models | models-master/official/projects/mosaic/modeling/mosaic_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the overall MOSAIC segmentation network modeling."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.mosaic.modeling import mosaic_blocks
from official.projects.mosaic.modeling import mosaic_head
from official.projects.mosaic.modeling import mosaic_model
from official.vision.modeling import backbones
from official.vision.modeling.heads import segmentation_heads
class SegmentationNetworkTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, [4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(128, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(128, [1, 4, 8], [3, 2], ['sum_merge', 'sum_merge']),
(128, [1, 4, 8], [3, 2], ['concat_merge', 'concat_merge']),
(512, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']),
(256, [4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(256, [1, 4, 8], [3, 2], ['concat_merge', 'sum_merge']),
(256, [1, 4, 8, 16], [3, 2], ['concat_merge', 'sum_merge']),
)
def test_mosaic_segmentation_model(self,
input_size,
pyramid_pool_bin_nums,
decoder_input_levels,
decoder_stage_merge_styles):
"""Test for building and calling of a MOSAIC segmentation network."""
num_classes = 32
inputs = np.random.rand(2, input_size, input_size, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.MobileNet(model_id='MobileNetMultiAVGSeg')
encoder_input_level = 4
neck = mosaic_blocks.MosaicEncoderBlock(
encoder_input_level=encoder_input_level,
branch_filter_depths=[64, 64],
conv_kernel_sizes=[3, 5],
pyramid_pool_bin_nums=pyramid_pool_bin_nums)
head = mosaic_head.MosaicDecoderHead(
num_classes=num_classes,
decoder_input_levels=decoder_input_levels,
decoder_stage_merge_styles=decoder_stage_merge_styles,
decoder_filters=[64, 64],
decoder_projected_filters=[32, 32])
mask_scoring_head = segmentation_heads.MaskScoring(
num_classes=num_classes,
fc_input_size=[4, 4],
num_convs=1,
num_filters=32,
fc_dims=32,
num_fcs=1)
model = mosaic_model.MosaicSegmentationModel(
backbone=backbone,
head=head,
neck=neck,
mask_scoring_head=mask_scoring_head,
)
# Calls the MOSAIC model.
outputs = model(inputs)
level = min(decoder_input_levels)
self.assertAllEqual(
[2, input_size // (2**level), input_size // (2**level), num_classes],
outputs['logits'].numpy().shape)
self.assertAllEqual(
[2, num_classes],
outputs['mask_scores'].numpy().shape)
def test_serialize_deserialize(self):
"""Validate the mosaic network can be serialized and deserialized."""
num_classes = 8
backbone = backbones.ResNet(model_id=50)
neck = mosaic_blocks.MosaicEncoderBlock(
encoder_input_level=4,
branch_filter_depths=[64, 64],
conv_kernel_sizes=[3, 5],
pyramid_pool_bin_nums=[1, 4, 8, 16])
head = mosaic_head.MosaicDecoderHead(
num_classes=num_classes,
decoder_input_levels=[3, 2],
decoder_stage_merge_styles=['concat_merge', 'sum_merge'],
decoder_filters=[64, 64],
decoder_projected_filters=[32, 8])
mask_scoring_head = segmentation_heads.MaskScoring(
num_classes=num_classes,
fc_input_size=[4, 4],
num_convs=1,
num_filters=32,
fc_dims=32,
num_fcs=1)
model = mosaic_model.MosaicSegmentationModel(
backbone=backbone,
head=head,
neck=neck,
mask_scoring_head=mask_scoring_head,
)
config = model.get_config()
new_model = mosaic_model.MosaicSegmentationModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,825 | 36.123077 | 79 | py |
models | models-master/official/projects/mosaic/modeling/mosaic_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of building blocks for MOSAIC model.
Reference:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
Context](https://arxiv.org/pdf/2112.11623.pdf)
"""
from typing import Any, Dict, List, Optional, Tuple, Union
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultiKernelGroupConvBlock(tf.keras.layers.Layer):
"""A multi-kernel grouped convolution block.
This block is used in the segmentation neck introduced in MOSAIC.
Reference:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
Context](https://arxiv.org/pdf/2112.11623.pdf)
"""
def __init__(
self,
output_filter_depths: Optional[List[int]] = None,
kernel_sizes: Optional[List[int]] = None,
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
activation: str = 'relu',
kernel_initializer: str = 'GlorotUniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_depthwise_convolution: bool = True,
**kwargs):
"""Initializes a Multi-kernel Grouped Convolution Block.
Args:
output_filter_depths: A list of integers representing the numbers of
output channels or filter depths of convolution groups.
kernel_sizes: A list of integers denoting the convolution kernel sizes in
each convolution group.
use_sync_bn: A bool, whether or not to use sync batch normalization.
batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
0.99.
batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
      activation: A `str` for the activation function type. Defaults to 'relu'.
kernel_initializer: Kernel initializer for conv layers. Defaults to
`glorot_uniform`.
kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
      use_depthwise_convolution: Allows the spatial convolutions to be
        depthwise separable convolutions.
**kwargs: Other keyword arguments for the layer.
"""
super(MultiKernelGroupConvBlock, self).__init__(**kwargs)
if output_filter_depths is None:
output_filter_depths = [64, 64]
if kernel_sizes is None:
kernel_sizes = [3, 5]
if len(output_filter_depths) != len(kernel_sizes):
raise ValueError('The number of output groups must match #kernels.')
self._output_filter_depths = output_filter_depths
self._kernel_sizes = kernel_sizes
self._num_groups = len(self._kernel_sizes)
self._use_sync_bn = use_sync_bn
self._batchnorm_momentum = batchnorm_momentum
self._batchnorm_epsilon = batchnorm_epsilon
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._use_depthwise_convolution = use_depthwise_convolution
    # Apply BN before activation. Putting BN between conv and activation also
    # helps quantization, where conv+bn+activation are fused into a single op.
self._activation_fn = tf_utils.get_activation(activation)
if self._use_sync_bn:
self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._bn_op = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
self._group_split_axis = -1
else:
self._bn_axis = 1
self._group_split_axis = 1
def build(self, input_shape: tf.TensorShape) -> None:
"""Builds the block with the given input shape."""
input_channels = input_shape[self._group_split_axis]
if input_channels % self._num_groups != 0:
raise ValueError('The number of input channels must be divisible by '
'the number of groups for evenly group split.')
self._conv_branches = []
if self._use_depthwise_convolution:
for i, conv_kernel_size in enumerate(self._kernel_sizes):
depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(conv_kernel_size, conv_kernel_size),
depth_multiplier=1,
padding='same',
depthwise_regularizer=self._kernel_regularizer,
depthwise_initializer=self._kernel_initializer,
use_bias=False)
# Add BN->RELU after depthwise convolution.
batchnorm_op_depthwise = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation_depthwise = self._activation_fn
feature_conv = tf.keras.layers.Conv2D(
filters=self._output_filter_depths[i],
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=None,
use_bias=False)
batchnorm_op = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
# Use list manually as current QAT API does not support sequential model
# within a tf.keras.Sequential block, e.g. conv_branch =
# tf.keras.Sequential([depthwise_conv, feature_conv, batchnorm_op,])
conv_branch = [
depthwise_conv,
batchnorm_op_depthwise,
activation_depthwise,
feature_conv,
batchnorm_op,
]
self._conv_branches.append(conv_branch)
else:
for i, conv_kernel_size in enumerate(self._kernel_sizes):
norm_conv = tf.keras.layers.Conv2D(
filters=self._output_filter_depths[i],
kernel_size=(conv_kernel_size, conv_kernel_size),
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
activation=None,
use_bias=False)
batchnorm_op = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
conv_branch = [norm_conv, batchnorm_op]
self._conv_branches.append(conv_branch)
self._concat_groups = tf.keras.layers.Concatenate(
axis=self._group_split_axis)
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
"""Calls this group convolution block with the given inputs."""
inputs_splits = tf.split(inputs,
num_or_size_splits=self._num_groups,
axis=self._group_split_axis)
output_branches = []
for i, x in enumerate(inputs_splits):
conv_branch = self._conv_branches[i]
# Apply layers sequentially and manually.
for layer in conv_branch:
if isinstance(layer, tf.keras.layers.Layer):
x = layer(x, training=training)
else:
x = layer(x)
# Apply activation function after BN, which also helps quantization
# where conv+bn+activation are fused into a single op.
x = self._activation_fn(x)
output_branches.append(x)
x = self._concat_groups(output_branches)
return x
def get_config(self) -> Dict[str, Any]:
"""Returns a config dictionary for initialization from serialization."""
config = {
'output_filter_depths': self._output_filter_depths,
'kernel_sizes': self._kernel_sizes,
'num_groups': self._num_groups,
'use_sync_bn': self._use_sync_bn,
'batchnorm_momentum': self._batchnorm_momentum,
'batchnorm_epsilon': self._batchnorm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'use_depthwise_convolution': self._use_depthwise_convolution,
}
base_config = super(MultiKernelGroupConvBlock, self).get_config()
base_config.update(config)
return base_config
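# A minimal usage sketch of the block above (illustrative only; taken from
# mosaic_blocks_test.py). The 448 input channels are split evenly into two
# groups of 224, each group is convolved with its own kernel size, and the two
# 64-channel branch outputs are concatenated back into 128 channels:
#
#   block = MultiKernelGroupConvBlock(
#       output_filter_depths=[64, 64], kernel_sizes=[3, 5])
#   outputs = block(tf.ones([1, 4, 4, 448]))  # -> shape [1, 4, 4, 128]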
@tf.keras.utils.register_keras_serializable(package='Vision')
class MosaicEncoderBlock(tf.keras.layers.Layer):
"""Implements the encoder module/block of MOSAIC model.
Spatial Pyramid Pooling and Multi-kernel Conv layer
SpatialPyramidPoolingMultiKernelConv
References:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
context](https://arxiv.org/pdf/2112.11623.pdf)
"""
def __init__(
self,
encoder_input_level: Optional[Union[str, int]] = '4',
branch_filter_depths: Optional[List[int]] = None,
conv_kernel_sizes: Optional[List[int]] = None,
pyramid_pool_bin_nums: Optional[List[int]] = None,
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
activation: str = 'relu',
dropout_rate: float = 0.1,
kernel_initializer: str = 'glorot_uniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
use_depthwise_convolution: bool = True,
**kwargs):
"""Initializes a MOSAIC encoder block which is deployed after a backbone.
Args:
encoder_input_level: An optional `str` or integer specifying the level of
backbone outputs as the input to the encoder.
branch_filter_depths: A list of integers for the number of convolution
channels in each branch at a pyramid level after SpatialPyramidPooling.
conv_kernel_sizes: A list of integers representing the convolution kernel
sizes in the Multi-kernel Convolution blocks in the encoder.
pyramid_pool_bin_nums: A list of integers for the number of bins at each
level of the Spatial Pyramid Pooling.
use_sync_bn: A bool, whether or not to use sync batch normalization.
batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
0.99.
batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
activation: A `str` for the activation function type. Defaults to 'relu'.
dropout_rate: A float between 0 and 1. Fraction of the input units to drop
out, which will be used directly as the `rate` of the Dropout layer at
the end of the encoder. Defaults to 0.1.
kernel_initializer: Kernel initializer for conv layers. Defaults to
`glorot_uniform`.
kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
      use_depthwise_convolution: Use depthwise separable convolutions in the
        Multi-kernel Convolution blocks in the encoder.
**kwargs: Other keyword arguments for the layer.
"""
super().__init__(**kwargs)
self._encoder_input_level = str(encoder_input_level)
if branch_filter_depths is None:
branch_filter_depths = [64, 64]
self._branch_filter_depths = branch_filter_depths
if conv_kernel_sizes is None:
conv_kernel_sizes = [3, 5]
self._conv_kernel_sizes = conv_kernel_sizes
if pyramid_pool_bin_nums is None:
pyramid_pool_bin_nums = [1, 4, 8, 16]
self._pyramid_pool_bin_nums = pyramid_pool_bin_nums
self._use_sync_bn = use_sync_bn
self._batchnorm_momentum = batchnorm_momentum
self._batchnorm_epsilon = batchnorm_epsilon
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._interpolation = interpolation
self._use_depthwise_convolution = use_depthwise_convolution
self._activation_fn = tf_utils.get_activation(activation)
if self._use_sync_bn:
self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._bn_op = tf.keras.layers.BatchNormalization
self._dropout_rate = dropout_rate
if dropout_rate:
self._encoder_end_dropout_layer = tf.keras.layers.Dropout(
rate=dropout_rate)
else:
self._encoder_end_dropout_layer = None
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
self._channel_axis = -1
else:
self._bn_axis = 1
self._channel_axis = 1
def _get_bin_pool_kernel_and_stride(
self,
input_size: int,
num_of_bin: int) -> Tuple[int, int]:
"""Calculates the kernel size and stride for spatial bin pooling.
Args:
input_size: Input dimension (a scalar).
num_of_bin: The number of bins used for spatial bin pooling.
Returns:
      A tuple of the kernel size and stride for spatial bin pooling.
"""
bin_overlap = int(input_size % num_of_bin)
pooling_stride = int(input_size // num_of_bin)
pooling_kernel = pooling_stride + bin_overlap
return pooling_kernel, pooling_stride
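  # A worked example of the bin pooling arithmetic above (the values are
  # illustrative assumptions, not defaults): with input_size=32 and
  # num_of_bin=4, bin_overlap = 32 % 4 = 0, pooling_stride = 32 // 4 = 8 and
  # pooling_kernel = 8 + 0 = 8, yielding exactly 4 non-overlapping bins. With
  # input_size=34 and num_of_bin=4, bin_overlap = 2, pooling_stride = 8 and
  # pooling_kernel = 10, so adjacent bins overlap by 2 pixels and the pooled
  # output still has exactly 4 bins along that dimension.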
def build(
self, input_shape: Union[tf.TensorShape, Dict[str,
tf.TensorShape]]) -> None:
"""Builds this MOSAIC encoder block with the given single input shape."""
input_shape = (
input_shape[self._encoder_input_level]
if isinstance(input_shape, dict) else input_shape)
self._data_format = tf.keras.backend.image_data_format()
if self._data_format == 'channels_last':
height = input_shape[1]
width = input_shape[2]
else:
height = input_shape[2]
width = input_shape[3]
self._global_pool_branch = None
self._spatial_pyramid = []
for pyramid_pool_bin_num in self._pyramid_pool_bin_nums:
if pyramid_pool_bin_num == 1:
global_pool = tf.keras.layers.GlobalAveragePooling2D(
data_format=self._data_format, keepdims=True)
global_projection = tf.keras.layers.Conv2D(
filters=max(self._branch_filter_depths),
kernel_size=(1, 1),
padding='same',
activation=None,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
use_bias=False)
batch_norm_global_branch = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
# Use list manually instead of tf.keras.Sequential([])
self._global_pool_branch = [
global_pool,
global_projection,
batch_norm_global_branch,
]
else:
if height < pyramid_pool_bin_num or width < pyramid_pool_bin_num:
          raise ValueError('The number of pooling bins must not be larger '
                           'than the input sizes.')
assert pyramid_pool_bin_num >= 2, (
            'Except for the global pooling, the number of bins in pyramid '
'pooling must be at least two.')
pool_height, stride_height = self._get_bin_pool_kernel_and_stride(
height, pyramid_pool_bin_num)
pool_width, stride_width = self._get_bin_pool_kernel_and_stride(
width, pyramid_pool_bin_num)
bin_pool_level = tf.keras.layers.AveragePooling2D(
pool_size=(pool_height, pool_width),
strides=(stride_height, stride_width),
padding='valid',
data_format=self._data_format)
self._spatial_pyramid.append(bin_pool_level)
# Grouped multi-kernel Convolution.
self._multi_kernel_group_conv = MultiKernelGroupConvBlock(
output_filter_depths=self._branch_filter_depths,
kernel_sizes=self._conv_kernel_sizes,
use_sync_bn=self._use_sync_bn,
batchnorm_momentum=self._batchnorm_momentum,
batchnorm_epsilon=self._batchnorm_epsilon,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_depthwise_convolution=self._use_depthwise_convolution)
# Encoder's final 1x1 feature projection.
# Considering the relatively large #channels merged before projection,
# enlarge the projection #channels to the sum of the filter depths of
# branches.
self._output_channels = sum(self._branch_filter_depths)
# Use list manually instead of tf.keras.Sequential([]).
self._encoder_projection = [
tf.keras.layers.Conv2D(
filters=self._output_channels,
kernel_size=(1, 1),
padding='same',
activation=None,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_bias=False),
self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon),
]
# Use the TF2 default feature alignment rule for bilinear resizing.
self._upsample = tf.keras.layers.Resizing(
height,
width,
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
self._concat_layer = tf.keras.layers.Concatenate(axis=self._channel_axis)
def call(self,
inputs: Union[tf.Tensor, Dict[str, tf.Tensor]],
training: Optional[bool] = None) -> tf.Tensor:
"""Calls this MOSAIC encoder block with the given input."""
if training is None:
training = tf.keras.backend.learning_phase()
input_from_backbone_output = (
inputs[self._encoder_input_level]
if isinstance(inputs, dict) else inputs)
branches = []
# Original features from the final output of the backbone.
branches.append(input_from_backbone_output)
if self._spatial_pyramid:
for bin_pool_level in self._spatial_pyramid:
x = input_from_backbone_output
x = bin_pool_level(x)
x = self._multi_kernel_group_conv(x, training=training)
x = self._upsample(x)
branches.append(x)
if self._global_pool_branch is not None:
x = input_from_backbone_output
for layer in self._global_pool_branch:
x = layer(x, training=training)
x = self._activation_fn(x)
x = self._upsample(x)
branches.append(x)
x = self._concat_layer(branches)
for layer in self._encoder_projection:
x = layer(x, training=training)
x = self._activation_fn(x)
if self._encoder_end_dropout_layer is not None:
x = self._encoder_end_dropout_layer(x, training=training)
return x
def get_config(self) -> Dict[str, Any]:
"""Returns a config dictionary for initialization from serialization."""
config = {
'encoder_input_level': self._encoder_input_level,
'branch_filter_depths': self._branch_filter_depths,
'conv_kernel_sizes': self._conv_kernel_sizes,
'pyramid_pool_bin_nums': self._pyramid_pool_bin_nums,
'use_sync_bn': self._use_sync_bn,
'batchnorm_momentum': self._batchnorm_momentum,
'batchnorm_epsilon': self._batchnorm_epsilon,
'activation': self._activation,
'dropout_rate': self._dropout_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'interpolation': self._interpolation,
'use_depthwise_convolution': self._use_depthwise_convolution,
}
base_config = super().get_config()
base_config.update(config)
return base_config
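# A minimal usage sketch for MosaicEncoderBlock, kept as a comment so the
# module's import-time behavior is unchanged; the input level and tensor
# shapes below are assumptions:
#
#   encoder = MosaicEncoderBlock(
#       encoder_input_level='4',
#       branch_filter_depths=[64, 64],
#       conv_kernel_sizes=[3, 5],
#       pyramid_pool_bin_nums=[1, 4, 8, 16])
#   backbone_outputs = {'4': tf.ones([2, 32, 32, 256])}
#   features = encoder(backbone_outputs, training=False)
#   # `features` has shape [2, 32, 32, sum(branch_filter_depths)], i.e.
#   # [2, 32, 32, 128], after the final 1x1 projection.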
@tf.keras.utils.register_keras_serializable(package='Vision')
class DecoderSumMergeBlock(tf.keras.layers.Layer):
"""Implements the decoder feature sum merge block of MOSAIC model.
This block is used in the decoder of segmentation head introduced in MOSAIC.
It essentially merges a high-resolution feature map of a low semantic level
and a low-resolution feature map of a higher semantic level by 'Sum-Merge'.
"""
def __init__(
self,
decoder_projected_depth: int,
output_size: Tuple[int, int] = (0, 0),
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
activation: str = 'relu',
kernel_initializer: str = 'GlorotUniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
**kwargs):
"""Initialize a sum-merge block for one decoder stage.
Args:
decoder_projected_depth: An integer representing the number of output
channels of this sum-merge block in the decoder.
output_size: A Tuple of integers representing the output height and width
of the feature maps from this sum-merge block. Defaults to (0, 0),
where the output size is set the same as the high-resolution branch.
use_sync_bn: A bool, whether or not to use sync batch normalization.
batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
0.99.
batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
activation: A `str` for the activation function type. Defaults to 'relu'.
      kernel_initializer: Kernel initializer for conv layers. Defaults to
        `GlorotUniform`.
kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
**kwargs: Other keyword arguments for the layer.
"""
super(DecoderSumMergeBlock, self).__init__(**kwargs)
self._decoder_projected_depth = decoder_projected_depth
self._output_size = output_size
self._low_res_branch = []
self._upsample_low_res = None
self._high_res_branch = []
self._upsample_high_res = None
self._use_sync_bn = use_sync_bn
self._batchnorm_momentum = batchnorm_momentum
self._batchnorm_epsilon = batchnorm_epsilon
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._interpolation = interpolation
# Apply BN before activation. Putting BN between conv and activation also
# helps quantization where conv+bn+activation are fused into a single op.
self._activation_fn = tf_utils.get_activation(activation)
if self._use_sync_bn:
self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._bn_op = tf.keras.layers.BatchNormalization
self._bn_axis = (
-1
if tf.keras.backend.image_data_format() == 'channels_last' else 1)
self._channel_axis = (
-1
if tf.keras.backend.image_data_format() == 'channels_last' else 1)
self._add_layer = tf.keras.layers.Add()
def build(
self,
input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None:
"""Builds the block with the given input shape."""
    # Assume backbone features of the same level are concatenated before input.
low_res_input_shape = input_shape[0]
high_res_input_shape = input_shape[1]
low_res_channels = low_res_input_shape[self._channel_axis]
high_res_channels = high_res_input_shape[self._channel_axis]
if low_res_channels != self._decoder_projected_depth:
low_res_feature_conv = tf.keras.layers.Conv2D(
filters=self._decoder_projected_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=None,
use_bias=False)
batchnorm_op = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
self._low_res_branch.extend([
low_res_feature_conv,
batchnorm_op,
])
if high_res_channels != self._decoder_projected_depth:
high_res_feature_conv = tf.keras.layers.Conv2D(
filters=self._decoder_projected_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=None,
use_bias=False)
batchnorm_op_high = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
self._high_res_branch.extend([
high_res_feature_conv,
batchnorm_op_high,
])
# Resize feature maps.
if tf.keras.backend.image_data_format() == 'channels_last':
low_res_height = low_res_input_shape[1]
low_res_width = low_res_input_shape[2]
high_res_height = high_res_input_shape[1]
high_res_width = high_res_input_shape[2]
else:
low_res_height = low_res_input_shape[2]
low_res_width = low_res_input_shape[3]
high_res_height = high_res_input_shape[2]
high_res_width = high_res_input_shape[3]
if (self._output_size[0] == 0 or self._output_size[1] == 0):
self._output_size = (high_res_height, high_res_width)
if (low_res_height != self._output_size[0] or
low_res_width != self._output_size[1]):
self._upsample_low_res = tf.keras.layers.Resizing(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
if (high_res_height != self._output_size[0] or
high_res_width != self._output_size[1]):
self._upsample_high_res = tf.keras.layers.Resizing(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
def call(self,
inputs: Tuple[tf.Tensor, tf.Tensor],
training: Optional[bool] = None) -> tf.Tensor:
"""Calls this decoder sum-merge block with the given input.
Args:
inputs: A Tuple of tensors consisting of a low-resolution higher-semantic
level feature map from the encoder as the first item and a higher
resolution lower-level feature map from the backbone as the second item.
training: a `bool` indicating whether it is in `training` mode.
    Note: the first item of the input Tuple carries the lower-resolution
    feature map and the second item carries the higher-resolution one.
Returns:
A tensor representing the sum-merged decoder feature map.
"""
if training is None:
training = tf.keras.backend.learning_phase()
x_low_res = inputs[0]
x_high_res = inputs[1]
if self._low_res_branch:
for layer in self._low_res_branch:
x_low_res = layer(x_low_res, training=training)
x_low_res = self._activation_fn(x_low_res)
if self._high_res_branch:
for layer in self._high_res_branch:
x_high_res = layer(x_high_res, training=training)
x_high_res = self._activation_fn(x_high_res)
if self._upsample_low_res is not None:
x_low_res = self._upsample_low_res(x_low_res)
if self._upsample_high_res is not None:
x_high_res = self._upsample_high_res(x_high_res)
output = self._add_layer([x_low_res, x_high_res])
return output
def get_config(self) -> Dict[str, Any]:
"""Returns a config dictionary for initialization from serialization."""
config = {
'decoder_projected_depth': self._decoder_projected_depth,
'output_size': self._output_size,
'use_sync_bn': self._use_sync_bn,
'batchnorm_momentum': self._batchnorm_momentum,
'batchnorm_epsilon': self._batchnorm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'interpolation': self._interpolation,
}
base_config = super(DecoderSumMergeBlock, self).get_config()
base_config.update(config)
return base_config
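# A minimal usage sketch for DecoderSumMergeBlock; the tensor shapes are
# assumptions:
#
#   sum_merge = DecoderSumMergeBlock(decoder_projected_depth=64)
#   low_res = tf.ones([2, 16, 16, 128])   # higher-level feature, e.g. encoder.
#   high_res = tf.ones([2, 32, 32, 32])   # lower-level feature, e.g. backbone.
#   merged = sum_merge((low_res, high_res), training=False)
#   # Both branches are projected to 64 channels, resized to the
#   # high-resolution size, and added: `merged` has shape [2, 32, 32, 64].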
@tf.keras.utils.register_keras_serializable(package='Vision')
class DecoderConcatMergeBlock(tf.keras.layers.Layer):
"""Implements the decoder feature concat merge block of MOSAIC model.
This block is used in the decoder of segmentation head introduced in MOSAIC.
It essentially merges a high-resolution feature map of a low semantic level
and a low-resolution feature of a higher semantic level by 'Concat-Merge'.
"""
def __init__(
self,
decoder_internal_depth: int,
decoder_projected_depth: int,
output_size: Tuple[int, int] = (0, 0),
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
activation: str = 'relu',
kernel_initializer: str = 'GlorotUniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
**kwargs):
"""Initializes a concat-merge block for one decoder stage.
Args:
decoder_internal_depth: An integer representing the number of internal
channels of this concat-merge block in the decoder.
decoder_projected_depth: An integer representing the number of output
channels of this concat-merge block in the decoder.
output_size: A Tuple of integers representing the output height and width
of the feature maps from this concat-merge block. Defaults to (0, 0),
where the output size is set the same as the high-resolution branch.
use_sync_bn: A bool, whether or not to use sync batch normalization.
batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
0.99.
batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
activation: A `str` for the activation function type. Defaults to 'relu'.
      kernel_initializer: Kernel initializer for conv layers. Defaults to
        `GlorotUniform`.
kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
**kwargs: Other keyword arguments for the layer.
"""
super(DecoderConcatMergeBlock, self).__init__(**kwargs)
self._decoder_internal_depth = decoder_internal_depth
self._decoder_projected_depth = decoder_projected_depth
self._output_size = output_size
self._upsample_low_res = None
self._upsample_high_res = None
self._use_sync_bn = use_sync_bn
self._batchnorm_momentum = batchnorm_momentum
self._batchnorm_epsilon = batchnorm_epsilon
self._activation = activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._interpolation = interpolation
# Apply BN before activation. Putting BN between conv and activation also
# helps quantization where conv+bn+activation are fused into a single op.
self._activation_fn = tf_utils.get_activation(activation)
if self._use_sync_bn:
self._bn_op = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._bn_op = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
self._channel_axis = -1
else:
self._bn_axis = 1
self._channel_axis = 1
def build(
self,
input_shape: Tuple[tf.TensorShape, tf.TensorShape]) -> None:
"""Builds this block with the given input shape."""
    # Assume backbone features of the same level are concatenated before input.
low_res_input_shape = input_shape[0]
high_res_input_shape = input_shape[1]
# Set up resizing feature maps before concat.
if tf.keras.backend.image_data_format() == 'channels_last':
low_res_height = low_res_input_shape[1]
low_res_width = low_res_input_shape[2]
high_res_height = high_res_input_shape[1]
high_res_width = high_res_input_shape[2]
else:
low_res_height = low_res_input_shape[2]
low_res_width = low_res_input_shape[3]
high_res_height = high_res_input_shape[2]
high_res_width = high_res_input_shape[3]
if (self._output_size[0] == 0 or self._output_size[1] == 0):
self._output_size = (high_res_height, high_res_width)
if (low_res_height != self._output_size[0] or
low_res_width != self._output_size[1]):
self._upsample_low_res = tf.keras.layers.Resizing(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
if (high_res_height != self._output_size[0] or
high_res_width != self._output_size[1]):
self._upsample_high_res = tf.keras.layers.Resizing(
self._output_size[0],
self._output_size[1],
interpolation=self._interpolation,
crop_to_aspect_ratio=False)
    # Set up a 3-layer separable convolution block, i.e.
# 1x1->BN->RELU + Depthwise->BN->RELU + 1x1->BN->RELU.
initial_feature_conv = tf.keras.layers.Conv2D(
filters=self._decoder_internal_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=None,
use_bias=False)
batchnorm_op1 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation1 = self._activation_fn
depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(3, 3),
depth_multiplier=1,
padding='same',
depthwise_regularizer=self._kernel_regularizer,
depthwise_initializer=self._kernel_initializer,
use_bias=False)
batchnorm_op2 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation2 = self._activation_fn
project_feature_conv = tf.keras.layers.Conv2D(
filters=self._decoder_projected_depth,
kernel_size=(1, 1),
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
activation=None,
use_bias=False)
batchnorm_op3 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon)
activation3 = self._activation_fn
self._feature_fusion_block = [
initial_feature_conv,
batchnorm_op1,
activation1,
depthwise_conv,
batchnorm_op2,
activation2,
project_feature_conv,
batchnorm_op3,
activation3,
]
self._concat_layer = tf.keras.layers.Concatenate(axis=self._channel_axis)
def call(self,
inputs: Tuple[tf.Tensor, tf.Tensor],
training: Optional[bool] = None) -> tf.Tensor:
"""Calls this concat-merge block with the given inputs.
Args:
      inputs: A Tuple of tensors consisting of a low-resolution
        higher-semantic level feature map from the encoder as the first item
        and a higher-resolution lower-level feature map from the backbone as
        the second item.
      training: a `bool` indicating whether it is in `training` mode.
Returns:
A tensor representing the concat-merged decoder feature map.
"""
    if training is None:
      training = tf.keras.backend.learning_phase()
    low_res_input = inputs[0]
    high_res_input = inputs[1]
if self._upsample_low_res is not None:
low_res_input = self._upsample_low_res(low_res_input)
if self._upsample_high_res is not None:
high_res_input = self._upsample_high_res(high_res_input)
decoder_feature_list = [low_res_input, high_res_input]
x = self._concat_layer(decoder_feature_list)
for layer in self._feature_fusion_block:
if isinstance(layer, tf.keras.layers.Layer):
x = layer(x, training=training)
else:
x = layer(x)
return x
def get_config(self) -> Dict[str, Any]:
"""Returns a config dictionary for initialization from serialization."""
config = {
'decoder_internal_depth': self._decoder_internal_depth,
'decoder_projected_depth': self._decoder_projected_depth,
'output_size': self._output_size,
'use_sync_bn': self._use_sync_bn,
'batchnorm_momentum': self._batchnorm_momentum,
'batchnorm_epsilon': self._batchnorm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'interpolation': self._interpolation,
}
base_config = super(DecoderConcatMergeBlock, self).get_config()
base_config.update(config)
return base_config
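# A minimal usage sketch for DecoderConcatMergeBlock; the tensor shapes are
# assumptions:
#
#   concat_merge = DecoderConcatMergeBlock(
#       decoder_internal_depth=128, decoder_projected_depth=64)
#   low_res = tf.ones([2, 16, 16, 128])
#   high_res = tf.ones([2, 32, 32, 32])
#   merged = concat_merge((low_res, high_res), training=False)
#   # The inputs are resized to the high-resolution size, concatenated
#   # (128 + 32 channels), fused by the 1x1 -> depthwise 3x3 -> 1x1 block,
#   # and projected to `merged` of shape [2, 32, 32, 64].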
| 36,947 | 40.702032 | 80 | py |
models | models-master/official/projects/mosaic/modeling/mosaic_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the overall MOSAIC segmentation models."""
from typing import Any, Dict, Optional, Union
import tensorflow as tf
from official.projects.mosaic.configs import mosaic_config
from official.projects.mosaic.modeling import mosaic_blocks
from official.projects.mosaic.modeling import mosaic_head
from official.vision.modeling import backbones
from official.vision.modeling.heads import segmentation_heads
@tf.keras.utils.register_keras_serializable(package='Vision')
class MosaicSegmentationModel(tf.keras.Model):
"""A model class for segmentation using MOSAIC.
Input images are passed through a backbone first. A MOSAIC neck encoder
network is then applied, and finally a MOSAIC segmentation head is applied on
the outputs of the backbone and neck encoder network. Feature fusion and
decoding is done in the segmentation head.
Reference:
[MOSAIC: Mobile Segmentation via decoding Aggregated Information and encoded
Context](https://arxiv.org/pdf/2112.11623.pdf)
"""
def __init__(self,
backbone: tf.keras.Model,
head: tf.keras.layers.Layer,
neck: Optional[tf.keras.layers.Layer] = None,
mask_scoring_head: Optional[tf.keras.layers.Layer] = None,
**kwargs):
"""Segmentation initialization function.
Args:
backbone: A backbone network.
head: A segmentation head, e.g. MOSAIC decoder.
neck: An optional neck encoder network, e.g. MOSAIC encoder. If it is not
provided, the decoder head will be connected directly with the backbone.
mask_scoring_head: An optional mask scoring head.
**kwargs: keyword arguments to be passed.
"""
super(MosaicSegmentationModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'neck': neck,
'head': head,
'mask_scoring_head': mask_scoring_head,
}
self.backbone = backbone
self.neck = neck
self.head = head
self.mask_scoring_head = mask_scoring_head
def call(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
inputs: tf.Tensor,
           training: Optional[bool] = None) -> Dict[str, tf.Tensor]:
backbone_features = self.backbone(inputs)
if self.neck is not None:
neck_features = self.neck(backbone_features, training=training)
else:
neck_features = backbone_features
logits = self.head([neck_features, backbone_features], training=training)
outputs = {'logits': logits}
if self.mask_scoring_head:
mask_scores = self.mask_scoring_head(logits)
outputs.update({'mask_scores': mask_scores})
return outputs
@property
def checkpoint_items(
self) -> Dict[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.neck is not None:
items.update(neck=self.neck)
if self.mask_scoring_head is not None:
items.update(mask_scoring_head=self.mask_scoring_head)
return items
def get_config(self) -> Dict[str, Any]:
"""Returns a config dictionary for initialization from serialization."""
base_config = super().get_config()
model_config = base_config
model_config.update(self._config_dict)
return model_config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def build_mosaic_segmentation_model(
input_specs: tf.keras.layers.InputSpec,
model_config: mosaic_config.MosaicSemanticSegmentationModel,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
backbone: Optional[tf.keras.Model] = None,
neck: Optional[tf.keras.layers.Layer] = None
) -> tf.keras.Model:
"""Builds MOSAIC Segmentation model."""
norm_activation_config = model_config.norm_activation
if backbone is None:
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
if neck is None:
neck_config = model_config.neck
neck = mosaic_blocks.MosaicEncoderBlock(
encoder_input_level=neck_config.encoder_input_level,
branch_filter_depths=neck_config.branch_filter_depths,
conv_kernel_sizes=neck_config.conv_kernel_sizes,
pyramid_pool_bin_nums=neck_config.pyramid_pool_bin_nums,
use_sync_bn=norm_activation_config.use_sync_bn,
batchnorm_momentum=norm_activation_config.norm_momentum,
batchnorm_epsilon=norm_activation_config.norm_epsilon,
activation=neck_config.activation,
dropout_rate=neck_config.dropout_rate,
kernel_initializer=neck_config.kernel_initializer,
kernel_regularizer=l2_regularizer,
interpolation=neck_config.interpolation,
use_depthwise_convolution=neck_config.use_depthwise_convolution)
head_config = model_config.head
head = mosaic_head.MosaicDecoderHead(
num_classes=model_config.num_classes,
decoder_input_levels=head_config.decoder_input_levels,
decoder_stage_merge_styles=head_config.decoder_stage_merge_styles,
decoder_filters=head_config.decoder_filters,
decoder_projected_filters=head_config.decoder_projected_filters,
encoder_end_level=head_config.encoder_end_level,
use_additional_classifier_layer=head_config
.use_additional_classifier_layer,
classifier_kernel_size=head_config.classifier_kernel_size,
activation=head_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
batchnorm_momentum=norm_activation_config.norm_momentum,
batchnorm_epsilon=norm_activation_config.norm_epsilon,
kernel_initializer=head_config.kernel_initializer,
kernel_regularizer=l2_regularizer,
interpolation=head_config.interpolation)
mask_scoring_head = None
if model_config.mask_scoring_head:
mask_scoring_head = segmentation_heads.MaskScoring(
num_classes=model_config.num_classes,
**model_config.mask_scoring_head.as_dict(),
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
model = MosaicSegmentationModel(
backbone=backbone,
neck=neck,
head=head,
mask_scoring_head=mask_scoring_head)
return model
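# A minimal usage sketch (the input size is an assumption; complete defaults
# come from the experiment configs in mosaic_config):
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 512, 512, 3])
#   model_config = mosaic_config.MosaicSemanticSegmentationModel()
#   model = build_mosaic_segmentation_model(input_specs, model_config)
#   outputs = model(tf.ones([1, 512, 512, 3]), training=False)
#   # `outputs['logits']` holds the per-pixel segmentation logits.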
| 7,164 | 38.805556 | 91 | py |
models | models-master/official/projects/centernet/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/centernet/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision Centernet trainer."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.centernet.common import registry_imports # pylint: disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
    # Pure eval modes do not output yaml files. Otherwise, a continuous eval
    # job may race against the train job when writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
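# Example invocation (the model_dir path and the override string are
# placeholders):
#
#   python3 -m official.projects.centernet.train \
#     --experiment=centernet_hourglass_coco \
#     --mode=train_and_eval \
#     --model_dir=/tmp/centernet \
#     --params_override='task.train_data.global_batch_size=64'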
| 2,522 | 36.102941 | 96 | py |
models | models-master/official/projects/centernet/common/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
from official.projects.centernet.configs import centernet
from official.projects.centernet.modeling import centernet_model
from official.projects.centernet.modeling.backbones import hourglass
from official.projects.centernet.tasks import centernet as centernet_task
from official.vision import registry_imports
| 999 | 42.478261 | 74 | py |
models | models-master/official/projects/centernet/common/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/centernet/configs/centernet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CenterNet configuration definition."""
import dataclasses
import os
from typing import List, Optional, Tuple
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.projects.centernet.configs import backbones
from official.vision.configs import common
TfExampleDecoderLabelMap = common.TfExampleDecoderLabelMap
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
regenerate_source_id: bool = False
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
label_map_decoder: TfExampleDecoderLabelMap = dataclasses.field(
default_factory=TfExampleDecoderLabelMap
)
@dataclasses.dataclass
class Parser(hyperparams.Config):
"""Config for parser."""
bgr_ordering: bool = True
aug_rand_hflip: bool = True
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
aug_rand_saturation: bool = False
aug_rand_brightness: bool = False
aug_rand_hue: bool = False
aug_rand_contrast: bool = False
odapi_augmentation: bool = False
channel_means: Tuple[float, float, float] = dataclasses.field(
default_factory=lambda: (104.01362025, 114.03422265, 119.9165958))
channel_stds: Tuple[float, float, float] = dataclasses.field(
default_factory=lambda: (73.6027665, 69.89082075, 70.9150767))
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
input_path: str = ''
global_batch_size: int = 32
is_training: bool = True
dtype: str = 'float16'
decoder: DataDecoder = dataclasses.field(default_factory=DataDecoder)
  parser: Parser = dataclasses.field(default_factory=Parser)
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
drop_remainder: bool = True
@dataclasses.dataclass
class DetectionLoss(hyperparams.Config):
object_center_weight: float = 1.0
offset_weight: float = 1.0
scale_weight: float = 0.1
@dataclasses.dataclass
class Losses(hyperparams.Config):
detection: DetectionLoss = dataclasses.field(default_factory=DetectionLoss)
gaussian_iou: float = 0.7
class_offset: int = 1
@dataclasses.dataclass
class CenterNetHead(hyperparams.Config):
heatmap_bias: float = -2.19
input_levels: List[str] = dataclasses.field(
default_factory=lambda: ['2_0', '2'])
@dataclasses.dataclass
class CenterNetDetectionGenerator(hyperparams.Config):
max_detections: int = 100
peak_error: float = 1e-6
peak_extract_kernel_size: int = 3
class_offset: int = 1
use_nms: bool = False
nms_pre_thresh: float = 0.1
nms_thresh: float = 0.4
use_reduction_sum: bool = True
@dataclasses.dataclass
class CenterNetModel(hyperparams.Config):
"""Config for centernet model."""
num_classes: int = 90
max_num_instances: int = 128
input_size: List[int] = dataclasses.field(default_factory=list)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='hourglass', hourglass=backbones.Hourglass(model_id=52)
)
)
head: CenterNetHead = dataclasses.field(default_factory=CenterNetHead)
# pylint: disable=line-too-long
detection_generator: CenterNetDetectionGenerator = dataclasses.field(
default_factory=CenterNetDetectionGenerator
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
norm_momentum=0.1, norm_epsilon=1e-5, use_sync_bn=True
)
)
@dataclasses.dataclass
class CenterNetDetection(hyperparams.Config):
  # use_centers is the only option implemented currently.
use_centers: bool = True
@dataclasses.dataclass
class CenterNetSubTasks(hyperparams.Config):
detection: CenterNetDetection = dataclasses.field(
default_factory=CenterNetDetection
)
@dataclasses.dataclass
class CenterNetTask(cfg.TaskConfig):
"""Config for centernet task."""
model: CenterNetModel = dataclasses.field(default_factory=CenterNetModel)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
subtasks: CenterNetSubTasks = dataclasses.field(
default_factory=CenterNetSubTasks
)
losses: Losses = dataclasses.field(default_factory=Losses)
gradient_clip_norm: float = 10.0
per_category_metrics: bool = False
weight_decay: float = 5e-4
# Load checkpoints
init_checkpoint: Optional[str] = None
init_checkpoint_modules: str = 'all'
annotation_file: Optional[str] = None
def get_output_length_dict(self):
task_outputs = {}
if self.subtasks.detection and self.subtasks.detection.use_centers:
task_outputs.update({
'ct_heatmaps': self.model.num_classes,
'ct_offset': 2,
'ct_size': 2
})
else:
      raise ValueError(
          'Detection with center points is the only supported subtask.')
return task_outputs
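  # With the default detection subtask and num_classes=90, this resolves to
  # {'ct_heatmaps': 90, 'ct_offset': 2, 'ct_size': 2}: one center heatmap
  # channel per class, plus 2-channel center offsets and 2-channel box sizes.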
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('centernet_hourglass_coco')
def centernet_hourglass_coco() -> cfg.ExperimentConfig:
"""COCO object detection with CenterNet."""
train_batch_size = 128
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=CenterNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=CenterNetModel(),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(),
shuffle_buffer_size=2),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
shuffle_buffer_size=2),
),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=150 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
'adam': {
'epsilon': 1e-7
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.001,
'decay_steps': 150 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
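# The registered experiment can be fetched and overridden as usual; the
# override values below are illustrative:
#
#   config = exp_factory.get_exp_config('centernet_hourglass_coco')
#   config.task.train_data.global_batch_size = 64
#   config.trainer.train_steps = 10000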
| 7,972 | 31.279352 | 85 | py |
models | models-master/official/projects/centernet/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |