repo stringlengths 2-99 | file stringlengths 13-225 | code stringlengths 0-18.3M | file_length int64 0-18.3M | avg_line_length float64 0-1.36M | max_line_length int64 0-4.26M | extension_type stringclasses 1 (value: py)
---|---|---|---|---|---|---|
models | models-master/official/projects/deepmac_maskrcnn/modeling/maskrcnn_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for maskrcnn_model.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model
from official.projects.deepmac_maskrcnn.modeling.heads import instance_heads as deep_instance_heads
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.heads import instance_heads
from official.vision.modeling.layers import detection_generator
from official.vision.modeling.layers import mask_sampler
from official.vision.modeling.layers import roi_aligner
from official.vision.modeling.layers import roi_generator
from official.vision.modeling.layers import roi_sampler
from official.vision.ops import anchor
def construct_model_and_anchors(image_size, use_gt_boxes_for_masks):
num_classes = 3
min_level = 3
max_level = 4
num_scales = 3
aspect_ratios = [1.0]
anchor_boxes = anchor.Anchor(
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=3,
image_size=image_size).multilevel_boxes
num_anchors_per_location = len(aspect_ratios) * num_scales
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3])
backbone = resnet.ResNet(model_id=50, input_specs=input_specs)
decoder = fpn.FPN(
min_level=min_level,
max_level=max_level,
input_specs=backbone.output_specs)
rpn_head = dense_prediction_heads.RPNHead(
min_level=min_level,
max_level=max_level,
num_anchors_per_location=num_anchors_per_location)
detection_head = instance_heads.DetectionHead(
num_classes=num_classes)
roi_generator_obj = roi_generator.MultilevelROIGenerator()
roi_sampler_obj = roi_sampler.ROISampler()
roi_aligner_obj = roi_aligner.MultilevelROIAligner()
detection_generator_obj = detection_generator.DetectionGenerator()
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=num_classes, upsample_factor=2)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=28, num_sampled_masks=1)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14)
model = maskrcnn_model.DeepMaskRCNNModel(
backbone,
decoder,
rpn_head,
detection_head,
roi_generator_obj,
roi_sampler_obj,
roi_aligner_obj,
detection_generator_obj,
mask_head,
mask_sampler_obj,
mask_roi_aligner_obj,
use_gt_boxes_for_masks=use_gt_boxes_for_masks)
return model, anchor_boxes
class MaskRCNNModelTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(False, False, False),
(False, True, False),
(True, False, True),
(True, False, False),
(True, True, True),
(True, True, False),
)
def test_forward(self, use_gt_boxes_for_masks, training, use_outer_boxes):
image_size = (256, 256)
images = np.random.rand(2, image_size[0], image_size[1], 3)
image_shape = np.array([[224, 100], [100, 224]])
model, anchor_boxes = construct_model_and_anchors(
image_size, use_gt_boxes_for_masks)
gt_boxes = tf.zeros((2, 16, 4), dtype=tf.float32)
gt_outer_boxes = None
if use_outer_boxes:
gt_outer_boxes = tf.zeros((2, 16, 4), dtype=tf.float32)
gt_masks = tf.zeros((2, 16, 32, 32))
gt_classes = tf.zeros((2, 16), dtype=tf.int32)
results = model(images.astype(np.uint8),
image_shape,
anchor_boxes,
gt_boxes,
gt_classes,
gt_masks,
gt_outer_boxes,
training=training)
self.assertIn('rpn_boxes', results)
self.assertIn('rpn_scores', results)
if training:
self.assertIn('class_targets', results)
self.assertIn('box_targets', results)
self.assertIn('class_outputs', results)
self.assertIn('box_outputs', results)
self.assertIn('mask_outputs', results)
self.assertEqual(results['mask_targets'].shape,
results['mask_outputs'].shape)
else:
self.assertIn('detection_boxes', results)
self.assertIn('detection_scores', results)
self.assertIn('detection_classes', results)
self.assertIn('num_detections', results)
self.assertIn('detection_masks', results)
@parameterized.parameters(
[(1, 5), (1, 10), (1, 15), (2, 5), (2, 10), (2, 15)]
)
def test_image_and_boxes(self, batch_size, num_boxes):
image_size = (640, 640)
images = np.random.rand(batch_size, image_size[0], image_size[1], 3).astype(
np.float32)
model, _ = construct_model_and_anchors(
image_size, use_gt_boxes_for_masks=True)
boxes = np.zeros((batch_size, num_boxes, 4), dtype=np.float32)
boxes[:, :, [2, 3]] = 1.0
boxes = tf.constant(boxes)
results = model.call_images_and_boxes(images, boxes)
self.assertIn('detection_masks', results)
if __name__ == '__main__':
tf.test.main()
| 5,731 | 34.825 | 99 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/maskrcnn_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask R-CNN model."""
from typing import List, Mapping, Optional, Union
# Import libraries
from absl import logging
import tensorflow as tf
from official.vision.modeling import maskrcnn_model
from official.vision.ops import box_ops
def resize_as(source, size):
source = tf.transpose(source, (0, 2, 3, 1))
source = tf.image.resize(source, (size, size))
return tf.transpose(source, (0, 3, 1, 2))
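# Example (illustrative): `resize_as` treats the instance axis as channels, so
# per-instance masks of shape [batch, num_instances, H, W] are resized in the
# spatial dimensions only, e.g.
#   masks = tf.zeros((2, 16, 32, 32))
#   resized = resize_as(source=masks, size=28)  # -> shape (2, 16, 28, 28)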
class DeepMaskRCNNModel(maskrcnn_model.MaskRCNNModel):
"""The Mask R-CNN model."""
def __init__(self,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
rpn_head: tf.keras.layers.Layer,
detection_head: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_generator: tf.keras.layers.Layer,
roi_sampler: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_aligner: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
mask_head: Optional[tf.keras.layers.Layer] = None,
mask_sampler: Optional[tf.keras.layers.Layer] = None,
mask_roi_aligner: Optional[tf.keras.layers.Layer] = None,
class_agnostic_bbox_pred: bool = False,
cascade_class_ensemble: bool = False,
min_level: Optional[int] = None,
max_level: Optional[int] = None,
num_scales: Optional[int] = None,
aspect_ratios: Optional[List[float]] = None,
anchor_size: Optional[float] = None,
outer_boxes_scale: float = 1.0,
use_gt_boxes_for_masks=False,
**kwargs):
"""Initializes the Mask R-CNN model.
Args:
backbone: `tf.keras.Model`, the backbone network.
decoder: `tf.keras.Model`, the decoder network.
rpn_head: the RPN head.
detection_head: the detection head or a list of heads.
roi_generator: the ROI generator.
roi_sampler: a single ROI sampler or a list of ROI samplers for cascade
detection heads.
roi_aligner: the ROI aligner.
detection_generator: the detection generator.
mask_head: the mask head.
mask_sampler: the mask sampler.
mask_roi_aligner: the ROI aligner for mask prediction.
class_agnostic_bbox_pred: if True, perform class agnostic bounding box
prediction. Needs to be `True` for Cascade RCNN models.
cascade_class_ensemble: if True, ensemble classification scores over all
detection heads.
min_level: Minimum level in output feature maps.
max_level: Maximum level in output feature maps.
num_scales: A number representing intermediate scales added on each level.
For instance, num_scales=2 adds one additional intermediate anchor scale,
giving scales [2^0, 2^0.5] on each level.
aspect_ratios: A list representing the aspect ratio anchors added on each
level. The number indicates the ratio of width to height. For instance,
aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each scale level.
anchor_size: A number representing the scale of size of the base anchor to
the feature stride 2^level.
outer_boxes_scale: a float to scale up the bounding boxes to generate
more inclusive masks. The scale is expected to be >=1.0.
use_gt_boxes_for_masks: bool, if set, crop using groundtruth boxes instead
of proposals for training the mask head.
**kwargs: keyword arguments to be passed.
"""
super().__init__(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=detection_head,
roi_generator=roi_generator,
roi_sampler=roi_sampler,
roi_aligner=roi_aligner,
detection_generator=detection_generator,
mask_head=mask_head,
mask_sampler=mask_sampler,
mask_roi_aligner=mask_roi_aligner,
class_agnostic_bbox_pred=class_agnostic_bbox_pred,
cascade_class_ensemble=cascade_class_ensemble,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size,
outer_boxes_scale=outer_boxes_scale,
**kwargs)
self._config_dict['use_gt_boxes_for_masks'] = use_gt_boxes_for_masks
def call(self,
images: tf.Tensor,
image_shape: tf.Tensor,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
gt_boxes: Optional[tf.Tensor] = None,
gt_classes: Optional[tf.Tensor] = None,
gt_masks: Optional[tf.Tensor] = None,
gt_outer_boxes: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
call_box_outputs_kwargs = {
'images': images,
'image_shape': image_shape,
'anchor_boxes': anchor_boxes,
'gt_boxes': gt_boxes,
'gt_classes': gt_classes,
'training': training
}
if self.outer_boxes_scale > 1.0:
call_box_outputs_kwargs['gt_outer_boxes'] = gt_outer_boxes
model_outputs, intermediate_outputs = self._call_box_outputs(
**call_box_outputs_kwargs)
if not self._include_mask:
return model_outputs
if self.outer_boxes_scale == 1.0:
current_rois = intermediate_outputs['current_rois']
matched_gt_boxes = intermediate_outputs['matched_gt_boxes']
mask_head_gt_boxes = gt_boxes
else:
current_rois = box_ops.compute_outer_boxes(
intermediate_outputs['current_rois'],
tf.expand_dims(image_shape, axis=1), self.outer_boxes_scale)
matched_gt_boxes = intermediate_outputs['matched_gt_outer_boxes']
mask_head_gt_boxes = gt_outer_boxes
model_mask_outputs = self._call_mask_outputs(
model_box_outputs=model_outputs,
features=model_outputs['decoder_features'],
current_rois=current_rois,
matched_gt_indices=intermediate_outputs['matched_gt_indices'],
matched_gt_boxes=matched_gt_boxes,
matched_gt_classes=intermediate_outputs['matched_gt_classes'],
gt_masks=gt_masks,
gt_classes=gt_classes,
gt_boxes=mask_head_gt_boxes,
training=training)
model_outputs.update(model_mask_outputs)
return model_outputs
def call_images_and_boxes(self, images, boxes):
"""Predict masks given an image and bounding boxes."""
_, decoder_features = self._get_backbone_and_decoder_features(images)
boxes_shape = tf.shape(boxes)
batch_size, num_boxes = boxes_shape[0], boxes_shape[1]
classes = tf.zeros((batch_size, num_boxes), dtype=tf.int32)
_, mask_probs = self._features_to_mask_outputs(
decoder_features, boxes, classes)
return {
'detection_masks': mask_probs
}
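# Example (illustrative), mirroring test_image_and_boxes in
# maskrcnn_model_test.py above; `model` is assumed to be a built
# DeepMaskRCNNModel instance.
#   images = tf.zeros((2, 640, 640, 3), tf.float32)
#   boxes = tf.constant([[[0., 0., 1., 1.]] * 5] * 2)
#   masks = model.call_images_and_boxes(images, boxes)['detection_masks']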
def _call_mask_outputs(
self,
model_box_outputs: Mapping[str, tf.Tensor],
features: tf.Tensor,
current_rois: tf.Tensor,
matched_gt_indices: tf.Tensor,
matched_gt_boxes: tf.Tensor,
matched_gt_classes: tf.Tensor,
gt_masks: tf.Tensor,
gt_classes: tf.Tensor,
gt_boxes: tf.Tensor,
training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
model_outputs = dict(model_box_outputs)
if training:
if self._config_dict['use_gt_boxes_for_masks']:
mask_size = (
self.mask_roi_aligner._config_dict['crop_size'] * # pylint:disable=protected-access
self.mask_head._config_dict['upsample_factor'] # pylint:disable=protected-access
)
gt_masks = resize_as(source=gt_masks, size=mask_size)
logging.info('Using GT class and mask targets.')
model_outputs.update({
'mask_class_targets': gt_classes,
'mask_targets': gt_masks,
})
else:
rois, roi_classes, roi_masks = self.mask_sampler(
current_rois, matched_gt_boxes, matched_gt_classes,
matched_gt_indices, gt_masks)
roi_masks = tf.stop_gradient(roi_masks)
model_outputs.update({
'mask_class_targets': roi_classes,
'mask_targets': roi_masks,
})
else:
if self.outer_boxes_scale == 1.0:
rois = model_outputs['detection_boxes']
else:
rois = model_outputs['detection_outer_boxes']
roi_classes = model_outputs['detection_classes']
# Mask RoI align.
if training and self._config_dict['use_gt_boxes_for_masks']:
logging.info('Using GT mask roi features.')
roi_aligner_boxes = gt_boxes
mask_head_classes = gt_classes
else:
roi_aligner_boxes = rois
mask_head_classes = roi_classes
mask_logits, mask_probs = self._features_to_mask_outputs(
features, roi_aligner_boxes, mask_head_classes)
if training:
model_outputs.update({
'mask_outputs': mask_logits,
})
else:
model_outputs.update({
'detection_masks': mask_probs,
})
return model_outputs
| 9,656 | 37.628 | 96 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/heads/hourglass_network.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Hourglass[1] network.
[1]: https://arxiv.org/abs/1603.06937
"""
import tensorflow as tf
BATCH_NORM_EPSILON = 1e-5
BATCH_NORM_MOMENTUM = 0.1
BATCH_NORM_FUSED = True
class IdentityLayer(tf.keras.layers.Layer):
"""A layer which passes through the input as it is."""
def call(self, inputs):
return inputs
def _get_padding_for_kernel_size(kernel_size):
if kernel_size == 7:
return (3, 3)
elif kernel_size == 3:
return (1, 1)
else:
raise ValueError('Padding for kernel size {} not known.'.format(
kernel_size))
def batchnorm():
try:
return tf.keras.layers.experimental.SyncBatchNormalization(
name='batchnorm', epsilon=1e-5, momentum=0.1)
except AttributeError:
return tf.keras.layers.BatchNormalization(
name='batchnorm', epsilon=1e-5, momentum=0.1, fused=BATCH_NORM_FUSED)
class ConvolutionalBlock(tf.keras.layers.Layer):
"""Block that aggregates Convolution + Norm layer + ReLU."""
def __init__(self, kernel_size, out_channels, stride=1, relu=True,
padding='same'):
"""Initializes the Convolutional block.
Args:
kernel_size: int, convolution kernel size.
out_channels: int, the desired number of output channels.
stride: Integer, stride used in the convolution.
relu: bool, whether to use relu at the end of the layer.
padding: str, the padding scheme to use when kernel_size <= 1
"""
super(ConvolutionalBlock, self).__init__()
if kernel_size > 1:
padding = 'valid'
padding_size = _get_padding_for_kernel_size(kernel_size)
# TODO(vighneshb) Explore if removing and using padding option in conv
# layer works.
self.pad = tf.keras.layers.ZeroPadding2D(padding_size)
else:
self.pad = IdentityLayer()
self.conv = tf.keras.layers.Conv2D(
filters=out_channels, kernel_size=kernel_size, use_bias=False,
strides=stride, padding=padding)
self.norm = batchnorm()
if relu:
self.relu = tf.keras.layers.ReLU()
else:
self.relu = IdentityLayer()
def call(self, inputs):
net = self.pad(inputs)
net = self.conv(net)
net = self.norm(net)
return self.relu(net)
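# Example (illustrative): a 3x3 conv + batchnorm + ReLU block with 64 output
# channels preserves the spatial size.
#   block = ConvolutionalBlock(kernel_size=3, out_channels=64)
#   y = block(tf.zeros((1, 32, 32, 16)))  # -> shape (1, 32, 32, 64)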
class SkipConvolution(ConvolutionalBlock):
"""The skip connection layer for a ResNet."""
def __init__(self, out_channels, stride):
"""Initializes the skip convolution layer.
Args:
out_channels: int, the desired number of output channels.
stride: int, the stride for the layer.
"""
super(SkipConvolution, self).__init__(
out_channels=out_channels, kernel_size=1, stride=stride, relu=False)
class ResidualBlock(tf.keras.layers.Layer):
"""A Residual block."""
def __init__(self, out_channels, skip_conv=False, kernel_size=3, stride=1,
padding='same'):
"""Initializes the Residual block.
Args:
out_channels: int, the desired number of output channels.
skip_conv: bool, whether to use a conv layer for skip connections.
kernel_size: int, convolution kernel size.
stride: Integer, stride used in the convolution.
padding: str, the type of padding to use.
"""
super(ResidualBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=kernel_size, out_channels=out_channels, stride=stride)
self.conv = tf.keras.layers.Conv2D(
filters=out_channels, kernel_size=kernel_size, use_bias=False,
strides=1, padding=padding)
self.norm = batchnorm()
if skip_conv:
self.skip = SkipConvolution(out_channels=out_channels,
stride=stride)
else:
self.skip = IdentityLayer()
self.relu = tf.keras.layers.ReLU()
def call(self, inputs):
net = self.conv_block(inputs)
net = self.conv(net)
net = self.norm(net)
net_skip = self.skip(inputs)
return self.relu(net + net_skip)
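# Example (illustrative): when the output channel count differs from the input,
# the skip connection needs a 1x1 skip convolution.
#   block = ResidualBlock(out_channels=64, skip_conv=True)
#   y = block(tf.zeros((1, 32, 32, 16)))  # -> shape (1, 32, 32, 64)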
class InputDownsampleBlock(tf.keras.layers.Layer):
"""Block for the initial feature downsampling."""
def __init__(self, out_channels_initial_conv, out_channels_residual_block):
"""Initializes the downsample block.
Args:
out_channels_initial_conv: int, the desired number of output channels
in the initial conv layer.
out_channels_residual_block: int, the desired number of output channels
in the underlying residual block.
"""
super(InputDownsampleBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=7, out_channels=out_channels_initial_conv, stride=2,
padding='valid')
self.residual_block = ResidualBlock(
out_channels=out_channels_residual_block, stride=2, skip_conv=True)
def call(self, inputs):
return self.residual_block(self.conv_block(inputs))
class InputConvBlock(tf.keras.layers.Layer):
"""Block for the initial feature convolution.
This block is used in the hourglass network when we don't want to downsample
the input.
"""
def __init__(self, out_channels_initial_conv, out_channels_residual_block):
"""Initializes the downsample block.
Args:
out_channels_initial_conv: int, the desired number of output channels
in the initial conv layer.
out_channels_residual_block: int, the desired number of output channels
in the underlying residual block.
"""
super(InputConvBlock, self).__init__()
self.conv_block = ConvolutionalBlock(
kernel_size=3, out_channels=out_channels_initial_conv, stride=1,
padding='valid')
self.residual_block = ResidualBlock(
out_channels=out_channels_residual_block, stride=1, skip_conv=True)
def call(self, inputs):
return self.residual_block(self.conv_block(inputs))
def _make_repeated_residual_blocks(out_channels, num_blocks,
initial_stride=1, residual_channels=None,
initial_skip_conv=False):
"""Stack Residual blocks one after the other.
Args:
out_channels: int, the desired number of output channels.
num_blocks: int, the number of residual blocks to be stacked.
initial_stride: int, the stride of the initial residual block.
residual_channels: int, the desired number of output channels in the
intermediate residual blocks. If not specified, we use out_channels.
initial_skip_conv: bool, if set, the first residual block uses a skip
convolution. This is useful when the number of channels in the input
is not the same as residual_channels.
Returns:
blocks: A list of residual blocks to be applied in sequence.
"""
blocks = []
if residual_channels is None:
residual_channels = out_channels
for i in range(num_blocks - 1):
# Only use the stride at the first block so we don't repeatedly downsample
# the input
stride = initial_stride if i == 0 else 1
# If the stride is more than 1, we cannot use an identity layer for the
# skip connection and are forced to use a conv for the skip connection.
skip_conv = stride > 1
if i == 0 and initial_skip_conv:
skip_conv = True
blocks.append(
ResidualBlock(out_channels=residual_channels, stride=stride,
skip_conv=skip_conv)
)
if num_blocks == 1:
# If there is only 1 block, the for loop above is not run,
# therefore we honor the requested stride in the last residual block
stride = initial_stride
# We are forced to use a conv in the skip connection if stride > 1
skip_conv = stride > 1
else:
stride = 1
skip_conv = residual_channels != out_channels
blocks.append(ResidualBlock(out_channels=out_channels, skip_conv=skip_conv,
stride=stride))
return blocks
def _apply_blocks(inputs, blocks):
net = inputs
for block in blocks:
net = block(net)
return net
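# Example (illustrative): three stacked residual blocks that downsample once.
#   blocks = _make_repeated_residual_blocks(
#       out_channels=64, num_blocks=3, initial_stride=2)
#   y = _apply_blocks(tf.zeros((1, 64, 64, 32)), blocks)  # -> (1, 32, 32, 64)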
class EncoderDecoderBlock(tf.keras.layers.Layer):
"""An encoder-decoder block which recursively defines the hourglass network."""
def __init__(self, num_stages, channel_dims, blocks_per_stage,
stagewise_downsample=True, encoder_decoder_shortcut=True):
"""Initializes the encoder-decoder block.
Args:
num_stages: int, Number of stages in the network. At each stage we have 2
encoder and 1 decoder blocks. The second encoder block downsamples the
input.
channel_dims: int list, the output channel dimensions of stages in
the network. `channel_dims[0]` is used to define the number of
channels in the first encoder block and `channel_dims[1]` is used to
define the number of channels in the second encoder block. The channels
in the recursive inner layers are defined using `channel_dims[1:]`
blocks_per_stage: int list, number of residual blocks to use at each
stage. `blocks_per_stage[0]` defines the number of blocks at the
current stage and `blocks_per_stage[1:]` is used at further stages.
stagewise_downsample: bool, whether or not to downsample before passing
inputs to the next stage.
encoder_decoder_shortcut: bool, whether or not to use shortcut
connections between encoder and decoder.
"""
super(EncoderDecoderBlock, self).__init__()
out_channels = channel_dims[0]
out_channels_downsampled = channel_dims[1]
self.encoder_decoder_shortcut = encoder_decoder_shortcut
if encoder_decoder_shortcut:
self.merge_features = tf.keras.layers.Add()
self.encoder_block1 = _make_repeated_residual_blocks(
out_channels=out_channels, num_blocks=blocks_per_stage[0],
initial_stride=1)
initial_stride = 2 if stagewise_downsample else 1
self.encoder_block2 = _make_repeated_residual_blocks(
out_channels=out_channels_downsampled,
num_blocks=blocks_per_stage[0], initial_stride=initial_stride,
initial_skip_conv=out_channels != out_channels_downsampled)
if num_stages > 1:
self.inner_block = [
EncoderDecoderBlock(num_stages - 1, channel_dims[1:],
blocks_per_stage[1:],
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut)
]
else:
self.inner_block = _make_repeated_residual_blocks(
out_channels=out_channels_downsampled,
num_blocks=blocks_per_stage[1])
self.decoder_block = _make_repeated_residual_blocks(
residual_channels=out_channels_downsampled,
out_channels=out_channels, num_blocks=blocks_per_stage[0])
self.upsample = tf.keras.layers.UpSampling2D(initial_stride)
def call(self, inputs):
if self.encoder_decoder_shortcut:
encoded_outputs = _apply_blocks(inputs, self.encoder_block1)
encoded_downsampled_outputs = _apply_blocks(inputs, self.encoder_block2)
inner_block_outputs = _apply_blocks(
encoded_downsampled_outputs, self.inner_block)
decoded_outputs = _apply_blocks(inner_block_outputs, self.decoder_block)
upsampled_outputs = self.upsample(decoded_outputs)
if self.encoder_decoder_shortcut:
return self.merge_features([encoded_outputs, upsampled_outputs])
else:
return upsampled_outputs
class HourglassNetwork(tf.keras.Model):
"""The hourglass network."""
def __init__(self, num_stages, input_channel_dims, channel_dims_per_stage,
blocks_per_stage, num_hourglasses, initial_downsample=True,
stagewise_downsample=True, encoder_decoder_shortcut=True):
"""Intializes the feature extractor.
Args:
num_stages: int, Number of stages in the network. At each stage we have 2
encoder and 1 decoder blocks. The second encoder block downsamples the
input.
input_channel_dims: int, the number of channels in the input conv blocks.
channel_dims_per_stage: int list, the output channel dimensions of each
stage in the hourglass network.
blocks_per_stage: int list, number of residual blocks to use at each
stage in the hourglass network.
num_hourglasses: int, number of hourglass networks to stack
sequentially.
initial_downsample: bool, if set, downsamples the input by a factor of 4
before applying the rest of the network. Downsampling is done with a 7x7
convolution kernel, otherwise a 3x3 kernel is used.
stagewise_downsample: bool, whether or not to downsample before passing
inputs to the next stage.
encoder_decoder_shortcut: bool, whether or not to use shortcut
connections between encoder and decoder.
"""
super(HourglassNetwork, self).__init__()
self.num_hourglasses = num_hourglasses
self.initial_downsample = initial_downsample
if initial_downsample:
self.downsample_input = InputDownsampleBlock(
out_channels_initial_conv=input_channel_dims,
out_channels_residual_block=channel_dims_per_stage[0]
)
else:
self.conv_input = InputConvBlock(
out_channels_initial_conv=input_channel_dims,
out_channels_residual_block=channel_dims_per_stage[0]
)
self.hourglass_network = []
self.output_conv = []
for _ in range(self.num_hourglasses):
self.hourglass_network.append(
EncoderDecoderBlock(
num_stages=num_stages, channel_dims=channel_dims_per_stage,
blocks_per_stage=blocks_per_stage,
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut)
)
self.output_conv.append(
ConvolutionalBlock(kernel_size=3,
out_channels=channel_dims_per_stage[0])
)
self.intermediate_conv1 = []
self.intermediate_conv2 = []
self.intermediate_residual = []
for _ in range(self.num_hourglasses - 1):
self.intermediate_conv1.append(
ConvolutionalBlock(
kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False)
)
self.intermediate_conv2.append(
ConvolutionalBlock(
kernel_size=1, out_channels=channel_dims_per_stage[0], relu=False)
)
self.intermediate_residual.append(
ResidualBlock(out_channels=channel_dims_per_stage[0])
)
self.intermediate_relu = tf.keras.layers.ReLU()
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if self.initial_downsample:
inputs = self.downsample_input(inputs)
else:
inputs = self.conv_input(inputs)
outputs = []
for i in range(self.num_hourglasses):
hourglass_output = self.hourglass_network[i](inputs)
output = self.output_conv[i](hourglass_output)
outputs.append(output)
if i < self.num_hourglasses - 1:
secondary_output = (self.intermediate_conv1[i](inputs) +
self.intermediate_conv2[i](output))
secondary_output = self.intermediate_relu(secondary_output)
inputs = self.intermediate_residual[i](secondary_output)
return outputs
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
return self.num_hourglasses
def _layer_depth(layer):
"""Compute depth of Conv/Residual blocks or lists of them."""
if isinstance(layer, list):
return sum([_layer_depth(l) for l in layer])
elif isinstance(layer, ConvolutionalBlock):
return 1
elif isinstance(layer, ResidualBlock):
return 2
else:
raise ValueError('Unknown layer - {}'.format(layer))
def _encoder_decoder_depth(network):
"""Helper function to compute depth of encoder-decoder blocks."""
encoder_block2_layers = _layer_depth(network.encoder_block2)
decoder_block_layers = _layer_depth(network.decoder_block)
if isinstance(network.inner_block[0], EncoderDecoderBlock):
assert len(network.inner_block) == 1, 'Inner block is expected as length 1.'
inner_block_layers = _encoder_decoder_depth(network.inner_block[0])
return inner_block_layers + encoder_block2_layers + decoder_block_layers
elif isinstance(network.inner_block[0], ResidualBlock):
return (encoder_block2_layers + decoder_block_layers +
_layer_depth(network.inner_block))
else:
raise ValueError('Unknown inner block type.')
def hourglass_depth(network):
"""Helper function to verify depth of hourglass backbone."""
input_conv_layers = 3 # 1 ResidualBlock and 1 ConvBlock
# Only intermediate_conv2 and intermediate_residual are applied before
# sending inputs to the later stages.
intermediate_layers = (
_layer_depth(network.intermediate_conv2) +
_layer_depth(network.intermediate_residual)
)
# network.output_conv is applied before sending input to the later stages
output_layers = _layer_depth(network.output_conv)
encoder_decoder_layers = sum(_encoder_decoder_depth(net) for net in
network.hourglass_network)
return (input_conv_layers + encoder_decoder_layers + intermediate_layers
+ output_layers)
def hourglass_104():
"""The Hourglass-104 backbone.
The architecture parameters are taken from [1].
Returns:
network: An HourglassNetwork object implementing the Hourglass-104
backbone.
[1]: https://arxiv.org/abs/1904.07850
"""
return HourglassNetwork(
input_channel_dims=128,
channel_dims_per_stage=[256, 256, 384, 384, 384, 512],
num_hourglasses=2,
num_stages=5,
blocks_per_stage=[2, 2, 2, 2, 2, 4],
)
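# Example (illustrative): the Hourglass-104 backbone returns one feature map per
# stacked hourglass, each at 1/4 of the input resolution with 256 channels.
#   net = hourglass_104()
#   outputs = net(tf.zeros((1, 512, 512, 3)))
#   # len(outputs) == 2; each output has shape (1, 128, 128, 256)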
def single_stage_hourglass(input_channel_dims, channel_dims_per_stage,
blocks_per_stage, initial_downsample=True,
stagewise_downsample=True,
encoder_decoder_shortcut=True):
assert len(channel_dims_per_stage) == len(blocks_per_stage)
return HourglassNetwork(
input_channel_dims=input_channel_dims,
channel_dims_per_stage=channel_dims_per_stage,
num_hourglasses=1,
num_stages=len(channel_dims_per_stage) - 1,
blocks_per_stage=blocks_per_stage,
initial_downsample=initial_downsample,
stagewise_downsample=stagewise_downsample,
encoder_decoder_shortcut=encoder_decoder_shortcut
)
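# Example (illustrative): the named variants below are thin wrappers around
# single_stage_hourglass; hourglass_20(num_channels=32) is equivalent to
#   single_stage_hourglass(input_channel_dims=32,
#                          blocks_per_stage=[1, 2, 2],
#                          channel_dims_per_stage=[64, 64, 96])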
def hourglass_10(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[1, 1],
channel_dims_per_stage=[nc * 2, nc * 2])
def hourglass_20(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3])
def hourglass_32(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[2, 2, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3])
def hourglass_52(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[2, 2, 2, 2, 2, 4],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4])
def hourglass_100(num_channels, initial_downsample=True):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
initial_downsample=initial_downsample,
blocks_per_stage=[4, 4, 4, 4, 4, 8],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3, nc * 3, nc * 3, nc*4])
def hourglass_20_uniform_size(num_channels):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3],
initial_downsample=False,
stagewise_downsample=False)
def hourglass_20_no_shortcut(num_channels):
nc = num_channels
return single_stage_hourglass(
input_channel_dims=nc,
blocks_per_stage=[1, 2, 2],
channel_dims_per_stage=[nc * 2, nc * 2, nc * 3],
initial_downsample=False,
encoder_decoder_shortcut=False)
| 21,705 | 33.021944 | 100 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/heads/instance_heads_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for instance_heads.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.deepmac_maskrcnn.modeling.heads import instance_heads as deep_instance_heads
class MaskHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(1, 1, False),
(1, 2, False),
(2, 1, False),
(2, 2, False),
)
def test_forward(self, upsample_factor, num_convs, use_sync_bn):
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=3,
upsample_factor=upsample_factor,
num_convs=num_convs,
num_filters=16,
use_separable_conv=False,
activation='relu',
use_sync_bn=use_sync_bn,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
roi_features = np.random.rand(2, 10, 14, 14, 16)
roi_classes = np.zeros((2, 10))
masks = mask_head([roi_features, roi_classes])
self.assertAllEqual(
masks.numpy().shape,
[2, 10, 14 * upsample_factor, 14 * upsample_factor])
def test_serialize_deserialize(self):
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=3,
upsample_factor=2,
num_convs=1,
num_filters=256,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
config = mask_head.get_config()
new_mask_head = deep_instance_heads.DeepMaskHead.from_config(config)
self.assertAllEqual(
mask_head.get_config(), new_mask_head.get_config())
def test_forward_class_agnostic(self):
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=3,
class_agnostic=True
)
roi_features = np.random.rand(2, 10, 14, 14, 16)
roi_classes = np.zeros((2, 10))
masks = mask_head([roi_features, roi_classes])
self.assertAllEqual(masks.numpy().shape, [2, 10, 28, 28])
def test_instance_head_hourglass(self):
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=3,
class_agnostic=True,
convnet_variant='hourglass20',
num_filters=32,
upsample_factor=2
)
roi_features = np.random.rand(2, 10, 16, 16, 16)
roi_classes = np.zeros((2, 10))
masks = mask_head([roi_features, roi_classes])
self.assertAllEqual(masks.numpy().shape, [2, 10, 32, 32])
if __name__ == '__main__':
tf.test.main()
| 3,172 | 31.050505 | 99 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/heads/instance_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instance prediction heads."""
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.deepmac_maskrcnn.modeling.heads import hourglass_network
class DeepMaskHead(tf.keras.layers.Layer):
"""Creates a mask head."""
def __init__(self,
num_classes,
upsample_factor=2,
num_convs=4,
num_filters=256,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
class_agnostic=False,
convnet_variant='default',
**kwargs):
"""Initializes a mask head.
Args:
num_classes: An `int` of the number of classes.
upsample_factor: An `int` that indicates the upsample factor to generate
the final predicted masks. It should be >= 1.
num_convs: An `int` number that represents the number of the intermediate
convolution layers before the mask prediction layers.
num_filters: An `int` number that represents the number of filters of the
intermediate convolution layers.
use_separable_conv: A `bool` that indicates whether the separable
convolution layers is used.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
class_agnostic: A `bool`. If set, we use a single channel mask head that
is shared between all classes.
convnet_variant: A `str` denoting the architecture of network used in the
head. Supported options are 'default', 'hourglass20', 'hourglass52'
and 'hourglass100'.
**kwargs: Additional keyword arguments to be passed.
"""
super(DeepMaskHead, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'upsample_factor': upsample_factor,
'num_convs': num_convs,
'num_filters': num_filters,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'class_agnostic': class_agnostic,
'convnet_variant': convnet_variant,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def _get_conv_op_and_kwargs(self):
conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
}
if self._config_dict['use_separable_conv']:
conv_kwargs.update({
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
else:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
return conv_op, conv_kwargs
def _get_bn_op_and_kwargs(self):
bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
return bn_op, bn_kwargs
def build(self, input_shape):
"""Creates the variables of the head."""
conv_op, conv_kwargs = self._get_conv_op_and_kwargs()
self._build_convnet_variant()
self._deconv = tf.keras.layers.Conv2DTranspose(
filters=self._config_dict['num_filters'],
kernel_size=self._config_dict['upsample_factor'],
strides=self._config_dict['upsample_factor'],
padding='valid',
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name='mask-upsampling')
bn_op, bn_kwargs = self._get_bn_op_and_kwargs()
self._deconv_bn = bn_op(name='mask-deconv-bn', **bn_kwargs)
if self._config_dict['class_agnostic']:
num_filters = 1
else:
num_filters = self._config_dict['num_classes']
conv_kwargs = {
'filters': num_filters,
'kernel_size': 1,
'padding': 'valid',
}
if self._config_dict['use_separable_conv']:
conv_kwargs.update({
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
else:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
self._mask_regressor = conv_op(name='mask-logits', **conv_kwargs)
super(DeepMaskHead, self).build(input_shape)
def call(self, inputs, training=None):
"""Forward pass of mask branch for the Mask-RCNN model.
Args:
inputs: A `list` of two tensors where
inputs[0]: A `tf.Tensor` of shape [batch_size, num_instances,
roi_height, roi_width, roi_channels], representing the ROI features.
inputs[1]: A `tf.Tensor` of shape [batch_size, num_instances],
representing the classes of the ROIs.
training: A `bool` indicating whether it is in `training` mode.
Returns:
mask_outputs: A `tf.Tensor` of shape
[batch_size, num_instances, roi_height * upsample_factor,
roi_width * upsample_factor], representing the mask predictions.
"""
roi_features, roi_classes = inputs
features_shape = tf.shape(roi_features)
num_rois, height, width, filters = (
features_shape[1],
features_shape[2],
features_shape[3],
features_shape[4],
)
x = tf.reshape(roi_features, [-1, height, width, filters])
x = self._call_convnet_variant(x)
x = self._deconv(x)
x = self._deconv_bn(x)
x = self._activation(x)
logits = self._mask_regressor(x)
mask_height = height * self._config_dict['upsample_factor']
mask_width = width * self._config_dict['upsample_factor']
if self._config_dict['class_agnostic']:
return tf.reshape(logits, [-1, num_rois, mask_height, mask_width])
else:
logits = tf.reshape(
logits,
[-1, num_rois, mask_height, mask_width,
self._config_dict['num_classes']])
return tf.gather(
logits, tf.cast(roi_classes, dtype=tf.int32), axis=-1, batch_dims=2
)
def _build_convnet_variant(self):
variant = self._config_dict['convnet_variant']
if variant == 'default':
bn_op, bn_kwargs = self._get_bn_op_and_kwargs()
self._convs = []
self._conv_norms = []
for i in range(self._config_dict['num_convs']):
conv_name = 'mask-conv_{}'.format(i)
conv_op, conv_kwargs = self._get_conv_op_and_kwargs()
self._convs.append(conv_op(name=conv_name, **conv_kwargs))
bn_name = 'mask-conv-bn_{}'.format(i)
self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs))
elif variant == 'hourglass20':
logging.info('Using hourglass 20 network.')
self._hourglass = hourglass_network.hourglass_20(
self._config_dict['num_filters'], initial_downsample=False)
elif variant == 'hourglass52':
logging.info('Using hourglass 52 network.')
self._hourglass = hourglass_network.hourglass_52(
self._config_dict['num_filters'], initial_downsample=False)
elif variant == 'hourglass100':
logging.info('Using hourglass 100 network.')
self._hourglass = hourglass_network.hourglass_100(
self._config_dict['num_filters'], initial_downsample=False)
else:
raise ValueError('Unknown ConvNet variant - {}'.format(variant))
def _call_convnet_variant(self, x):
variant = self._config_dict['convnet_variant']
if variant == 'default':
for conv, bn in zip(self._convs, self._conv_norms):
x = conv(x)
x = bn(x)
x = self._activation(x)
return x
elif variant == 'hourglass20':
return self._hourglass(x)[-1]
elif variant == 'hourglass52':
return self._hourglass(x)[-1]
elif variant == 'hourglass100':
return self._hourglass(x)[-1]
else:
raise ValueError('Unknown ConvNet variant - {}'.format(variant))
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
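# Example (illustrative), mirroring instance_heads_test.py above: a
# class-agnostic head turns 14x14 ROI features into 28x28 mask logits.
#   head = DeepMaskHead(num_classes=3, class_agnostic=True)
#   masks = head([tf.zeros((2, 10, 14, 14, 16)), tf.zeros((2, 10))])
#   # masks.shape == (2, 10, 28, 28)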
| 11,466 | 37.351171 | 80 | py |
models | models-master/official/projects/deepmac_maskrcnn/modeling/heads/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/deepmac_maskrcnn/tasks/deep_mask_head_rcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask R-CNN variant with support for deep mask heads."""
import tensorflow as tf
from official.core import task_factory
from official.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn as deep_mask_head_rcnn_config
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model as deep_maskrcnn_model
from official.projects.deepmac_maskrcnn.modeling.heads import instance_heads as deep_instance_heads
from official.vision.modeling import backbones
from official.vision.modeling.decoders import factory as decoder_factory
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.heads import instance_heads
from official.vision.modeling.layers import detection_generator
from official.vision.modeling.layers import mask_sampler
from official.vision.modeling.layers import roi_aligner
from official.vision.modeling.layers import roi_generator
from official.vision.modeling.layers import roi_sampler
from official.vision.tasks import maskrcnn
# Taken from modeling/factory.py
def build_maskrcnn(input_specs: tf.keras.layers.InputSpec,
model_config: deep_mask_head_rcnn_config.DeepMaskHeadRCNN,
l2_regularizer: tf.keras.regularizers.Regularizer = None): # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds Mask R-CNN model."""
norm_activation_config = model_config.norm_activation
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
rpn_head_config = model_config.rpn_head
roi_generator_config = model_config.roi_generator
roi_sampler_config = model_config.roi_sampler
roi_aligner_config = model_config.roi_aligner
detection_head_config = model_config.detection_head
generator_config = model_config.detection_generator
num_anchors_per_location = (
len(model_config.anchor.aspect_ratios) * model_config.anchor.num_scales)
rpn_head = dense_prediction_heads.RPNHead(
min_level=model_config.min_level,
max_level=model_config.max_level,
num_anchors_per_location=num_anchors_per_location,
num_convs=rpn_head_config.num_convs,
num_filters=rpn_head_config.num_filters,
use_separable_conv=rpn_head_config.use_separable_conv,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
detection_head = instance_heads.DetectionHead(
num_classes=model_config.num_classes,
num_convs=detection_head_config.num_convs,
num_filters=detection_head_config.num_filters,
use_separable_conv=detection_head_config.use_separable_conv,
num_fcs=detection_head_config.num_fcs,
fc_dims=detection_head_config.fc_dims,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
roi_generator_obj = roi_generator.MultilevelROIGenerator(
pre_nms_top_k=roi_generator_config.pre_nms_top_k,
pre_nms_score_threshold=roi_generator_config.pre_nms_score_threshold,
pre_nms_min_size_threshold=(
roi_generator_config.pre_nms_min_size_threshold),
nms_iou_threshold=roi_generator_config.nms_iou_threshold,
num_proposals=roi_generator_config.num_proposals,
test_pre_nms_top_k=roi_generator_config.test_pre_nms_top_k,
test_pre_nms_score_threshold=(
roi_generator_config.test_pre_nms_score_threshold),
test_pre_nms_min_size_threshold=(
roi_generator_config.test_pre_nms_min_size_threshold),
test_nms_iou_threshold=roi_generator_config.test_nms_iou_threshold,
test_num_proposals=roi_generator_config.test_num_proposals,
use_batched_nms=roi_generator_config.use_batched_nms)
roi_sampler_obj = roi_sampler.ROISampler(
mix_gt_boxes=roi_sampler_config.mix_gt_boxes,
num_sampled_rois=roi_sampler_config.num_sampled_rois,
foreground_fraction=roi_sampler_config.foreground_fraction,
foreground_iou_threshold=roi_sampler_config.foreground_iou_threshold,
background_iou_high_threshold=(
roi_sampler_config.background_iou_high_threshold),
background_iou_low_threshold=(
roi_sampler_config.background_iou_low_threshold))
roi_aligner_obj = roi_aligner.MultilevelROIAligner(
crop_size=roi_aligner_config.crop_size,
sample_offset=roi_aligner_config.sample_offset)
detection_generator_obj = detection_generator.DetectionGenerator(
apply_nms=True,
pre_nms_top_k=generator_config.pre_nms_top_k,
pre_nms_score_threshold=generator_config.pre_nms_score_threshold,
nms_iou_threshold=generator_config.nms_iou_threshold,
max_num_detections=generator_config.max_num_detections,
nms_version=generator_config.nms_version,
use_sigmoid_probability=generator_config.use_sigmoid_probability)
if model_config.include_mask:
mask_head = deep_instance_heads.DeepMaskHead(
num_classes=model_config.num_classes,
upsample_factor=model_config.mask_head.upsample_factor,
num_convs=model_config.mask_head.num_convs,
num_filters=model_config.mask_head.num_filters,
use_separable_conv=model_config.mask_head.use_separable_conv,
activation=model_config.norm_activation.activation,
norm_momentum=model_config.norm_activation.norm_momentum,
norm_epsilon=model_config.norm_activation.norm_epsilon,
kernel_regularizer=l2_regularizer,
class_agnostic=model_config.mask_head.class_agnostic,
convnet_variant=model_config.mask_head.convnet_variant)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=(
model_config.mask_roi_aligner.crop_size *
model_config.mask_head.upsample_factor),
num_sampled_masks=model_config.mask_sampler.num_sampled_masks)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(
crop_size=model_config.mask_roi_aligner.crop_size,
sample_offset=model_config.mask_roi_aligner.sample_offset)
else:
mask_head = None
mask_sampler_obj = None
mask_roi_aligner_obj = None
model = deep_maskrcnn_model.DeepMaskRCNNModel(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=detection_head,
roi_generator=roi_generator_obj,
roi_sampler=roi_sampler_obj,
roi_aligner=roi_aligner_obj,
detection_generator=detection_generator_obj,
mask_head=mask_head,
mask_sampler=mask_sampler_obj,
mask_roi_aligner=mask_roi_aligner_obj,
class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred,
cascade_class_ensemble=detection_head_config.cascade_class_ensemble,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_scales=model_config.anchor.num_scales,
aspect_ratios=model_config.anchor.aspect_ratios,
anchor_size=model_config.anchor.anchor_size,
outer_boxes_scale=model_config.outer_boxes_scale,
use_gt_boxes_for_masks=model_config.use_gt_boxes_for_masks)
return model
@task_factory.register_task_cls(deep_mask_head_rcnn_config.DeepMaskHeadRCNNTask)
class DeepMaskHeadRCNNTask(maskrcnn.MaskRCNNTask):
"""Mask R-CNN with support for deep mask heads."""
def build_model(self):
"""Builds Mask R-CNN model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = build_maskrcnn(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
# Builds the model through warm-up call.
dummy_images = tf.keras.Input(self.task_config.model.input_size)
dummy_image_shape = tf.keras.layers.Input([2])
_ = model(dummy_images, image_shape=dummy_image_shape, training=False)
return model
| 9,426 | 44.105263 | 136 | py |
models | models-master/official/projects/deepmac_maskrcnn/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/unified_detector/run_inference.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A binary to run unified detector."""
import json
import os
from typing import Any, Dict, List, Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
import cv2
import gin
import numpy as np
import tensorflow as tf
import tqdm
from official.projects.unified_detector import external_configurables # pylint: disable=unused-import
from official.projects.unified_detector.modeling import universal_detector
from official.projects.unified_detector.utils import utilities
# group two lines into a paragraph if affinity score higher than this
_PARA_GROUP_THR = 0.5
# MODEL spec
_GIN_FILE = flags.DEFINE_string(
'gin_file', None, 'Path to the Gin file that defines the model.')
_CKPT_PATH = flags.DEFINE_string(
'ckpt_path', None, 'Path to the checkpoint directory.')
_IMG_SIZE = flags.DEFINE_integer(
'img_size', 1024, 'Size of the image fed to the model.')
# Input & Output
# Note that all images specified by `img_file` and `img_dir` will be processed.
_IMG_FILE = flags.DEFINE_multi_string('img_file', [], 'Paths to the images.')
_IMG_DIR = flags.DEFINE_multi_string(
'img_dir', [], 'Paths to the image directories.')
_OUTPUT_PATH = flags.DEFINE_string('output_path', None, 'Path for the output.')
_VIS_DIR = flags.DEFINE_string(
'vis_dir', None, 'Path for the visualization output.')
def _preprocess(raw_image: np.ndarray) -> Tuple[np.ndarray, float]:
  """Converts a raw image to a resized, padded, normalized array and a ratio."""
# (1) convert to tf.Tensor and float32.
img_tensor = tf.convert_to_tensor(raw_image, dtype=tf.float32)
# (2) pad to square.
height, width = img_tensor.shape[:2]
maximum_side = tf.maximum(height, width)
height_pad = maximum_side - height
width_pad = maximum_side - width
img_tensor = tf.pad(
img_tensor, [[0, height_pad], [0, width_pad], [0, 0]],
constant_values=127)
ratio = maximum_side / _IMG_SIZE.value
  # (3) resize the padded square image to the target size (_IMG_SIZE).
img_tensor = tf.image.resize(
img_tensor, (_IMG_SIZE.value, _IMG_SIZE.value))
img_tensor = tf.cast(img_tensor, tf.uint8)
# (4) normalize
img_tensor = utilities.normalize_image_to_range(img_tensor)
# (5) Add batch dimension and return as numpy array.
return tf.expand_dims(img_tensor, 0).numpy(), float(ratio)
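# Illustrative example (values are hypothetical): for a 600x800 input with the
# default --img_size=1024, the image is padded to 800x800 and resized to
# 1024x1024, so `ratio` is 800 / 1024 = 0.78125. Model-space coordinates are
# later multiplied by this ratio to map them back to the original image.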
def load_model() -> tf.keras.layers.Layer:
gin.parse_config_file(_GIN_FILE.value)
model = universal_detector.UniversalDetector()
ckpt = tf.train.Checkpoint(model=model)
ckpt_path = _CKPT_PATH.value
logging.info('Load ckpt from: %s', ckpt_path)
ckpt.restore(ckpt_path).expect_partial()
return model
def inference(img_file: str,
              model: tf.keras.layers.Layer) -> List[Dict[str, Any]]:
  """Runs detection on one image and returns a list of paragraph dicts."""
img = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2RGB)
img_ndarray, ratio = _preprocess(img)
output_dict = model.serve(img_ndarray)
class_tensor = output_dict['classes'].numpy()
mask_tensor = output_dict['masks'].numpy()
group_tensor = output_dict['groups'].numpy()
indices = np.where(class_tensor[0])[0].tolist() # indices of positive slots.
mask_list = [
mask_tensor[0, :, :, index] for index in indices] # List of mask ndarray.
# Form lines and words
lines = []
line_indices = []
for index, mask in tqdm.tqdm(zip(indices, mask_list)):
line = {
'words': [],
'text': '',
}
contours, _ = cv2.findContours(
(mask > 0.).astype(np.uint8),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
for contour in contours:
if (isinstance(contour, np.ndarray) and
len(contour.shape) == 3 and
contour.shape[0] > 2 and
contour.shape[1] == 1 and
contour.shape[2] == 2):
cnt_list = (contour[:, 0] * ratio).astype(np.int32).tolist()
line['words'].append({'text': '', 'vertices': cnt_list})
else:
logging.error('Invalid contour: %s, discarded', str(contour))
if line['words']:
lines.append(line)
line_indices.append(index)
# Form paragraphs
line_grouping = utilities.DisjointSet(len(line_indices))
affinity = group_tensor[0][line_indices][:, line_indices]
for i1, i2 in zip(*np.where(affinity > _PARA_GROUP_THR)):
line_grouping.union(i1, i2)
line_groups = line_grouping.to_group()
paragraphs = []
for line_group in line_groups:
paragraph = {'lines': []}
for id_ in line_group:
paragraph['lines'].append(lines[id_])
if paragraph:
paragraphs.append(paragraph)
return paragraphs
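# The returned value (illustrative) is a list of paragraph dicts of the form
# {'lines': [{'text': '', 'words': [{'text': '', 'vertices': [[x, y], ...]}]}]}
# where each word's vertices are polygon points in original-image coordinates.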
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Get list of images
img_lists = []
img_lists.extend(_IMG_FILE.value)
for img_dir in _IMG_DIR.value:
img_lists.extend(tf.io.gfile.glob(os.path.join(img_dir, '*')))
logging.info('Total number of input images: %d', len(img_lists))
model = load_model()
  vis_dir = _VIS_DIR.value
output = {'annotations': []}
for img_file in tqdm.tqdm(img_lists):
output['annotations'].append({
'image_id': img_file.split('/')[-1].split('.')[0],
'paragraphs': inference(img_file, model),
})
    if vis_dir:
key = output['annotations'][-1]['image_id']
paragraphs = output['annotations'][-1]['paragraphs']
img = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2RGB)
word_bnds = []
line_bnds = []
para_bnds = []
for paragraph in paragraphs:
paragraph_points_list = []
for line in paragraph['lines']:
line_points_list = []
for word in line['words']:
word_bnds.append(
np.array(word['vertices'], np.int32).reshape((-1, 1, 2)))
line_points_list.extend(word['vertices'])
paragraph_points_list.extend(line_points_list)
line_points = np.array(line_points_list, np.int32) # (N,2)
left = int(np.min(line_points[:, 0]))
top = int(np.min(line_points[:, 1]))
right = int(np.max(line_points[:, 0]))
bottom = int(np.max(line_points[:, 1]))
line_bnds.append(
np.array([[[left, top]], [[right, top]], [[right, bottom]],
[[left, bottom]]], np.int32))
para_points = np.array(paragraph_points_list, np.int32) # (N,2)
left = int(np.min(para_points[:, 0]))
top = int(np.min(para_points[:, 1]))
right = int(np.max(para_points[:, 0]))
bottom = int(np.max(para_points[:, 1]))
para_bnds.append(
np.array([[[left, top]], [[right, top]], [[right, bottom]],
[[left, bottom]]], np.int32))
for name, bnds in zip(['paragraph', 'line', 'word'],
[para_bnds, line_bnds, word_bnds]):
vis = cv2.polylines(img, bnds, True, (0, 0, 255), 2)
        cv2.imwrite(os.path.join(vis_dir, f'{key}-{name}.jpg'),
cv2.cvtColor(vis, cv2.COLOR_RGB2BGR))
with tf.io.gfile.GFile(_OUTPUT_PATH.value, mode='w') as f:
f.write(json.dumps(output, ensure_ascii=False, indent=2))
if __name__ == '__main__':
flags.mark_flags_as_required(['gin_file', 'ckpt_path', 'output_path'])
app.run(main)
| 7,791 | 33.941704 | 102 | py |
models | models-master/official/projects/unified_detector/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
from official.projects.unified_detector import external_configurables
from official.projects.unified_detector.configs import ocr_config
from official.projects.unified_detector.tasks import ocr_task
from official.vision import registry_imports
| 931 | 41.363636 | 74 | py |
models | models-master/official/projects/unified_detector/external_configurables.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrap external code in gin."""
import gin
import gin.tf.external_configurables
import tensorflow as tf
# Tensorflow.
gin.external_configurable(tf.keras.layers.experimental.SyncBatchNormalization)
| 809 | 34.217391 | 78 | py |
models | models-master/official/projects/unified_detector/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# pylint: disable=unused-import
from official.projects.unified_detector import registry_imports
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
app.run(main)
| 2,618 | 35.887324 | 80 | py |
models | models-master/official/projects/unified_detector/data_conversion/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to convert data to TFExamples and store in TFRecords."""
from typing import Any, Dict, List, Tuple, Union
import cv2
import numpy as np
import tensorflow as tf
def encode_image(
image_tensor: np.ndarray,
encoding_type: str = 'png') -> Union[np.ndarray, tf.Tensor]:
"""Encode image tensor into byte string."""
if encoding_type == 'jpg':
image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor))
elif encoding_type == 'png':
image_encoded = tf.image.encode_png(tf.constant(image_tensor))
else:
raise ValueError('Invalid encoding type.')
if tf.executing_eagerly():
image_encoded = image_encoded.numpy()
else:
image_encoded = image_encoded.eval()
return image_encoded
def int64_feature(value: Union[int, List[int]]) -> tf.train.Feature:
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def float_feature(value: Union[float, List[float]]) -> tf.train.Feature:
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def bytes_feature(value: Union[Union[bytes, str], List[Union[bytes, str]]]
) -> tf.train.Feature:
if not isinstance(value, list):
value = [value]
for i in range(len(value)):
if not isinstance(value[i], bytes):
value[i] = value[i].encode('utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def annotation_to_entities(annotation: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Flatten the annotation dict to a list of 'entities'."""
entities = []
for paragraph in annotation['paragraphs']:
paragraph_id = len(entities)
paragraph['type'] = 3 # 3 for paragraph
paragraph['parent_id'] = -1
entities.append(paragraph)
for line in paragraph['lines']:
line_id = len(entities)
line['type'] = 2 # 2 for line
line['parent_id'] = paragraph_id
entities.append(line)
for word in line['words']:
word['type'] = 1 # 1 for word
word['parent_id'] = line_id
entities.append(word)
return entities
def draw_entity_mask(
entities: List[Dict[str, Any]],
image_shape: Tuple[int, int, int]) -> np.ndarray:
"""Draw entity id mask.
Args:
entities: A list of entity objects. Should be output from
`annotation_to_entities`.
image_shape: The shape of the input image.
Returns:
A (H, W, 3) entity id mask of the same height/width as the image. Each pixel
(i, j, :) encodes the entity id of one pixel. Only word entities are
rendered. 0 for non-text pixels; word entity ids start from 1.
"""
instance_mask = np.zeros(image_shape, dtype=np.uint8)
for i, entity in enumerate(entities):
# only draw word masks
if entity['type'] != 1:
continue
vertices = np.array(entity['vertices'])
# the pixel value is actually 1 + position in entities
entity_id = i + 1
if entity_id >= 65536:
# As entity_id is encoded in the last two channels, it should be less than
# 256**2=65536.
raise ValueError(
(f'Entity ID overflow: {entity_id}. Currently only entity_id<65536 '
'are supported.'))
# use the last two channels to encode the entity id.
color = [0, entity_id // 256, entity_id % 256]
instance_mask = cv2.fillPoly(instance_mask,
[np.round(vertices).astype('int32')], color)
return instance_mask
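# Illustrative decoding (not used in this script): the word entity id at pixel
# (i, j) of the mask above can be recovered as
#   entity_id = int(mask[i, j, 1]) * 256 + int(mask[i, j, 2])
# where 0 means background and positive ids are 1-based indices into `entities`.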
def convert_to_tfe(img_file_name: str,
annotation: Dict[str, Any]) -> tf.train.Example:
"""Convert the annotation dict into a TFExample."""
img = cv2.imread(img_file_name)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
h, w, c = img.shape
encoded_img = encode_image(img)
entities = annotation_to_entities(annotation)
masks = draw_entity_mask(entities, img.shape)
encoded_mask = encode_image(masks)
# encode attributes
parent = []
classes = []
content_type = []
text = []
vertices = []
for entity in entities:
parent.append(entity['parent_id'])
classes.append(entity['type'])
# 0 for annotated; 8 for not annotated
content_type.append((0 if entity['legible'] else 8))
text.append(entity.get('text', ''))
v = np.array(entity['vertices'])
vertices.append(','.join(str(float(n)) for n in v.reshape(-1)))
example = tf.train.Example(
features=tf.train.Features(
feature={
# input images
'image/encoded': bytes_feature(encoded_img),
# image format
'image/format': bytes_feature('png'),
# image width
'image/width': int64_feature([w]),
# image height
'image/height': int64_feature([h]),
# image channels
'image/channels': int64_feature([c]),
# image key
'image/source_id': bytes_feature(annotation['image_id']),
# HxWx3 tensors: channel 2-3 encodes the id of the word entity.
'image/additional_channels/encoded': bytes_feature(encoded_mask),
# format of the additional channels
'image/additional_channels/format': bytes_feature('png'),
'image/object/parent': int64_feature(parent),
# word / line / paragraph / symbol / ...
'image/object/classes': int64_feature(classes),
# text / handwritten / not-annotated / ...
'image/object/content_type': int64_feature(content_type),
# string text transcription
'image/object/text': bytes_feature(text),
# comma separated coordinates, (x,y) * n
'image/object/vertices': bytes_feature(vertices),
})).SerializeToString()
return example
| 6,426 | 34.120219 | 80 | py |
models | models-master/official/projects/unified_detector/data_conversion/convert.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to convert HierText to TFExamples.
This script is only intended to run locally.
python3 data_preprocess/convert.py \
--gt_file=/path/to/gt.jsonl \
--img_dir=/path/to/image \
--out_file=/path/to/tfrecords/file-prefix
"""
import json
import os
import random
from absl import app
from absl import flags
import tensorflow as tf
import tqdm
import utils
_GT_FILE = flags.DEFINE_string('gt_file', None, 'Path to the GT file')
_IMG_DIR = flags.DEFINE_string('img_dir', None, 'Path to the image folder.')
_OUT_FILE = flags.DEFINE_string('out_file', None, 'Path for the tfrecords.')
_NUM_SHARD = flags.DEFINE_integer(
'num_shard', 100, 'The number of shards of tfrecords.')
def main(unused_argv) -> None:
  with open(_GT_FILE.value, 'r') as f:
    annotations = json.load(f)['annotations']
random.shuffle(annotations)
n_sample = len(annotations)
n_shards = _NUM_SHARD.value
n_sample_per_shard = (n_sample - 1) // n_shards + 1
for shard in tqdm.tqdm(range(n_shards)):
output_path = f'{_OUT_FILE.value}-{shard:05}-{n_shards:05}.tfrecords'
annotation_subset = annotations[
shard * n_sample_per_shard : (shard + 1) * n_sample_per_shard]
with tf.io.TFRecordWriter(output_path) as file_writer:
for annotation in annotation_subset:
img_file_path = os.path.join(_IMG_DIR.value,
f"{annotation['image_id']}.jpg")
tfexample = utils.convert_to_tfe(img_file_path, annotation)
file_writer.write(tfexample)
if __name__ == '__main__':
flags.mark_flags_as_required(['gt_file', 'img_dir', 'out_file'])
app.run(main)
| 2,205 | 31.925373 | 76 | py |
models | models-master/official/projects/unified_detector/configs/ocr_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCR tasks and models configurations."""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
@dataclasses.dataclass
class OcrTaskConfig(cfg.TaskConfig):
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
model_call_needs_labels: bool = False
@exp_factory.register_config_factory('unified_detector')
def unified_detector() -> cfg.ExperimentConfig:
"""Configurations for trainer of unified detector."""
total_train_steps = 100000
summary_interval = steps_per_loop = 200
checkpoint_interval = 2000
warmup_steps = 1000
config = cfg.ExperimentConfig(
# Input pipeline and model are configured through Gin.
task=OcrTaskConfig(train_data=cfg.DataConfig(is_training=True)),
trainer=cfg.TrainerConfig(
train_steps=total_train_steps,
steps_per_loop=steps_per_loop,
summary_interval=summary_interval,
checkpoint_interval=checkpoint_interval,
max_to_keep=1,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.05,
'include_in_weight_decay': [
'^((?!depthwise).)*(kernel|weights):0$',
],
'exclude_from_weight_decay': [
'(^((?!kernel).)*:0)|(depthwise_kernel)',
],
'gradient_clip_norm': 10.,
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1e-3,
'decay_steps': total_train_steps - warmup_steps,
'alpha': 1e-2,
'offset': warmup_steps,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_learning_rate': 1e-5,
'warmup_steps': warmup_steps,
}
},
}),
),
)
return config
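# Illustrative usage (paths are placeholders): once registered, this experiment
# can be selected from the training driver with flags such as
#   --experiment=unified_detector --mode=train --model_dir=/tmp/unified_detector
# together with a Gin file that defines the input pipeline and model.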
| 2,873 | 35.379747 | 80 | py |
models | models-master/official/projects/unified_detector/utils/typing.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Typing extension."""
from typing import Dict, Union
import numpy as np
import tensorflow as tf
NpDict = Dict[str, np.ndarray]
FeaturesAndLabelsType = Dict[str, Dict[str, tf.Tensor]]
TensorDict = Dict[Union[str, int], tf.Tensor]
NestedTensorDict = Dict[
Union[str, int],
Union[tf.Tensor,
TensorDict]]
| 933 | 31.206897 | 74 | py |
models | models-master/official/projects/unified_detector/utils/utilities.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import collections
from typing import List, Optional, Union
import tensorflow as tf
def resolve_shape(
tensor: tf.Tensor,
resolve_batch_size: bool = True) -> List[Union[tf.Tensor, int]]:
"""Fully resolves the shape of the tensor.
Args:
tensor: The tensor for which to resolve the shape.
resolve_batch_size: If True, fully resolve the batch size. If False,
return the batch size if it is statically known and -1 otherwise. This
can be more efficient when converting a model to TFLite.
Returns:
A list containing the static dimension where possible and the dynamic
dimension otherwise.
"""
with tf.name_scope('resolve_shape'):
shape = tensor.get_shape().as_list()
if None in shape:
shape_dynamic = tf.shape(tensor)
if shape[0] is None:
shape[0] = shape_dynamic[0] if resolve_batch_size else -1
for i in range(1, len(shape)):
if shape[i] is None:
shape[i] = shape_dynamic[i]
return shape
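# Illustrative example: for a tensor with static shape [None, 128, None, 3],
# resolve_shape returns [batch, 128, width, 3], where `batch` and `width` are
# scalar tensors read from tf.shape at runtime (the batch entry is -1 instead
# when resolve_batch_size=False).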
def set_shape_dim(tensor: tf.Tensor, index: int, size: int) -> None:
"""Set value of index-th element of tensor shape to size."""
shape = tensor.get_shape().as_list()
if len(shape) <= index:
raise ValueError(
'Tensor rank must be at least %d. Got %d' % (index + 1, len(shape)))
shape[index] = size
tensor.set_shape(shape)
def truncate_or_pad(input_tensor: tf.Tensor,
new_size: int,
axis: int = 1,
constant_value: Union[int, float] = 0) -> tf.Tensor:
"""Truncate or zeros pad the axis of input tensor to new size."""
rank = len(input_tensor.shape)
if rank <= axis:
raise ValueError(
'Tensor rank must be at least %d. Got %d' % (axis + 1, rank))
orig_size = tf.shape(input_tensor)[axis]
def _new_size(dim):
if dim == axis:
return new_size
n = tf.shape(input_tensor)[dim]
return -1 if n is None else n
def _truncate():
begin = [0] * rank
size = [_new_size(dim) for dim in range(rank)]
return tf.slice(input_tensor, begin, size)
def _pad():
padding = [[0, 0] for _ in range(rank)]
padding[axis][1] = new_size - orig_size
return tf.pad(input_tensor, padding, constant_values=constant_value)
output = tf.cond(orig_size >= new_size, _truncate, _pad)
if isinstance(new_size, int):
set_shape_dim(output, axis, new_size)
return output
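# Illustrative example: truncate_or_pad(tf.ones([2, 5]), new_size=3, axis=1)
# slices the input to shape [2, 3], while new_size=8 pads it with zeros (the
# default constant_value) to shape [2, 8]; padding is appended at the end of
# the chosen axis.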
def rotate_rboxes90(rboxes: tf.Tensor,
image_width: int,
image_height: int,
rotation_count: int = 1) -> tf.Tensor:
"""Rotate oriented rectangles counter-clockwise by multiples of 90 degrees."""
image_width = tf.cast(image_width, dtype=tf.float32)
image_height = tf.cast(image_height, dtype=tf.float32)
rotation_count = rotation_count % 4
x, y, w, h, angle = tf.split(rboxes, 5, axis=1)
if rotation_count == 0:
return rboxes
elif rotation_count == 1:
angle = tf.where(angle < -90.0, angle + 270, angle - 90)
return tf.concat([y, image_width - x - 1, w, h, angle], axis=1)
elif rotation_count == 2:
angle = tf.where(angle < 0.0, angle + 180, angle - 180)
return tf.concat([image_width - x - 1, image_height - y - 1, w, h, angle],
axis=1)
else:
angle = tf.where(angle > 90.0, angle - 270, angle + 90)
return tf.concat([image_height - y - 1, x, w, h, angle], axis=1)
def normalize_image_to_range(image: tf.Tensor,
original_minval: int = 0,
original_maxval: int = 255,
target_minval: float = -1.0,
target_maxval: float = 1.0) -> tf.Tensor:
"""Normalizes pixel values in the image.
Moves the pixel values from the current [original_minval, original_maxval]
range to the [target_minval, target_maxval] range.
Args:
image: A tensor of shape [height, width, channels]. Input will be converted
to float32 type before normalization.
original_minval: current image minimum value.
original_maxval: current image maximum value.
target_minval: target image minimum value.
target_maxval: target image maximum value.
Returns:
A float tensor with the same shape as the input image.
"""
if image.dtype is not tf.float32:
image = tf.cast(image, dtype=tf.float32)
original_minval = float(original_minval)
original_maxval = float(original_maxval)
target_minval = float(target_minval)
target_maxval = float(target_maxval)
image = tf.cast(image, dtype=tf.float32)
image = tf.subtract(image, original_minval)
image = tf.multiply(image, (target_maxval - target_minval) /
(original_maxval - original_minval))
image = tf.add(image, target_minval)
return image
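# Illustrative example: with the default arguments, a uint8 pixel value of 255
# maps to 1.0, 0 maps to -1.0, and 127.5 maps to 0.0.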
def get_padding_mask_from_valid_lengths(
valid_lengths: tf.Tensor,
max_length: Optional[int] = None,
dtype: tf.dtypes.DType = tf.bool) -> tf.Tensor:
"""Gets a 2D mask of the padded region from valid lengths.
Args:
valid_lengths: A 1D int tensor containing the valid length of each row.
max_length: (optional, int) The maximum length of each row. If `None`, the
maximum value in `valid_lengths` will be used.
dtype: The output dtype.
Returns:
2D padded region mask.
"""
with tf.name_scope('get_padding_mask_from_valid_lengths'):
if max_length is None:
max_length = tf.reduce_max(valid_lengths)
padding_mask = tf.logical_not(tf.sequence_mask(valid_lengths, max_length))
return tf.cast(padding_mask, dtype=dtype)
def get_transformer_attention_bias(padding_mask: tf.Tensor) -> tf.Tensor:
"""Gets attention bias.
Bias tensor that is added to the pre-softmax multi-headed attention logits,
which has shape [batch_size, num_attention_heads, max_length, max_length].
The tensor is zero at non-padded locations, and -1e9 (negative infinity) at
padded locations.
Args:
padding_mask: A [batch_size, max_length] float tensor, the padding mask.
Returns:
Attention bias tensor of shape [batch_size, 1, 1, max_length].
"""
with tf.name_scope('attention_bias'):
# Uses -1e9 to represent -infinity. We do not actually use -Inf, since we
# want to be able to multiply these values by zero to get zero.
# (-Inf * 0 = NaN)
attention_bias = padding_mask * -1e9
attention_bias = tf.expand_dims(
tf.expand_dims(attention_bias, axis=1), axis=1)
return attention_bias
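# Illustrative example: get_padding_mask_from_valid_lengths(
#     tf.constant([2, 3]), max_length=4, dtype=tf.float32) returns
# [[0., 0., 1., 1.], [0., 0., 0., 1.]], and get_transformer_attention_bias
# turns it into a [2, 1, 1, 4] tensor holding 0 at valid positions and -1e9 at
# padded positions.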
class DisjointSet:
"""A disjoint set implementation."""
def __init__(self, num_elements: int):
self._num_elements = num_elements
self._parent = list(range(num_elements))
def find(self, item: int) -> int:
if self._parent[item] == item:
return item
else:
self._parent[item] = self.find(self._parent[item])
return self._parent[item]
def union(self, i1: int, i2: int) -> None:
r1 = self.find(i1)
r2 = self.find(i2)
self._parent[r1] = r2
def to_group(self) -> List[List[int]]:
"""Return the grouping results.
Returns:
A list of integer lists. Each list represents the IDs belonging to the
same group.
"""
groups = collections.defaultdict(list)
for i in range(self._num_elements):
r = self.find(i)
groups[r].append(i)
return list(groups.values())
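# Illustrative usage: ds = DisjointSet(4); ds.union(0, 2); ds.union(2, 3)
# leaves elements {0, 2, 3} in one group and {1} in another, so ds.to_group()
# returns [[0, 2, 3], [1]].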
| 7,922 | 32.572034 | 80 | py |
models | models-master/official/projects/unified_detector/data_loaders/input_reader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input data reader.
Creates a tf.data.Dataset object from multiple input files (e.g. TFRecord or
SSTable) and uses a provided data parser function to decode the serialized
tf.Example and optionally run data augmentation.
"""
import os
from typing import Any, Callable, List, Optional, Sequence, Union
import gin
from six.moves import map
import tensorflow as tf
from official.common import dataset_fn
from research.object_detection.utils import label_map_util
from official.core import config_definitions as cfg
from official.projects.unified_detector.data_loaders import universal_detection_parser # pylint: disable=unused-import
FuncType = Callable[..., Any]
@gin.configurable(denylist=['is_training'])
class InputFn(object):
"""Input data reader class.
Creates a tf.data.Dataset object from multiple datasets (optionally performs
weighted sampling between different datasets), parses the tf.Example message
using `parser_fn`. The datasets can either be stored in SSTable or TfRecord.
"""
def __init__(self,
is_training: bool,
batch_size: Optional[int] = None,
data_root: str = '',
input_paths: List[str] = gin.REQUIRED,
dataset_type: str = 'tfrecord',
use_sampling: bool = False,
sampling_weights: Optional[Sequence[Union[int, float]]] = None,
cycle_length: Optional[int] = 64,
shuffle_buffer_size: Optional[int] = 512,
parser_fn: Optional[FuncType] = None,
parser_num_parallel_calls: Optional[int] = 64,
max_intra_op_parallelism: Optional[int] = None,
label_map_proto_path: Optional[str] = None,
input_filter_fns: Optional[List[FuncType]] = None,
input_training_filter_fns: Optional[Sequence[FuncType]] = None,
dense_to_ragged_batch: bool = False,
data_validator_fn: Optional[Callable[[Sequence[str]],
None]] = None):
"""Input reader constructor.
Args:
is_training: Boolean indicating TRAIN or EVAL.
batch_size: Input data batch size. Ignored if batch size is passed through
params. In that case, this can be None.
data_root: All the relative input paths are based on this location.
input_paths: Input file patterns.
dataset_type: Can be 'sstable' or 'tfrecord'.
use_sampling: Whether to perform weighted sampling between different
datasets.
sampling_weights: Unnormalized sampling weights. The length should be
equal to `input_paths`.
cycle_length: The number of input Datasets to interleave from in parallel.
If set to None tf.data experimental autotuning is used.
shuffle_buffer_size: The random shuffle buffer size.
parser_fn: The function to run decoding and data augmentation. The
function takes `is_training` as an input, which is passed from here.
parser_num_parallel_calls: The number of parallel calls for `parser_fn`.
The number of CPU cores is the suggested value. If set to None tf.data
experimental autotuning is used.
max_intra_op_parallelism: if set limits the max intra op parallelism of
functions run on slices of the input.
label_map_proto_path: Path to a StringIntLabelMap which will be used to
decode the input data.
      input_filter_fns: A list of functions on the dataset points which return
        true for valid data.
      input_training_filter_fns: A list of functions on the dataset points
        which return true for valid data, used only for training.
dense_to_ragged_batch: Whether to use ragged batching for MPNN format.
data_validator_fn: If not None, used to validate the data specified by
input_paths.
Raises:
ValueError for invalid input_paths.
"""
self._is_training = is_training
if data_root:
# If an input path is absolute this does not change it.
input_paths = [os.path.join(data_root, value) for value in input_paths]
self._input_paths = input_paths
# Disables datasets sampling during eval.
self._batch_size = batch_size
if is_training:
self._use_sampling = use_sampling
else:
self._use_sampling = False
self._sampling_weights = sampling_weights
self._cycle_length = (cycle_length if cycle_length else tf.data.AUTOTUNE)
self._shuffle_buffer_size = shuffle_buffer_size
self._parser_num_parallel_calls = (
parser_num_parallel_calls
if parser_num_parallel_calls else tf.data.AUTOTUNE)
self._max_intra_op_parallelism = max_intra_op_parallelism
self._label_map_proto_path = label_map_proto_path
if label_map_proto_path:
name_to_id = label_map_util.get_label_map_dict(label_map_proto_path)
self._lookup_str_keys = list(name_to_id.keys())
self._lookup_int_values = list(name_to_id.values())
self._parser_fn = parser_fn
self._input_filter_fns = input_filter_fns or []
if is_training and input_training_filter_fns:
self._input_filter_fns.extend(input_training_filter_fns)
self._dataset_type = dataset_type
self._dense_to_ragged_batch = dense_to_ragged_batch
if data_validator_fn is not None:
data_validator_fn(self._input_paths)
@property
def batch_size(self):
return self._batch_size
def __call__(
self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Read and parse input datasets, return a tf.data.Dataset object."""
# TPUEstimator passes the batch size through params.
if params is not None and 'batch_size' in params:
batch_size = params['batch_size']
else:
batch_size = self._batch_size
per_replica_batch_size = input_context.get_per_replica_batch_size(
batch_size) if input_context else batch_size
with tf.name_scope('input_reader'):
dataset = self._build_dataset_from_records()
dataset_parser_fn = self._build_dataset_parser_fn()
dataset = dataset.map(
dataset_parser_fn, num_parallel_calls=self._parser_num_parallel_calls)
for filter_fn in self._input_filter_fns:
dataset = dataset.filter(filter_fn)
if self._dense_to_ragged_batch:
dataset = dataset.apply(
tf.data.experimental.dense_to_ragged_batch(
batch_size=per_replica_batch_size, drop_remainder=True))
else:
dataset = dataset.batch(per_replica_batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
def _fetch_dataset(self, filename: str) -> tf.data.Dataset:
"""Fetch dataset depending on type.
Args:
filename: Location of dataset.
Returns:
Tf Dataset.
"""
data_cls = dataset_fn.pick_dataset_fn(self._dataset_type)
data = data_cls([filename])
return data
def _build_dataset_parser_fn(self) -> Callable[..., tf.Tensor]:
"""Depending on label_map and storage type, build a parser_fn."""
# Parse the fetched records to input tensors for model function.
if self._label_map_proto_path:
lookup_initializer = tf.lookup.KeyValueTensorInitializer(
keys=tf.constant(self._lookup_str_keys, dtype=tf.string),
values=tf.constant(self._lookup_int_values, dtype=tf.int32))
name_to_id_table = tf.lookup.StaticHashTable(
initializer=lookup_initializer, default_value=0)
parser_fn = self._parser_fn(
is_training=self._is_training, label_lookup_table=name_to_id_table)
else:
parser_fn = self._parser_fn(is_training=self._is_training)
return parser_fn
def _build_dataset_from_records(self) -> tf.data.Dataset:
"""Build a tf.data.Dataset object from input SSTables.
If the input data come from multiple SSTables, use the user defined sampling
weights to perform sampling. For example, if the sampling weights is
[1., 2.], the second dataset will be sampled twice more often than the first
one.
Returns:
Dataset built from SSTables.
Raises:
ValueError for inability to find SSTable files.
"""
all_file_patterns = []
if self._use_sampling:
for file_pattern in self._input_paths:
all_file_patterns.append([file_pattern])
# Normalize sampling probabilities.
total_weight = sum(self._sampling_weights)
sampling_probabilities = [
float(w) / total_weight for w in self._sampling_weights
]
else:
all_file_patterns.append(self._input_paths)
datasets = []
for file_pattern in all_file_patterns:
filenames = sum(list(map(tf.io.gfile.glob, file_pattern)), [])
if not filenames:
raise ValueError(
f'Error trying to read input files for file pattern {file_pattern}')
# Create a dataset of filenames and shuffle the files. In each epoch,
# the file order is shuffled again. This may help if
# per_host_input_for_training = false on TPU.
dataset = tf.data.Dataset.list_files(
file_pattern, shuffle=self._is_training)
if self._is_training:
dataset = dataset.repeat()
if self._max_intra_op_parallelism:
# Disable intra-op parallelism to optimize for throughput instead of
# latency.
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
dataset = dataset.interleave(
self._fetch_dataset,
cycle_length=self._cycle_length,
num_parallel_calls=self._cycle_length,
deterministic=(not self._is_training))
if self._is_training:
dataset = dataset.shuffle(self._shuffle_buffer_size)
datasets.append(dataset)
if self._use_sampling:
assert len(datasets) == len(sampling_probabilities)
dataset = tf.data.experimental.sample_from_datasets(
datasets, sampling_probabilities)
else:
dataset = datasets[0]
return dataset
| 10,681 | 38.416974 | 119 | py |
models | models-master/official/projects/unified_detector/data_loaders/tf_example_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Example proto decoder for GOCR."""
from typing import List, Optional, Sequence, Tuple, Union
import tensorflow as tf
from official.projects.unified_detector.utils.typing import TensorDict
from official.vision.dataloaders import decoder
class TfExampleDecoder(decoder.Decoder):
"""Tensorflow Example proto decoder."""
def __init__(self,
use_instance_mask: bool = False,
additional_class_names: Optional[Sequence[str]] = None,
additional_regression_names: Optional[Sequence[str]] = None,
num_additional_channels: int = 0):
"""Constructor.
keys_to_features is a dictionary mapping the names of the tf.Example
fields to tf features, possibly with defaults.
Uses fixed length for scalars and variable length for vectors.
Args:
use_instance_mask: if False, prevents decoding of the instance mask, which
can take a lot of resources.
additional_class_names: If not none, a list of additional class names. For
additional class name n, named image/object/${n} are expected to be an
int vector of length one, and are mapped to tensor dict key
groundtruth_${n}.
additional_regression_names: If not none, a list of additional regression
output names. For additional class name n, named image/object/${n} are
expected to be a float vector, and are mapped to tensor dict key
groundtruth_${n}.
num_additional_channels: The number of additional channels of information
present in the tf.Example proto.
"""
self._num_additional_channels = num_additional_channels
self._use_instance_mask = use_instance_mask
self.keys_to_features = {}
# Map names in the final tensor dict (output of `self.decode()`) to names in
# tf examples, e.g. 'groundtruth_text' -> 'image/object/text'
self.name_to_key = {}
if use_instance_mask:
self.keys_to_features.update({
'image/object/mask': tf.io.VarLenFeature(tf.string),
})
# Now we have lists of standard types.
# To add new features, just add entries here.
# The tuple elements are (example name, tensor name, default value).
# If the items_to_handlers part is already set up use None for
# the tensor name.
# There are other tensor names listed as None which we probably
# want to discuss and specify.
scalar_strings = [
('image/encoded', None, ''),
('image/format', None, 'jpg'),
('image/additional_channels/encoded', None, ''),
('image/additional_channels/format', None, 'png'),
('image/label_type', 'label_type', ''),
('image/key', 'key', ''),
('image/source_id', 'source_id', ''),
]
vector_strings = [
('image/attributes', None, ''),
('image/object/text', 'groundtruth_text', ''),
('image/object/encoded_text', 'groundtruth_encoded_text', ''),
('image/object/vertices', 'groundtruth_vertices', ''),
('image/object/object_type', None, ''),
('image/object/language', 'language', ''),
('image/object/reorderer_type', None, ''),
('image/label_map_path', 'label_map_path', '')
]
scalar_ints = [
('image/height', None, 1),
('image/width', None, 1),
('image/channels', None, 3),
]
vector_ints = [
('image/object/classes', 'groundtruth_classes', 0),
('image/object/frame_id', 'frame_id', 0),
('image/object/track_id', 'track_id', 0),
('image/object/content_type', 'groundtruth_content_type', 0),
]
if additional_class_names:
vector_ints += [('image/object/%s' % name, 'groundtruth_%s' % name, 0)
for name in additional_class_names]
# This one is not yet needed:
# scalar_floats = [
# ]
vector_floats = [
('image/object/weight', 'groundtruth_weight', 0),
('image/object/rbox_tl_x', None, 0),
('image/object/rbox_tl_y', None, 0),
('image/object/rbox_width', None, 0),
('image/object/rbox_height', None, 0),
('image/object/rbox_angle', None, 0),
('image/object/bbox/xmin', None, 0),
('image/object/bbox/xmax', None, 0),
('image/object/bbox/ymin', None, 0),
('image/object/bbox/ymax', None, 0),
]
if additional_regression_names:
vector_floats += [('image/object/%s' % name, 'groundtruth_%s' % name, 0)
for name in additional_regression_names]
self._init_scalar_features(scalar_strings, tf.string)
self._init_vector_features(vector_strings, tf.string)
self._init_scalar_features(scalar_ints, tf.int64)
self._init_vector_features(vector_ints, tf.int64)
self._init_vector_features(vector_floats, tf.float32)
def _init_scalar_features(
self,
feature_list: List[Tuple[str, Optional[str], Union[str, int, float]]],
ftype: tf.dtypes.DType) -> None:
for entry in feature_list:
self.keys_to_features[entry[0]] = tf.io.FixedLenFeature(
(), ftype, default_value=entry[2])
if entry[1] is not None:
self.name_to_key[entry[1]] = entry[0]
def _init_vector_features(
self,
feature_list: List[Tuple[str, Optional[str], Union[str, int, float]]],
ftype: tf.dtypes.DType) -> None:
for entry in feature_list:
self.keys_to_features[entry[0]] = tf.io.VarLenFeature(ftype)
if entry[1] is not None:
self.name_to_key[entry[1]] = entry[0]
  def _decode_png_instance_masks(
      self, keys_to_tensors: TensorDict) -> tf.Tensor:
"""Decode PNG instance segmentation masks and stack into dense tensor.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: A dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
def decode_png_mask(image_buffer):
image = tf.squeeze(
tf.image.decode_image(image_buffer, channels=1), axis=2)
image.set_shape([None, None])
      image = tf.cast(tf.greater(image, 0), tf.float32)
return image
png_masks = keys_to_tensors['image/object/mask']
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
if isinstance(png_masks, tf.SparseTensor):
      png_masks = tf.sparse.to_dense(png_masks, default_value='')
return tf.cond(
tf.greater(tf.size(png_masks), 0),
lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
        lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), tf.int32)))
def _decode_image(self,
parsed_tensors: TensorDict,
channel: int = 3) -> TensorDict:
"""Decodes the image and set its shape (H, W are dynamic; C is fixed)."""
image = tf.io.decode_image(parsed_tensors['image/encoded'],
channels=channel)
image.set_shape([None, None, channel])
return {'image': image}
def _decode_additional_channels(self,
parsed_tensors: TensorDict,
channel: int = 3) -> TensorDict:
"""Decodes the additional channels and set its static shape."""
channels = tf.io.decode_image(
parsed_tensors['image/additional_channels/encoded'], channels=channel)
channels.set_shape([None, None, channel])
return {'additional_channels': channels}
def _decode_boxes(self, parsed_tensors: TensorDict) -> TensorDict:
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return {
'groundtruth_aligned_boxes': tf.stack([ymin, xmin, ymax, xmax], axis=-1)
}
def _decode_rboxes(self, parsed_tensors: TensorDict) -> TensorDict:
"""Concat rbox coordinates: [left, top, box_width, box_height, angle]."""
top_left_x = parsed_tensors['image/object/rbox_tl_x']
top_left_y = parsed_tensors['image/object/rbox_tl_y']
width = parsed_tensors['image/object/rbox_width']
height = parsed_tensors['image/object/rbox_height']
angle = parsed_tensors['image/object/rbox_angle']
return {
'groundtruth_boxes':
tf.stack([top_left_x, top_left_y, width, height, angle], axis=-1)
}
def _decode_masks(self, parsed_tensors: TensorDict) -> TensorDict:
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
masks = tf.cond(
pred=tf.greater(tf.size(input=masks), 0),
true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
return {'groundtruth_instance_masks': masks}
def decode(self, tf_example_string_tensor: tf.string):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: A string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary contains a subset of the following, depends on the inputs:
image: A uint8 tensor of shape [height, width, 3] containing the image.
source_id: A string tensor contains image fingerprint.
key: A string tensor contains the unique sha256 hash key.
label_type: Either `full` or `partial`. `full` means all the text are
fully labeled, `partial` otherwise. Currently, this is used by E2E
model. If an input image is fully labeled, we update the weights of
both the detection and the recognizer. Otherwise, only recognizer part
of the model is trained.
groundtruth_text: A string tensor list, the original transcriptions.
groundtruth_encoded_text: A string tensor list, the class ids for the
atoms in the text, after applying the reordering algorithm, in string
form. For example "90,71,85,69,86,85,93,90,71,91,1,71,85,93,90,71".
This depends on the class label map provided to the conversion
program. These are 0 based, with -1 for OOV symbols.
groundtruth_classes: A int32 tensor of shape [num_boxes] contains the
class id. Note this is 1 based, 0 is reserved for background class.
groundtruth_content_type: A int32 tensor of shape [num_boxes] contains
the content type. Values correspond to PageLayoutEntity::ContentType.
groundtruth_weight: A int32 tensor of shape [num_boxes], either 0 or 1.
If a region has weight 0, it will be ignored when computing the
losses.
groundtruth_boxes: A float tensor of shape [num_boxes, 5] contains the
groundtruth rotated rectangles. Each row is in [left, top, box_width,
box_height, angle] order, absolute coordinates are used.
groundtruth_aligned_boxes: A float tensor of shape [num_boxes, 4]
contains the groundtruth axis-aligned rectangles. Each row is in
[ymin, xmin, ymax, xmax] order. Currently, this is used to store
groundtruth symbol boxes.
groundtruth_vertices: A string tensor list contains encoded normalized
box or polygon coordinates. E.g. `x1,y1,x2,y2,x3,y3,x4,y4`.
groundtruth_instance_masks: A float tensor of shape [num_boxes, height,
width] contains binarized image sized instance segmentation masks.
`1.0` for positive region, `0.0` otherwise. None if not in tfe.
frame_id: A int32 tensor of shape [num_boxes], either `0` or `1`.
`0` means object comes from first image, `1` means second.
track_id: A int32 tensor of shape [num_boxes], where value indicates
identity across frame indices.
additional_channels: A uint8 tensor of shape [H, W, C] representing some
features.
"""
parsed_tensors = tf.io.parse_single_example(
serialized=tf_example_string_tensor, features=self.keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0)
decoded_tensors = {}
decoded_tensors.update(self._decode_image(parsed_tensors))
decoded_tensors.update(self._decode_rboxes(parsed_tensors))
decoded_tensors.update(self._decode_boxes(parsed_tensors))
if self._use_instance_mask:
decoded_tensors[
'groundtruth_instance_masks'] = self._decode_png_instance_masks(
parsed_tensors)
if self._num_additional_channels:
decoded_tensors.update(self._decode_additional_channels(
parsed_tensors, self._num_additional_channels))
# other attributes:
for key in self.name_to_key:
if key not in decoded_tensors:
decoded_tensors[key] = parsed_tensors[self.name_to_key[key]]
if 'groundtruth_instance_masks' not in decoded_tensors:
decoded_tensors['groundtruth_instance_masks'] = None
return decoded_tensors
| 14,249 | 43.392523 | 80 | py |
models | models-master/official/projects/unified_detector/data_loaders/autoaugment.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
This library is adapted from:
`https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py`.
Several changes are made. They are inspired by the TIMM library:
https://github.com/rwightman/pytorch-image-models/tree/master/timm/data
Changes include:
(1) Random Erasing / Cutout is added, and separated from the random augmentation
pool (not sampled as an operation).
(2) For `posterize` and `solarize`, the arguments are changed such that the
level of corruption increases as the `magnitude` argument increases.
(3) `color`, `contrast`, `brightness`, `sharpness` are randomly enhanced or
diminished.
(4) Magnitude is randomly sampled from a normal distribution.
(5) Operations are applied with a probability.
"""
import inspect
import math
import tensorflow as tf
import tensorflow_addons.image as tfa_image
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
# pylint: disable=g-long-lambda
blend = tf.function(lambda i1, i2, factor: tf.cast(
tfa_image.blend(tf.cast(i1, tf.float32), tf.cast(i2, tf.float32), factor),
tf.uint8))
# pylint: enable=g-long-lambda
def random_erase(image,
prob,
min_area=0.02,
max_area=1 / 3,
min_aspect=1 / 3,
max_aspect=10 / 3,
mode='pixel'):
"""The random erasing augmentations: https://arxiv.org/pdf/1708.04896.pdf.
This augmentation is applied after image normalization.
Args:
image: Input image after all other augmentation and normalization. It has
type tf.float32.
prob: Probability of applying the random erasing operation.
min_area: As named.
max_area: As named.
min_aspect: As named.
max_aspect: As named.
    mode: How the erased area is filled. 'pixel' fills the area with random
      noise drawn from a truncated normal distribution; otherwise it is filled
      with zeros.
Returns:
Randomly erased image.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
image_area = tf.cast(image_width * image_height, tf.float32)
# Sample width, height
erase_area = tf.random.uniform([], min_area, max_area) * image_area
log_max_target_ar = tf.math.log(
tf.minimum(
tf.math.divide(
tf.math.square(tf.cast(image_width, tf.float32)), erase_area),
max_aspect))
log_min_target_ar = tf.math.log(
tf.maximum(
tf.math.divide(erase_area,
tf.math.square(tf.cast(image_height, tf.float32))),
min_aspect))
erase_aspect_ratio = tf.math.exp(
tf.random.uniform([], log_min_target_ar, log_max_target_ar))
erase_h = tf.cast(tf.math.sqrt(erase_area / erase_aspect_ratio), tf.int32)
erase_w = tf.cast(tf.math.sqrt(erase_area * erase_aspect_ratio), tf.int32)
# Sample (left, top) of the rectangle to erase
erase_left = tf.random.uniform(
shape=[], minval=0, maxval=image_width - erase_w, dtype=tf.int32)
erase_top = tf.random.uniform(
shape=[], minval=0, maxval=image_height - erase_h, dtype=tf.int32)
pad_right = image_width - erase_w - erase_left
pad_bottom = image_height - erase_h - erase_top
mask = tf.pad(
tf.zeros([erase_h, erase_w], dtype=image.dtype),
[[erase_top, pad_bottom], [erase_left, pad_right]],
constant_values=1)
mask = tf.expand_dims(mask, -1) # [H, W, 1]
if mode == 'pixel':
fill = tf.random.truncated_normal(
tf.shape(image), 0.0, 1.0, dtype=image.dtype)
else:
fill = tf.zeros(tf.shape(image), dtype=image.dtype)
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(should_apply_op,
lambda: mask * image + (1 - mask) * fill,
lambda: image)
return augmented_image
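# Example usage (illustrative sketch, not part of the original pipeline):
# `random_erase` expects a normalized float image, e.g.
#   img = tf.random.uniform([224, 224, 3], 0.0, 1.0)
#   out = random_erase(img, prob=0.5, mode='pixel')
# With probability 0.5, a random rectangle of `img` is replaced by noise.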
def solarize(image, threshold=128):
  # For each pixel in the image, keep the pixel value
  # if it is less than the threshold.
  # Otherwise, invert it (i.e. replace it with 255 - pixel).
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
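# Example (illustrative): for a uint8 image, `solarize` inverts every pixel at
# or above the threshold, while `solarize_add` only brightens pixels below it.
#   img = tf.zeros([8, 8, 3], dtype=tf.uint8) + 200
#   solarize(img, threshold=128)    # 200 >= 128, so pixels become 255 - 200 = 55.
#   solarize_add(img, addition=30)  # 200 >= 128, so pixels are left at 200.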
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize. Smaller `bits` means larger degradation."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
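# Example (illustrative): posterize keeps only the `bits` most significant
# bits of each uint8 pixel value.
#   posterize(tf.constant([[91]], dtype=tf.uint8), bits=4)  # 91 -> 80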
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      `degrees` is positive, the image will be rotated clockwise; otherwise it
      will be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by the
rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
if isinstance(replace, list) or isinstance(replace, tuple):
replace = replace[0]
image = tfa_image.rotate(image, radians, fill_value=replace)
return image
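# Example (illustrative, `img` being a hypothetical uint8 image tensor):
#   rotated = rotate(img, degrees=30.0, replace=[128, 128, 128])
# In the AutoAugment policies the sign of the angle is randomized before this
# function is called.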
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
return tfa_image.translate_xy(image, [-pixels, 0], replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
return tfa_image.translate_xy(image, [0, -pixels], replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.cast(tf.reduce_min(image), tf.float32)
hi = tf.cast(tf.reduce_max(image), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
    # Some augmentations that use depth-wise conv will crash when training on
    # GPU. See (b/156242594) for details.
degenerate = tf.nn.depthwise_conv2d(image, kernel, strides, padding='VALID')
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, filter out the zeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
      # and then normalizing by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0), lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'PosterizeIncreasing': posterize,
'Solarize': solarize,
'SolarizeIncreasing': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'ColorIncreasing': color,
'Contrast': contrast,
'ContrastIncreasing': contrast,
'Brightness': brightness,
'BrightnessIncreasing': brightness,
'Sharpness': sharpness,
'SharpnessIncreasing': sharpness,
'ShearX': tfa_image.shear_x,
'ShearY': tfa_image.shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': tfa_image.random_cutout,
'Hue': tf.image.adjust_hue,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: -tensor, lambda: tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _enhance_increasing_level_to_arg(level):
level = (level / _MAX_LEVEL) * .9
level = 1.0 + _randomly_negate_tensor(level)
return (level,)
def _shear_level_to_arg(level):
level = (level / _MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = level / _MAX_LEVEL * translate_const
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _posterize_level_to_arg(level):
return (tf.cast(level / _MAX_LEVEL * 4, tf.uint8),)
def _posterize_increase_level_to_arg(level):
return (4 - _posterize_level_to_arg(level)[0],)
def _solarize_level_to_arg(level):
return (tf.cast(level / _MAX_LEVEL * 256, tf.uint8),)
def _solarize_increase_level_to_arg(level):
return (256 - _solarize_level_to_arg(level)[0],)
def _solarize_add_level_to_arg(level):
return (tf.cast(level / _MAX_LEVEL * 110, tf.int64),)
def _cutout_arg(level, cutout_size):
pad_size = tf.cast(level / _MAX_LEVEL * cutout_size, tf.int32)
return (2 * pad_size, 2 * pad_size)
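# Example (illustrative, assuming `_MAX_LEVEL` is 10 as defined earlier in
# this module): a magnitude of 5 maps to op-specific arguments such as
#   _rotate_level_to_arg(5.)   # (+/- 15 degrees,)
#   _enhance_level_to_arg(5.)  # (1.0,), i.e. the blend factor
#   _shear_level_to_arg(5.)    # (+/- 0.15,)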
def level_to_arg(hparams):
return {
'AutoContrast':
lambda level: (),
'Equalize':
lambda level: (),
'Invert':
lambda level: (),
'Rotate':
_rotate_level_to_arg,
'Posterize':
_posterize_level_to_arg,
'PosterizeIncreasing':
_posterize_increase_level_to_arg,
'Solarize':
_solarize_level_to_arg,
'SolarizeIncreasing':
_solarize_increase_level_to_arg,
'SolarizeAdd':
_solarize_add_level_to_arg,
'Color':
_enhance_level_to_arg,
'ColorIncreasing':
_enhance_increasing_level_to_arg,
'Contrast':
_enhance_level_to_arg,
'ContrastIncreasing':
_enhance_increasing_level_to_arg,
'Brightness':
_enhance_level_to_arg,
'BrightnessIncreasing':
_enhance_increasing_level_to_arg,
'Sharpness':
_enhance_level_to_arg,
'SharpnessIncreasing':
_enhance_increasing_level_to_arg,
'ShearX':
_shear_level_to_arg,
'ShearY':
_shear_level_to_arg,
# pylint:disable=g-long-lambda
'Cutout':
lambda level: _cutout_arg(level, hparams['cutout_const']),
# pylint:disable=g-long-lambda
'TranslateX':
lambda level: _translate_level_to_arg(level, hparams['translate_const'
]),
'TranslateY':
lambda level: _translate_level_to_arg(level, hparams['translate_const'
]),
'Hue':
lambda level: ((level / _MAX_LEVEL) * 0.25,),
# pylint:enable=g-long-lambda
}
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Add in replace arg if it is required for the function that is being called.
# pytype:disable=wrong-arg-types
if 'replace' in inspect.signature(func).parameters.keys(): # pylint: disable=deprecated-method
args = tuple(list(args) + [replace_value])
# pytype:enable=wrong-arg-types
return (func, prob, args)
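# Example (illustrative): resolving one policy tuple into a callable op.
#   hparams = dict(cutout_const=100, translate_const=250)
#   func, prob, args = _parse_policy_info(
#       'Rotate', 0.6, 8, [128, 128, 128], hparams)
#   # func == rotate, prob == 0.6, args == (signed angle, replace value)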
def _apply_func_with_prob(func, image, args, prob):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image = tf.cond(should_apply_op, lambda: func(image, *args),
lambda: image)
return augmented_image
def select_and_apply_random_policy(policies, image):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image),
lambda: image)
return image
def build_and_apply_nas_policy(policies, image, augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
    A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
    # on the image.
def make_final_policy(tf_policy_):
def final_policy(image_):
for func, prob, args in tf_policy_:
image_ = _apply_func_with_prob(func, image_, args, prob)
return image_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_image = select_and_apply_random_policy(tf_policies, image)
return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
"""Applies the AutoAugment policy to `image`.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
    augmentation_name: The name of the AutoAugment policy to use. The available
      options are `v0` and `test`. `v0` is the policy used for all of the
      results in the paper and was found to achieve the best results on the
      COCO dataset. (The paper also describes `v1`, `v2` and `v3` policies,
      which vary slightly in which operations were used during the search
      procedure and in how many operations are applied in parallel to a single
      image (2 vs 3); those policies are not included here.)
  Returns:
    The augmented version of `image`.
"""
available_policies = {'v0': policy_v0, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = dict(cutout_const=100, translate_const=250)
return build_and_apply_nas_policy(policy, image, augmentation_hparams)
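# Example (illustrative): applying the `v0` policy to a uint8 RGB image.
#   img = tf.cast(
#       tf.random.uniform([640, 640, 3], 0, 256, dtype=tf.int32), tf.uint8)
#   augmented = distort_image_with_autoaugment(img, 'v0')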
# Cutout is implemented separately.
_RAND_TRANSFORMS = [
'AutoContrast',
'Equalize',
'Invert',
'Rotate',
'Posterize',
'Solarize',
'Color',
'Contrast',
'Brightness',
'Sharpness',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
'SolarizeAdd',
'Hue',
]
# Cutout is implemented separately.
_RAND_INCREASING_TRANSFORMS = [
'AutoContrast',
'Equalize',
'Invert',
'Rotate',
'PosterizeIncreasing',
'SolarizeIncreasing',
'SolarizeAdd',
'ColorIncreasing',
'ContrastIncreasing',
'BrightnessIncreasing',
'SharpnessIncreasing',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
'Hue',
]
# These augmentations are not suitable for the detection task.
_NON_COLOR_DISTORTION_OPS = [
'Rotate',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
]
def distort_image_with_randaugment(image,
num_layers,
magnitude,
mag_std,
inc,
prob,
color_only=False):
"""Applies the RandAugment policy to `image`.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image. The image
should have uint8 type in [0, 255].
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range [5,
30].
mag_std: Randomness of magnitude. The magnitude will be sampled from a
normal distribution on the fly.
    inc: Whether to select the 'increasing' variants of the ops, whose effect
      strengthens as the magnitude increases.
    prob: Probability of any aug being applied.
    color_only: Whether to only apply operations that distort color and do not
      change the spatial layout.
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
augmentation_hparams = dict(cutout_const=40, translate_const=100)
available_ops = _RAND_INCREASING_TRANSFORMS if inc else _RAND_TRANSFORMS
if color_only:
available_ops = list(
filter(lambda op: op not in _NON_COLOR_DISTORTION_OPS, available_ops))
for layer_num in range(num_layers):
op_to_select = tf.random.uniform([],
maxval=len(available_ops),
dtype=tf.int32)
random_magnitude = tf.clip_by_value(
tf.random.normal([], magnitude, mag_std), 0., _MAX_LEVEL)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, augmentation_hparams)
image = tf.cond(
tf.equal(i, op_to_select),
# pylint:disable=g-long-lambda
lambda s_func=func, s_args=args: _apply_func_with_prob(
s_func, image, s_args, prob),
# pylint:enable=g-long-lambda
lambda: image)
return image
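# Example (illustrative): this mirrors how the unified detector's input
# pipeline calls RandAugment with color-only distortions (see
# universal_detection_parser.py below).
#   augmented = distort_image_with_randaugment(
#       img, num_layers=2, magnitude=5., mag_std=0.5, inc=True, prob=0.5,
#       color_only=True)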
# File: official/projects/unified_detector/data_loaders/universal_detection_parser.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser for universal detector."""
import enum
import functools
from typing import Any, Tuple
import gin
import tensorflow as tf
from official.projects.unified_detector.data_loaders import autoaugment
from official.projects.unified_detector.data_loaders import tf_example_decoder
from official.projects.unified_detector.utils import utilities
from official.projects.unified_detector.utils.typing import NestedTensorDict
from official.projects.unified_detector.utils.typing import TensorDict
@gin.constants_from_enum
class DetectionClass(enum.IntEnum):
"""As in `PageLayoutEntity.EntityType`."""
WORD = 0
LINE = 2
PARAGRAPH = 3
BLOCK = 4
NOT_ANNOTATED_ID = 8
def _erase(mask: tf.Tensor,
feature: tf.Tensor,
min_val: float = 0.,
max_val: float = 256.) -> tf.Tensor:
"""Erase the feature maps with a mask.
Erase feature maps with a mask and replace the erased area with uniform random
  noise. The mask can have a different size from the feature maps.
Args:
    mask: an (h, w) binary mask for pixels to erase with. Value 1 represents
pixels to erase.
feature: the (H, W, C) feature maps to erase from.
min_val: The minimum value of random noise.
max_val: The maximum value of random noise.
Returns:
The (H, W, C) feature maps, with pixels in mask replaced with noises. It's
equal to mask * noise + (1 - mask) * feature.
"""
h, w, c = utilities.resolve_shape(feature)
resized_mask = tf.image.resize(
tf.tile(tf.expand_dims(tf.cast(mask, tf.float32), -1), (1, 1, c)), (h, w))
erased = tf.where(
condition=(resized_mask > 0.5),
x=tf.cast(tf.random.uniform((h, w, c), min_val, max_val), feature.dtype),
y=feature)
return erased
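# Example (illustrative): erasing the top-left quadrant of a feature map with
# uniform noise in [0, 256).
#   mask = tf.pad(tf.ones([2, 2]), [[0, 2], [0, 2]])  # (4, 4); 1 marks erase.
#   feature = tf.zeros([64, 64, 3])                   # (H, W, C)
#   erased = _erase(mask, feature)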
@gin.configurable(denylist=['is_training'])
class UniDetectorParserFn(object):
"""Data parser for universal detector."""
def __init__(
self,
is_training: bool,
output_dimension: int = 1025,
mask_dimension: int = -1,
max_num_instance: int = 128,
rot90_probability: float = 0.5,
use_color_distortion: bool = True,
randaug_mag: float = 5.,
randaug_std: float = 0.5,
randaug_layer: int = 2,
randaug_prob: float = 0.5,
use_cropping: bool = True,
crop_min_scale: float = 0.5,
crop_max_scale: float = 1.5,
crop_min_aspect: float = 4 / 5,
crop_max_aspect: float = 5 / 4,
is_shape_defined: bool = True,
use_tpu: bool = True,
detection_unit: DetectionClass = DetectionClass.LINE,
):
"""Constructor.
Args:
is_training: bool indicating TRAIN or EVAL.
output_dimension: The size of input images.
mask_dimension: The size of the output mask. If negative or zero, it will
be set the same as output_dimension.
max_num_instance: The maximum number of instances to output. If it's
negative, padding or truncating will not be performed.
rot90_probability: The probability of rotating multiples of 90 degrees.
use_color_distortion: Whether to apply color distortions to images (via
autoaugment).
      randaug_mag: (autoaugment parameter) Color distortion magnitude. Note
        that this value should be set conservatively, as some color distortions
        (e.g. posterize) can easily make text illegible.
randaug_std: (autoaugment parameter) Randomness in color distortion
magnitude.
randaug_layer: (autoaugment parameter) Number of color distortion
operations.
      randaug_prob: (autoaugment parameter) Probability of applying each
distortion operation.
use_cropping: Bool, whether to use random cropping and resizing in
training.
crop_min_scale: The minimum scale of a random crop.
crop_max_scale: The maximum scale of a random crop. If >1, it means the
images are downsampled.
crop_min_aspect: The minimum aspect ratio of a random crop.
crop_max_aspect: The maximum aspect ratio of a random crop.
is_shape_defined: Whether to define the static shapes for all features and
labels. This must be set to True in TPU training as it requires static
shapes for all tensors.
use_tpu: Whether the inputs are fed to a TPU device.
detection_unit: Whether word or line (or else) is regarded as an entity.
The instance masks will be at word or line level.
"""
if is_training and max_num_instance < 0:
raise ValueError('In TRAIN mode, padding/truncation is required.')
self._is_training = is_training
self._output_dimension = output_dimension
self._mask_dimension = (
mask_dimension if mask_dimension > 0 else output_dimension)
self._max_num_instance = max_num_instance
self._decoder = tf_example_decoder.TfExampleDecoder(
num_additional_channels=3, additional_class_names=['parent'])
self._use_color_distortion = use_color_distortion
self._rot90_probability = rot90_probability
self._randaug_mag = randaug_mag
self._randaug_std = randaug_std
self._randaug_layer = randaug_layer
self._randaug_prob = randaug_prob
self._use_cropping = use_cropping
self._crop_min_scale = crop_min_scale
self._crop_max_scale = crop_max_scale
self._crop_min_aspect = crop_min_aspect
self._crop_max_aspect = crop_max_aspect
self._is_shape_defined = is_shape_defined
self._use_tpu = use_tpu
self._detection_unit = detection_unit
def __call__(self, value: str) -> Tuple[TensorDict, NestedTensorDict]:
"""Parsing the data.
Args:
value: The serialized data sample.
Returns:
Two dicts for features and labels.
features:
'source_id': id of the sample; only in EVAL mode
'images': the normalized images, (output_dimension, output_dimension, 3)
labels:
See `_prepare_labels` for its content.
"""
data = self._decoder.decode(value)
features = {}
labels = {}
self._preprocess(data, features, labels)
self._rot90k(data, features, labels)
self._crop_and_resize(data, features, labels)
self._color_distortion_and_normalize(data, features, labels)
self._prepare_labels(data, features, labels)
self._define_shapes(features, labels)
return features, labels
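  # Example usage (illustrative; assumes `filenames` points to TFRecords of
  # serialized tf.Examples in the format expected by TfExampleDecoder):
  #   parser = UniDetectorParserFn(is_training=True)
  #   dataset = tf.data.TFRecordDataset(filenames).map(parser)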
def _preprocess(self, data: TensorDict, features: TensorDict,
unused_labels: TensorDict):
"""All kinds of preprocessing of the decoded data dict."""
    # (1) Decode the entity_id_mask: an H*W*1 mask where each pixel equals
    # (1 + position) of the entity in the GT entity list. The IDs
# (which can be larger than 255) are stored in the last two channels.
data['additional_channels'] = tf.cast(data['additional_channels'], tf.int32)
entity_id_mask = (
data['additional_channels'][:, :, -2:-1] * 256 +
data['additional_channels'][:, :, -1:])
data['entity_id_mask'] = entity_id_mask
# (2) Write image id. Used in evaluation.
if not self._use_tpu:
features['source_id'] = data['source_id']
# (3) Block mask: area without annotation
data['image'] = _erase(
data['additional_channels'][:, :, 0],
data['image'],
min_val=0.,
max_val=256.)
def _rot90k(self, data: TensorDict, unused_features: TensorDict,
unused_labels: TensorDict):
"""Rotate the image, gt_bboxes, masks by 90k degrees."""
if not self._is_training:
return
rotate_90_choice = tf.random.uniform([])
def _rotate():
"""Rotation.
These will be rotated:
image,
rbox,
entity_id_mask,
TODO(longshangbang): rotate vertices.
Returns:
The rotated tensors of the above fields.
"""
k = tf.random.uniform([], 1, 4, dtype=tf.int32)
h, w, _ = utilities.resolve_shape(data['image'])
# Image
rotated_img = tf.image.rot90(data['image'], k=k, name='image_rot90k')
# Box
rotate_box_op = functools.partial(
utilities.rotate_rboxes90,
rboxes=data['groundtruth_boxes'],
image_width=w,
image_height=h)
rotated_boxes = tf.switch_case(
k - 1, # Indices start with 1.
branch_fns=[
lambda: rotate_box_op(rotation_count=1),
lambda: rotate_box_op(rotation_count=2),
lambda: rotate_box_op(rotation_count=3)
])
# Mask
rotated_mask = tf.image.rot90(
data['entity_id_mask'], k=k, name='mask_rot90k')
return rotated_img, rotated_boxes, rotated_mask
# pylint: disable=g-long-lambda
(data['image'], data['groundtruth_boxes'],
data['entity_id_mask']) = tf.cond(
rotate_90_choice < self._rot90_probability, _rotate, lambda:
(data['image'], data['groundtruth_boxes'], data['entity_id_mask']))
# pylint: enable=g-long-lambda
def _crop_and_resize(self, data: TensorDict, unused_features: TensorDict,
unused_labels: TensorDict):
"""Perform random cropping and resizing."""
# TODO(longshangbang): resize & translate box as well
# TODO(longshangbang): resize & translate vertices as well
# Get cropping target.
h, w = utilities.resolve_shape(data['image'])[:2]
left, top, crop_w, crop_h, pad_w, pad_h = self._get_crop_box(
tf.cast(h, tf.float32), tf.cast(w, tf.float32))
# Crop the image. (Pad the images if the crop box is larger than image.)
if self._is_training:
# padding left, top, right, bottom
pad_left = tf.random.uniform([], 0, pad_w + 1, dtype=tf.int32)
pad_top = tf.random.uniform([], 0, pad_h + 1, dtype=tf.int32)
else:
pad_left = 0
pad_top = 0
cropped_img = tf.image.crop_to_bounding_box(data['image'], top, left,
crop_h, crop_w)
padded_img = tf.pad(
cropped_img,
[[pad_top, pad_h - pad_top], [pad_left, pad_w - pad_left], [0, 0]],
constant_values=127)
# Resize images
data['resized_image'] = tf.image.resize(
padded_img, (self._output_dimension, self._output_dimension))
data['resized_image'] = tf.cast(data['resized_image'], tf.uint8)
# Crop the masks
cropped_masks = tf.image.crop_to_bounding_box(data['entity_id_mask'], top,
left, crop_h, crop_w)
padded_masks = tf.pad(
cropped_masks,
[[pad_top, pad_h - pad_top], [pad_left, pad_w - pad_left], [0, 0]])
# Resize masks
data['resized_masks'] = tf.image.resize(
padded_masks, (self._mask_dimension, self._mask_dimension),
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
data['resized_masks'] = tf.squeeze(data['resized_masks'], -1)
def _get_crop_box(
self, h: tf.Tensor,
w: tf.Tensor) -> Tuple[Any, Any, tf.Tensor, tf.Tensor, Any, Any]:
"""Get the cropping box.
Args:
h: The height of the image to crop. Should be float type.
w: The width of the image to crop. Should be float type.
Returns:
A tuple representing (left, top, crop_w, crop_h, pad_w, pad_h).
Then in `self._crop_and_resize`, a crop will be extracted with bounding
box from top-left corner (left, top) and with size (crop_w, crop_h). This
crop will then be padded with (pad_w, pad_h) to square sizes.
      The outputs are also re-cast to int32 type.
"""
if not self._is_training or not self._use_cropping:
# cast back to integers.
w = tf.cast(w, tf.int32)
h = tf.cast(h, tf.int32)
side = tf.maximum(w, h)
return 0, 0, w, h, side - w, side - h
# Get box size
scale = tf.random.uniform([], self._crop_min_scale, self._crop_max_scale)
max_edge = tf.maximum(w, h)
long_edge = max_edge * scale
sqrt_aspect_ratio = tf.math.sqrt(
tf.random.uniform([], self._crop_min_aspect, self._crop_max_aspect))
box_h = long_edge / sqrt_aspect_ratio
box_w = long_edge * sqrt_aspect_ratio
# Get box location
left = tf.random.uniform([], 0., tf.maximum(0., w - box_w))
top = tf.random.uniform([], 0., tf.maximum(0., h - box_h))
# Get crop & pad
crop_w = tf.minimum(box_w, w - left)
crop_h = tf.minimum(box_h, h - top)
pad_w = box_w - crop_w
pad_h = box_h - crop_h
return (tf.cast(left, tf.int32), tf.cast(top, tf.int32),
tf.cast(crop_w, tf.int32), tf.cast(crop_h, tf.int32),
tf.cast(pad_w, tf.int32), tf.cast(pad_h, tf.int32))
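  # Example trace (illustrative): for a 1000x1000 training image with sampled
  # scale 0.8 and aspect ratio 1.0, the crop box is 800x800, its top-left
  # corner is sampled in [0, 200] on both axes, and no padding is needed
  # (pad_w == pad_h == 0).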
def _color_distortion_and_normalize(self, data: TensorDict,
features: TensorDict,
unused_labels: TensorDict):
"""Distort colors."""
if self._is_training and self._use_color_distortion:
data['resized_image'] = autoaugment.distort_image_with_randaugment(
data['resized_image'], self._randaug_layer, self._randaug_mag,
self._randaug_std, True, self._randaug_prob, True)
# Normalize
features['images'] = utilities.normalize_image_to_range(
data['resized_image'])
def _prepare_labels(self, data: TensorDict, features: TensorDict,
labels: TensorDict):
"""This function prepares the labels.
    The following targets are added to labels['segmentation_output']:
'gt_word_score': A (h, w) float32 mask for textness score. 1 for word,
0 for bkg.
    The following targets are added to labels['instance_labels']:
'num_instance': A float scalar tensor for the total number of
instances. It is bounded by the maximum number of instances allowed.
        It includes the special background instance, so it equals
        (1 + the number of entities).
'masks': A (h, w) int32 mask for entity IDs. The value of each pixel is
the id of the entity it belongs to. A value of `0` means the bkg mask.
'classes': A (max_num,) int tensor indicating the classes of each
instance:
2 for background
1 for text entity
0 for non-object
'masks_sizes': A (max_num,) float tensor for the size of all masks.
'gt_weights': Whether it's difficult / does not have text annotation.
    The following targets are added to labels['paragraph_labels']:
      'paragraph_ids': A (max_num,) integer tensor for the paragraph id. If
        `-1`, then this text has no paragraph label.
'has_para_ids': A float scalar; 1.0 if the sample has paragraph labels.
Args:
data: The data dictionary.
features: The feature dict.
labels: The label dict.
"""
# Segmentation labels:
self._get_segmentation_labels(data, features, labels)
# Instance labels:
self._get_instance_labels(data, features, labels)
def _get_segmentation_labels(self, data: TensorDict,
unused_features: TensorDict,
labels: NestedTensorDict):
labels['segmentation_output'] = {
'gt_word_score': tf.cast((data['resized_masks'] > 0), tf.float32)
}
def _get_instance_labels(self, data: TensorDict, features: TensorDict,
labels: NestedTensorDict):
"""Generate the labels for text entity detection."""
labels['instance_labels'] = {}
# (1) Depending on `detection_unit`:
# Convert the word-id map to line-id map or use the word-id map directly
# Word entity ids start from 1 in the map, so pad a -1 at the beginning of
# the parent list to counter this offset.
padded_parent = tf.concat(
[tf.constant([-1]),
tf.cast(data['groundtruth_parent'], tf.int32)], 0)
if self._detection_unit == DetectionClass.WORD:
entity_id_mask = data['resized_masks']
elif self._detection_unit == DetectionClass.LINE:
# The pixel value is entity_id + 1, shape = [H, W]; 0 for background.
# correctness:
# 0s in data['resized_masks'] --> padded_parent[0] == -1
# i-th entity in plp.entities --> i+1 in data['resized_masks']
# --> padded_parent[i+1]
# --> data['groundtruth_parent'][i]
# --> the parent of i-th entity
entity_id_mask = tf.gather(padded_parent, data['resized_masks']) + 1
elif self._detection_unit == DetectionClass.PARAGRAPH:
# directly segmenting paragraphs; two hops here.
entity_id_mask = tf.gather(padded_parent, data['resized_masks']) + 1
entity_id_mask = tf.gather(padded_parent, entity_id_mask) + 1
else:
raise ValueError(f'No such detection unit: {self._detection_unit}')
data['entity_id_mask'] = entity_id_mask
# (2) Get individual masks for entities.
entity_selection_mask = tf.equal(data['groundtruth_classes'],
self._detection_unit)
num_all_entity = utilities.resolve_shape(data['groundtruth_classes'])[0]
# entity_ids is a 1-D tensor for IDs of all entities of a certain type.
entity_ids = tf.boolean_mask(
tf.range(num_all_entity, dtype=tf.int32), entity_selection_mask) # (N,)
# +1 to match the entity ids in entity_id_mask
entity_ids = tf.reshape(entity_ids, (-1, 1, 1)) + 1
individual_masks = tf.expand_dims(entity_id_mask, 0)
individual_masks = tf.equal(entity_ids, individual_masks) # (N, H, W), bool
# TODO(longshangbang): replace with real mask sizes computing.
# Currently, we use full-resolution masks for individual_masks. In order to
# compute mask sizes, we need to convert individual_masks to int/float type.
# This will cause OOM because the mask is too large.
masks_sizes = tf.cast(
tf.reduce_any(individual_masks, axis=[1, 2]), tf.float32)
# remove empty masks (usually caused by cropping)
non_empty_masks_ids = tf.not_equal(masks_sizes, 0)
valid_masks = tf.boolean_mask(individual_masks, non_empty_masks_ids)
valid_entity_ids = tf.boolean_mask(entity_ids, non_empty_masks_ids)[:, 0, 0]
# (3) Write num of instance
num_instance = tf.reduce_sum(tf.cast(non_empty_masks_ids, tf.float32))
num_instance_and_bkg = num_instance + 1
if self._max_num_instance >= 0:
num_instance_and_bkg = tf.minimum(num_instance_and_bkg,
self._max_num_instance)
labels['instance_labels']['num_instance'] = num_instance_and_bkg
# (4) Write instance masks
num_entity_int = tf.cast(num_instance, tf.int32)
max_num_entities = self._max_num_instance - 1 # Spare 1 for bkg.
pad_num = tf.maximum(max_num_entities - num_entity_int, 0)
padded_valid_masks = tf.pad(valid_masks, [[0, pad_num], [0, 0], [0, 0]])
# If there are more instances than allowed, randomly sample some.
# `random_selection_mask` is a 0/1 array; the maximum number of 1 is
# `self._max_num_instance`; if not bound, it's an array with all 1s.
if self._max_num_instance >= 0:
padded_size = num_entity_int + pad_num
random_selection = tf.random.uniform((padded_size,), dtype=tf.float32)
selected_indices = tf.math.top_k(random_selection, k=max_num_entities)[1]
random_selection_mask = tf.scatter_nd(
indices=tf.expand_dims(selected_indices, axis=-1),
updates=tf.ones((max_num_entities,), dtype=tf.bool),
shape=(padded_size,))
else:
random_selection_mask = tf.ones((num_entity_int,), dtype=tf.bool)
random_discard_mask = tf.logical_not(random_selection_mask)
kept_masks = tf.boolean_mask(padded_valid_masks, random_selection_mask)
erased_masks = tf.boolean_mask(padded_valid_masks, random_discard_mask)
erased_masks = tf.cast(tf.reduce_any(erased_masks, axis=0), tf.float32)
    # Erase text instances that are omitted.
features['images'] = _erase(erased_masks, features['images'], -1., 1.)
labels['segmentation_output']['gt_word_score'] *= 1. - erased_masks
kept_masks_and_bkg = tf.concat(
[
tf.math.logical_not(
tf.reduce_any(kept_masks, axis=0, keepdims=True)), # bkg
kept_masks,
],
0)
labels['instance_labels']['masks'] = tf.argmax(kept_masks_and_bkg, axis=0)
# (5) Write mask size
# TODO(longshangbang): replace with real masks sizes
masks_sizes = tf.cast(
tf.reduce_any(kept_masks_and_bkg, axis=[1, 2]), tf.float32)
labels['instance_labels']['masks_sizes'] = masks_sizes
# (6) Write classes.
classes = tf.ones((num_instance,), dtype=tf.int32)
classes = tf.concat([tf.constant(2, tf.int32, (1,)), classes], 0) # bkg
if self._max_num_instance >= 0:
classes = utilities.truncate_or_pad(classes, self._max_num_instance, 0)
labels['instance_labels']['classes'] = classes
# (7) gt-weights
selected_ids = tf.boolean_mask(valid_entity_ids,
random_selection_mask[:num_entity_int])
if self._detection_unit != DetectionClass.PARAGRAPH:
gt_text = tf.gather(data['groundtruth_text'], selected_ids - 1)
gt_weights = tf.cast(tf.strings.length(gt_text) > 0, tf.float32)
else:
text_types = tf.concat(
[
tf.constant([8]),
tf.cast(data['groundtruth_content_type'], tf.int32),
# TODO(longshangbang): temp solution for tfes with no para labels
tf.constant(8, shape=(1000,)),
],
0)
para_types = tf.gather(text_types, selected_ids)
gt_weights = tf.cast(
tf.not_equal(para_types, NOT_ANNOTATED_ID), tf.float32)
gt_weights = tf.concat([tf.constant(1., shape=(1,)), gt_weights], 0) # bkg
if self._max_num_instance >= 0:
gt_weights = utilities.truncate_or_pad(
gt_weights, self._max_num_instance, 0)
labels['instance_labels']['gt_weights'] = gt_weights
# (8) get paragraph label
# In this step, an array `{p_i}` is generated. `p_i` is an integer that
    # indicates the paragraph group that the i-th text belongs to. `p_i` == -1
# if this instance is non-text or it has no paragraph labels.
# word -> line -> paragraph
if self._detection_unit == DetectionClass.WORD:
num_hop = 2
elif self._detection_unit == DetectionClass.LINE:
num_hop = 1
elif self._detection_unit == DetectionClass.PARAGRAPH:
num_hop = 0
else:
raise ValueError(f'No such detection unit: {self._detection_unit}. '
'Note that this error should have been raised in '
'previous lines, not here!')
para_ids = tf.identity(selected_ids) # == id in plp + 1
for _ in range(num_hop):
para_ids = tf.gather(padded_parent, para_ids) + 1
text_types = tf.concat(
[
tf.constant([8]),
tf.cast(data['groundtruth_content_type'], tf.int32),
            # TODO(longshangbang): tricks for tfes that have no para labels
tf.constant(8, shape=(1000,)),
],
0)
para_types = tf.gather(text_types, para_ids)
para_ids = para_ids - 1 # revert to id in plp.entities; -1 for no labels
valid_para = tf.cast(tf.not_equal(para_types, NOT_ANNOTATED_ID), tf.int32)
para_ids = valid_para * para_ids + (1 - valid_para) * (-1)
para_ids = tf.concat([tf.constant([-1]), para_ids], 0) # add bkg
has_para_ids = tf.cast(tf.reduce_sum(valid_para) > 0, tf.float32)
if self._max_num_instance >= 0:
para_ids = utilities.truncate_or_pad(
para_ids, self._max_num_instance, 0, -1)
labels['paragraph_labels'] = {
'paragraph_ids': para_ids,
'has_para_ids': has_para_ids
}
def _define_shapes(self, features: TensorDict, labels: TensorDict):
"""Define the tensor shapes for TPU compiling."""
if not self._is_shape_defined:
return
features['images'] = tf.ensure_shape(
features['images'], (self._output_dimension, self._output_dimension, 3))
labels['segmentation_output']['gt_word_score'] = tf.ensure_shape(
labels['segmentation_output']['gt_word_score'],
(self._mask_dimension, self._mask_dimension))
labels['instance_labels']['num_instance'] = tf.ensure_shape(
labels['instance_labels']['num_instance'], [])
if self._max_num_instance >= 0:
labels['instance_labels']['masks_sizes'] = tf.ensure_shape(
labels['instance_labels']['masks_sizes'], (self._max_num_instance,))
labels['instance_labels']['masks'] = tf.ensure_shape(
labels['instance_labels']['masks'],
(self._mask_dimension, self._mask_dimension))
labels['instance_labels']['classes'] = tf.ensure_shape(
labels['instance_labels']['classes'], (self._max_num_instance,))
labels['instance_labels']['gt_weights'] = tf.ensure_shape(
labels['instance_labels']['gt_weights'], (self._max_num_instance,))
labels['paragraph_labels']['paragraph_ids'] = tf.ensure_shape(
labels['paragraph_labels']['paragraph_ids'],
(self._max_num_instance,))
labels['paragraph_labels']['has_para_ids'] = tf.ensure_shape(
labels['paragraph_labels']['has_para_ids'], [])
# File: official/projects/unified_detector/modeling/universal_detector.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Universal detector implementation."""
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import gin
import tensorflow as tf
from deeplab2 import config_pb2
from deeplab2.model.decoder import max_deeplab as max_deeplab_head
from deeplab2.model.encoder import axial_resnet_instances
from deeplab2.model.loss import matchers_ops
from official.legacy.transformer import transformer
from official.projects.unified_detector.utils import typing
from official.projects.unified_detector.utils import utilities
EPSILON = 1e-6
@gin.configurable
def universal_detection_loss_weights(
loss_segmentation_word: float = 1e0,
loss_inst_dist: float = 1e0,
loss_mask_id: float = 1e-4,
loss_pq: float = 3e0,
loss_para: float = 1e0) -> Dict[str, float]:
"""A function that returns a dict for the weights of loss terms."""
return {
"loss_segmentation_word": loss_segmentation_word,
"loss_inst_dist": loss_inst_dist,
"loss_mask_id": loss_mask_id,
"loss_pq": loss_pq,
"loss_para": loss_para,
}
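# These weights can be overridden through gin, e.g. with a binding like the
# following in a gin config file (illustrative value):
#   universal_detection_loss_weights.loss_pq = 2.0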
@gin.configurable
class LayerNorm(tf.keras.layers.LayerNormalization):
"""A wrapper to allow passing the `training` argument.
The normalization layers in the MaX-DeepLab implementation are passed with
the `training` argument. This wrapper enables the usage of LayerNorm.
"""
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
del training
return super().call(inputs)
@gin.configurable
def get_max_deep_lab_backbone(num_slots: int = 128):
return axial_resnet_instances.get_model(
"max_deeplab_s",
bn_layer=LayerNorm,
block_group_config={
"drop_path_schedule": "linear",
"axial_use_recompute_grad": False
},
backbone_use_transformer_beyond_stride=16,
extra_decoder_use_transformer_beyond_stride=16,
num_mask_slots=num_slots,
max_num_mask_slots=num_slots)
@gin.configurable
class UniversalDetector(tf.keras.layers.Layer):
"""Univeral Detector."""
loss_items = ("loss_pq", "loss_inst_dist", "loss_para", "loss_mask_id",
"loss_segmentation_word")
def __init__(self,
backbone_fn: tf.keras.layers.Layer = get_max_deep_lab_backbone,
mask_threshold: float = 0.4,
class_threshold: float = 0.5,
filter_area: float = 32,
**kwargs: Any):
"""Constructor.
Args:
backbone_fn: The function to initialize a backbone.
mask_threshold: Masks are thresholded with this value.
class_threshold: Classification heads are thresholded with this value.
filter_area: In inference, detections with area smaller than this
threshold will be removed.
**kwargs: other keyword arguments passed to the base class.
"""
super().__init__(**kwargs)
# Model
self._backbone_fn = backbone_fn()
self._decoder = _get_decoder_head()
self._class_embed_head, self._para_embed_head = _get_embed_head()
self._para_head, self._para_proj = _get_para_head()
# Losses
# self._max_deeplab_loss = _get_max_deeplab_loss()
self._loss_weights = universal_detection_loss_weights()
# Post-processing
self._mask_threshold = mask_threshold
self._class_threshold = class_threshold
self._filter_area = filter_area
def _preprocess_labels(self, labels: typing.TensorDict):
# Preprocessing
# Converted the integer mask to one-hot embedded masks.
num_instances = utilities.resolve_shape(
labels["instance_labels"]["masks_sizes"])[1]
labels["instance_labels"]["masks"] = tf.one_hot(
labels["instance_labels"]["masks"],
depth=num_instances,
axis=1,
dtype=tf.float32) # (B, N, H, W)
def compute_losses(
self, labels: typing.NestedTensorDict, outputs: typing.NestedTensorDict
) -> Tuple[tf.Tensor, typing.NestedTensorDict]:
"""Computes the loss.
Args:
labels: A dictionary of ground-truth labels.
outputs: Output from self.call().
Returns:
A scalar total loss tensor and a dictionary for individual losses.
"""
loss_dict = {}
self._preprocess_labels(labels)
# Main loss: PQ loss.
_entity_mask_loss(loss_dict, labels["instance_labels"],
outputs["instance_output"])
# Auxiliary loss 1: semantic loss
_semantic_loss(loss_dict, labels["segmentation_output"],
outputs["segmentation_output"])
# Auxiliary loss 2: instance discrimination
_instance_discrimination_loss(loss_dict, labels["instance_labels"], outputs)
# Auxiliary loss 3: mask id
_mask_id_xent_loss(loss_dict, labels["instance_labels"], outputs)
# Auxiliary loss 4: paragraph grouping
_paragraph_grouping_loss(loss_dict, labels, outputs)
weighted_loss = [self._loss_weights[k] * v for k, v in loss_dict.items()]
total_loss = sum(weighted_loss)
return total_loss, loss_dict
def call(self,
features: typing.TensorDict,
training: bool = False) -> typing.NestedTensorDict:
"""Forward pass of the model.
Args:
features: The input features: {"images": tf.Tensor}. Shape = [B, H, W, C]
training: Whether it's training mode.
Returns:
A dictionary of output with this structure:
{
"max_deep_lab": {
All the max deeplab outputs are here, including both backbone and
decoder.
}
"segmentation_output": {
"word_score": tf.Tensor, [B, h, w],
}
"instance_output": {
"cls_logits": tf.Tensor, [B, N, C],
"mask_id_logits": tf.Tensor, [B, H, W, N],
"cls_prob": tf.Tensor, [B, N, C],
"mask_id_prob": tf.Tensor, [B, H, W, N],
}
"postprocessed": {
"classes": A (B, N) tensor for the class ids. Zero for non-firing
slots.
"binary_masks": A (B, H, W, N) tensor for the N binary masks. Masks
for void cls are set to zero.
"confidence": A (B, N) float tensor for the confidence of "classes".
"mask_area": A (B, N) float tensor for the area of each mask.
}
"transformer_group_feature": (B, N, C) float tensor (normalized),
"para_affinity": (B, N, N) float tensor.
}
Class-0 is for void. Class-(C-1) is for background. Class-1~(C-2) is for
valid classes.
"""
# backbone
backbone_output = self._backbone_fn(features["images"], training)
# split instance embedding and paragraph embedding;
# then perform paragraph grouping
para_fts = self._get_para_outputs(backbone_output, training)
affinity = tf.linalg.matmul(para_fts, para_fts, transpose_b=True)
# text detection head
decoder_output = self._decoder(backbone_output, training)
output_dict = {
"max_deep_lab": decoder_output,
"transformer_group_feature": para_fts,
"para_affinity": affinity,
}
input_shape = utilities.resolve_shape(features["images"])
self._get_semantic_outputs(output_dict, input_shape)
self._get_instance_outputs(output_dict, input_shape)
self._postprocess(output_dict)
return output_dict
def _get_para_outputs(self, outputs: typing.TensorDict,
training: bool) -> tf.Tensor:
"""Apply the paragraph head.
This function first splits the features for instance classification and
instance grouping. Then, the additional grouping branch (transformer layers)
is applied to further encode the grouping features. Finally, a tensor of
normalized grouping features is returned.
Args:
outputs: output dictionary from the backbone.
training: training / eval mode mark.
Returns:
The normalized paragraph embedding vector of shape (B, N, C).
"""
# Project the object embeddings into classification feature and grouping
# feature.
fts = outputs["transformer_class_feature"] # B,N,C
class_feature = self._class_embed_head(fts, training)
group_feature = self._para_embed_head(fts, training)
outputs["transformer_class_feature"] = class_feature
outputs["transformer_group_feature"] = group_feature
# Feed the grouping features into additional group encoding branch.
    # First we need to build the attention_bias, which is used by the standard
    # transformer encoder.
input_shape = utilities.resolve_shape(group_feature)
b = input_shape[0]
n = int(input_shape[1])
seq_len = tf.constant(n, shape=(b,))
padding_mask = utilities.get_padding_mask_from_valid_lengths(
seq_len, n, tf.float32)
attention_bias = utilities.get_transformer_attention_bias(padding_mask)
group_feature = self._para_proj(
self._para_head(group_feature, attention_bias, None, training))
return tf.math.l2_normalize(group_feature, axis=-1)
def _get_semantic_outputs(self, outputs: typing.NestedTensorDict,
input_shape: tf.TensorShape):
"""Add `segmentation_output` to outputs.
Args:
outputs: A dictionary of outputs.
input_shape: The shape of the input images.
"""
h, w = input_shape[1:3]
# B, H/4, W/4, C
semantic_logits = outputs["max_deep_lab"]["semantic_logits"]
textness, unused_logits = tf.split(semantic_logits, [2, -1], -1)
# Channel[0:2], textness. c0: non-textness, c1: textness.
word_score = tf.nn.softmax(textness, -1, "word_score")[:, :, :, 1:2]
word_score = tf.squeeze(tf.image.resize(word_score, (h, w)), -1)
# Channel[2:] not used yet
outputs["segmentation_output"] = {"word_score": word_score}
def _get_instance_outputs(self, outputs: typing.NestedTensorDict,
input_shape: tf.TensorShape):
"""Add `instance_output` to outputs.
Args:
outputs: A dictionary of outputs.
input_shape: The shape of the input images.
    The following fields are added to outputs["instance_output"]:
"cls_logits": tf.Tensor, [B, N, C].
"mask_id_logits": tf.Tensor, [B, H, W, N].
"cls_prob": tf.Tensor, [B, N, C], softmax probability.
"mask_id_prob": tf.Tensor, [B, H, W, N], softmax probability. They are
used in training. Masks are all resized to full resolution.
"""
# Get instance_output
h, w = input_shape[1:3]
## Classes
class_logits = outputs["max_deep_lab"]["transformer_class_logits"]
# The MaX-DeepLab repo uses the last logit for void; but we use 0.
# Therefore we shift the logits here.
class_logits = tf.roll(class_logits, shift=1, axis=-1)
class_prob = tf.nn.softmax(class_logits)
## Masks
mask_id_logits = outputs["max_deep_lab"]["pixel_space_mask_logits"]
mask_id_prob = tf.nn.softmax(mask_id_logits)
mask_id_logits = tf.image.resize(mask_id_logits, (h, w))
mask_id_prob = tf.image.resize(mask_id_prob, (h, w))
outputs["instance_output"] = {
"cls_logits": class_logits,
"mask_id_logits": mask_id_logits,
"cls_prob": class_prob,
"mask_id_prob": mask_id_prob,
}
def _postprocess(self, outputs: typing.NestedTensorDict):
"""Post-process (filtering) the outputs.
Args:
outputs: A dictionary of outputs.
    The following fields are added to outputs["postprocessed"]:
"classes": A (B,N) integer tensor for the class ids.
"binary_masks": A (B, H, W, N) tensor for the N binarized 0/1 masks. Masks
for void cls are set to zero.
"confidence": A (B, N) float tensor for the confidence of "classes".
"mask_area": A (B, N) float tensor for the area of each mask. They are
used in inference / visualization.
"""
# Get postprocessed outputs
outputs["postprocessed"] = {}
## Masks:
mask_id_prob = outputs["instance_output"]["mask_id_prob"]
mask_max_prob = tf.reduce_max(mask_id_prob, axis=-1, keepdims=True)
thresholded_binary_masks = tf.cast(
tf.math.logical_and(
tf.equal(mask_max_prob, mask_id_prob),
tf.greater_equal(mask_max_prob, self._mask_threshold)), tf.float32)
area = tf.reduce_sum(thresholded_binary_masks, axis=(1, 2)) # (B, N)
## Classification:
cls_prob = outputs["instance_output"]["cls_prob"]
cls_max_prob = tf.reduce_max(cls_prob, axis=-1) # B, N
cls_max_id = tf.cast(tf.argmax(cls_prob, axis=-1), tf.float32) # B, N
## filtering
c = utilities.resolve_shape(cls_prob)[2]
non_void = tf.reduce_all(
tf.stack(
[
tf.greater_equal(area, self._filter_area), # mask large enough.
tf.not_equal(cls_max_id, 0), # class-0 is for non-object.
tf.not_equal(cls_max_id,
c - 1), # class-(c-1) is for background (last).
tf.greater_equal(cls_max_prob,
self._class_threshold) # prob >= thr
],
axis=-1),
axis=-1)
non_void = tf.cast(non_void, tf.float32)
# Storing
outputs["postprocessed"]["classes"] = tf.cast(cls_max_id * non_void,
tf.int32)
b, n = utilities.resolve_shape(non_void)
outputs["postprocessed"]["binary_masks"] = (
thresholded_binary_masks * tf.reshape(non_void, (b, 1, 1, n)))
outputs["postprocessed"]["confidence"] = cls_max_prob
outputs["postprocessed"]["mask_area"] = area
def _coloring(self, masks: tf.Tensor) -> tf.Tensor:
"""Coloring segmentation masks.
Used in visualization.
Args:
masks: A float binary tensor of shape (B, H, W, N), representing `B`
samples, with `N` masks of size `H*W` each. Each of the `N` masks will
be assigned a random color.
Returns:
A (b, h, w, 3) float tensor in [0., 1.] for the coloring result.
"""
b, h, w, n = utilities.resolve_shape(masks)
palette = tf.random.uniform((1, n, 3), 0.5, 1.)
colored = tf.reshape(
tf.matmul(tf.reshape(masks, (b, -1, n)), palette), (b, h, w, 3))
return colored
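  # Note: the coloring above is a single matmul; the (B, H, W, N) masks are
  # reshaped to (B, H*W, N) and multiplied by a random (1, N, 3) palette, so
  # each of the N masks receives its own random RGB color in one shot.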
def visualize(self,
outputs: typing.NestedTensorDict,
labels: Optional[typing.TensorDict] = None):
"""Visualizes the outputs and labels.
Args:
outputs: A dictionary of outputs.
labels: A dictionary of labels.
The following dict is added to outputs["visualization"]: {
"instance": {
"pred": A (B, H, W, 3) tensor for the visualized map in [0,1].
"gt": A (B, H, W, 3) tensor for the visualized map in [0,1], if labels
is present.
"concat": Concatenation of "prediction" and "gt" along width axis, if
labels is present. }
"seg-text": {... Similar to above, but the shape is (B, H, W, 1).} } All
of these tensors have a rank of 4 (B, H, W, C).
"""
outputs["visualization"] = {}
# 1. prediction
# 1.1 instance mask
binary_masks = outputs["postprocessed"]["binary_masks"]
outputs["visualization"]["instance"] = {
"pred": self._coloring(binary_masks),
}
# 1.2 text-seg
outputs["visualization"]["seg-text"] = {
"pred":
tf.expand_dims(outputs["segmentation_output"]["word_score"], -1),
}
# 2. labels
if labels is not None:
# 2.1 instance mask
# (B, N, H, W) -> (B, H, W, N); the first one is bkg so removed.
gt_masks = tf.transpose(labels["instance_labels"]["masks"][:, 1:],
(0, 2, 3, 1))
outputs["visualization"]["instance"]["gt"] = self._coloring(gt_masks)
# 2.2 text-seg
outputs["visualization"]["seg-text"]["gt"] = tf.expand_dims(
labels["segmentation_output"]["gt_word_score"], -1)
# 3. concat
for v in outputs["visualization"].values():
# Resize to make the size align. The prediction always has stride=1
# resolution, so we make gt align with pred instead of vice versa.
v["concat"] = tf.concat(
[v["pred"],
tf.image.resize(v["gt"],
tf.shape(v["pred"])[1:3])],
axis=2)
@tf.function
def serve(self, image_tensor: tf.Tensor) -> typing.NestedTensorDict:
"""Method to be exported for SavedModel.
Args:
image_tensor: A float32 normalized tensor representing an image of shape
[1, height, width, channels].
Returns:
Dict of output:
classes: (B, N) int32 tensor == o["postprocessed"]["classes"]
masks: (B, H, W, N) float32 tensor == o["postprocessed"]["binary_masks"]
groups: (B, N, N) float32 tensor == o["para_affinity"]
confidence: A (B, N) float tensor == o["postprocessed"]["confidence"]
mask_area: A (B, N) float tensor == o["postprocessed"]["mask_area"]
"""
features = {"images": image_tensor}
nn_outputs = self(features, False)
outputs = {
"classes": nn_outputs["postprocessed"]["classes"],
"masks": nn_outputs["postprocessed"]["binary_masks"],
"confidence": nn_outputs["postprocessed"]["confidence"],
"mask_area": nn_outputs["postprocessed"]["mask_area"],
"groups": nn_outputs["para_affinity"],
}
return outputs
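  # Example (illustrative sketch): building the model and calling `serve`.
  #   detector = UniversalDetector()
  #   image = tf.zeros([1, 1024, 1024, 3], tf.float32)  # normalized input
  #   outputs = detector.serve(image)
  #   # outputs['classes']: (1, N) ids; outputs['masks']: (1, H, W, N) masks.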
@gin.configurable()
def _get_decoder_head(
atrous_rates: Sequence[int] = (6, 12, 18),
pixel_space_dim: int = 128,
pixel_space_intermediate: int = 256,
low_level: Sequence[Dict[str, Union[str, int]]] = ({
"feature_key": "res3",
"channels_project": 64,
}, {
"feature_key": "res2",
"channels_project": 32,
}),
num_classes=3,
aux_sem_intermediate=256,
norm_fn=tf.keras.layers.BatchNormalization,
) -> max_deeplab_head.MaXDeepLab:
"""Get the MaX-DeepLab prediction head.
Args:
atrous_rates: Dilation rate for astrou conv in the semantic head.
pixel_space_dim: The dimension for the final panoptic features.
pixel_space_intermediate: The dimension for the layer before
`pixel_space_dim` (i.e. the separable 5x5 layer).
low_level: A list of dicts for the feature pyramid in forming the semantic
output. Each dict represents one skip-path from the backbone.
num_classes: Number of classes (entities + bkg) including void. For example,
if we only want to detect word, then `num_classes` = 3 (1 for word, 1 for
bkg, and 1 for void).
aux_sem_intermediate: Similar to `pixel_space_intermediate`, but for the
auxiliary semantic output head.
norm_fn: The normalization function used in the head.
Returns:
A MaX-DeepLab decoder head (as a keras layer).
"""
# Initialize the configs.
configs = config_pb2.ModelOptions()
configs.decoder.feature_key = "feature_semantic"
configs.decoder.atrous_rates.extend(atrous_rates)
configs.max_deeplab.pixel_space_head.output_channels = pixel_space_dim
configs.max_deeplab.pixel_space_head.head_channels = pixel_space_intermediate
for low_level_config in low_level:
low_level_ = configs.max_deeplab.auxiliary_low_level.add()
low_level_.feature_key = low_level_config["feature_key"]
low_level_.channels_project = low_level_config["channels_project"]
configs.max_deeplab.auxiliary_semantic_head.output_channels = num_classes
configs.max_deeplab.auxiliary_semantic_head.head_channels = aux_sem_intermediate
return max_deeplab_head.MaXDeepLab(configs.decoder,
configs.max_deeplab, 0, norm_fn)
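# Illustrative gin binding for the head above (the values are assumptions,
# shown only to demonstrate how the @gin.configurable parameters can be
# overridden in a config file):
#   _get_decoder_head.pixel_space_dim = 128
#   _get_decoder_head.num_classes = 3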
class PseudoLayer(tf.keras.layers.Layer):
"""Pseudo layer for ablation study.
  The `call()` function has the same argument signature as a transformer
  encoder stack; `unused_ph1` and `unused_ph2` are placeholders for this
  purpose. When studying the effectiveness of using a transformer as the
  grouping branch, this PseudoLayer can replace the transformer to serve as a
  no-transformer baseline.
  To use a single projection layer instead of a transformer, simply set
  `extra_fc` to True.
"""
def __init__(self, extra_fc: bool):
super().__init__(name="extra_fc")
self._extra_fc = extra_fc
if extra_fc:
self._layer = tf.keras.Sequential([
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.LayerNormalization(),
])
def call(self,
fts: tf.Tensor,
unused_ph1: Optional[tf.Tensor],
unused_ph2: Optional[tf.Tensor],
training: Optional[bool] = None) -> tf.Tensor:
"""See base class."""
if self._extra_fc:
return self._layer(fts, training)
return fts
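# Illustrative usage sketch (assumption, not from the original training code):
# replacing the transformer grouping branch with a pass-through baseline.
#   encoder = PseudoLayer(extra_fc=False)            # identity baseline
#   grouped = encoder(features, None, None, training=True)  # returns features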
@gin.configurable()
def _get_embed_head(
dimension=256,
norm_fn=tf.keras.layers.BatchNormalization
) -> Tuple[tf.keras.Sequential, tf.keras.Sequential]:
"""Projection layers to get instance & grouping features."""
instance_head = tf.keras.Sequential([
tf.keras.layers.Dense(dimension, use_bias=False),
norm_fn(),
tf.keras.layers.ReLU(),
])
grouping_head = tf.keras.Sequential([
tf.keras.layers.Dense(dimension, use_bias=False),
norm_fn(),
tf.keras.layers.ReLU(),
])
return instance_head, grouping_head
@gin.configurable()
def _get_para_head(
dimension=128,
num_layer=3,
extra_fc=False) -> Tuple[tf.keras.layers.Layer, tf.keras.layers.Layer]:
"""Get the additional para head.
Args:
    dimension: The dimension of the final output.
    num_layer: The number of transformer layers.
    extra_fc: Whether an extra single fully-connected layer is used when
      `num_layer` is 0.
  Returns:
    An encoder and a projection layer for the grouping features.
"""
if num_layer > 0:
encoder = transformer.EncoderStack(
params={
"hidden_size": 256,
"num_hidden_layers": num_layer,
"num_heads": 4,
"filter_size": 512,
"initializer_gain": 1.0,
"attention_dropout": 0.1,
"relu_dropout": 0.1,
"layer_postprocess_dropout": 0.1,
"allow_ffn_pad": True,
})
else:
encoder = PseudoLayer(extra_fc)
dense = tf.keras.layers.Dense(dimension)
return encoder, dense
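# Note: with num_layer=0 the grouping branch degenerates to the PseudoLayer
# defined above, which is the no-transformer ablation baseline (optionally with
# a single projection layer when extra_fc=True).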
def _dice_sim(pred: tf.Tensor, ground_truth: tf.Tensor) -> tf.Tensor:
"""Dice Coefficient for mask similarity.
Args:
pred: The predicted mask. [B, N, H, W], in [0, 1].
ground_truth: The ground-truth mask. [B, N, H, W], in [0, 1] or {0, 1}.
Returns:
    A similarity matrix: m[b, i, j] is the dice similarity between pred `i`
    and gt `j` in batch `b`.
"""
b, n = utilities.resolve_shape(pred)[:2]
ground_truth = tf.reshape(
tf.transpose(ground_truth, (0, 2, 3, 1)), (b, -1, n)) # B, HW, N
pred = tf.reshape(pred, (b, n, -1)) # B, N, HW
numerator = tf.matmul(pred, ground_truth) * 2.
# TODO(longshangbang): The official implementation does not square the scores.
# Need to do experiment to determine which one is better.
denominator = (
tf.math.reduce_sum(tf.math.square(ground_truth), 1, keepdims=True) +
tf.math.reduce_sum(tf.math.square(pred), 2, keepdims=True))
return (numerator + EPSILON) / (denominator + EPSILON)
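# Worked example (sketch): for identical binary masks X == Y the numerator is
# 2 * |X| and the denominator is |X| + |X|, so the similarity is ~1; for
# disjoint masks the numerator is 0 and the similarity is ~EPSILON / denominator.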
def _semantic_loss(
loss_dict: Dict[str, tf.Tensor],
labels: tf.Tensor,
outputs: tf.Tensor,
):
"""Auxiliary semantic loss.
Currently, these losses are added:
(1) text/non-text heatmap
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary containing:
`gt_word_score`: (B, H, W) tensor for the text/non-text map.
outputs: The output dictionary containing:
`word_score`: (B, H, W) prediction tensor for `gt_word_score`
"""
pred = tf.expand_dims(outputs["word_score"], 1)
gt = tf.expand_dims(labels["gt_word_score"], 1)
loss_dict["loss_segmentation_word"] = 1. - tf.reduce_mean(_dice_sim(pred, gt))
@gin.configurable
def _entity_mask_loss(loss_dict: Dict[str, tf.Tensor],
labels: tf.Tensor,
outputs: tf.Tensor,
alpha: float = gin.REQUIRED):
"""PQ loss for entity-mask training.
This method adds the PQ loss term to loss_dict directly. The match result will
  also be stored in outputs (as a [B, N_pred, N_gt] float tensor).
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
    labels: A dict containing:
      `num_instance`: (B,)
      `masks`: (B, N, H, W)
      `classes`: (B, N)
outputs: A dict containing:
`cls_prob`: (B, N, C)
`mask_id_prob`: (B, H, W, N)
`cls_logits`: (B, N, C)
`mask_id_logits`: (B, H, W, N)
alpha: Weight for pos/neg balance.
"""
# Classification score: (B, N, N)
# in batch b, the probability of prediction i being class of gt j, i.e.:
# score[b, i, j] = pred_cls[b, i, gt_cls[b, j]]
gt_cls = labels["classes"] # (B, N)
pred_cls = outputs["cls_prob"] # (B, N, C)
b, n = utilities.resolve_shape(pred_cls)[:2]
# indices[b, i, j] = gt_cls[b, j]
indices = tf.tile(tf.expand_dims(gt_cls, 1), (1, n, 1))
cls_score = tf.gather(pred_cls, tf.cast(indices, tf.int32), batch_dims=2)
# Mask score (dice): (B, N, N)
# mask_score[b, i, j]: dice-similarity for pred i and gt j in batch b.
mask_score = _dice_sim(
tf.transpose(outputs["mask_id_prob"], (0, 3, 1, 2)), labels["masks"])
# Get similarity matrix and matching.
  # padded_mask[b, j, i] = 1 if i >= num_instance[b]; the similarity of such
  # padded entries is set to -1, far below any real score.
similarity = cls_score * mask_score
padded_mask = tf.cast(tf.reshape(tf.range(n), (1, 1, n)), tf.float32)
padded_mask = tf.cast(
tf.math.greater_equal(padded_mask,
tf.reshape(labels["num_instance"], (b, 1, 1))),
tf.float32)
# The constant value for padding has no effect.
masked_similarity = similarity * (1. - padded_mask) + padded_mask * (-1.)
matched_mask = matchers_ops.hungarian_matching(-masked_similarity)
matched_mask = tf.cast(matched_mask, tf.float32) * (1 - padded_mask)
outputs["matched_mask"] = matched_mask
# Pos loss
loss_pos = (
tf.stop_gradient(cls_score) * (-mask_score) +
tf.stop_gradient(mask_score) * (-tf.math.log(cls_score)))
loss_pos = tf.reduce_sum(loss_pos * matched_mask, axis=[1, 2]) # (B,)
# Neg loss
matched_pred = tf.cast(tf.reduce_sum(matched_mask, axis=2) > 0,
tf.float32) # (B, N)
# 0 for void class
log_loss = -tf.nn.log_softmax(outputs["cls_logits"])[:, :, 0] # (B, N)
loss_neg = tf.reduce_sum(log_loss * (1. - matched_pred), axis=-1) # (B,)
loss_pq = (alpha * loss_pos + (1 - alpha) * loss_neg) / n
loss_pq = tf.reduce_mean(loss_pq)
loss_dict["loss_pq"] = loss_pq
@gin.configurable
def _instance_discrimination_loss(loss_dict: Dict[str, Any],
labels: Dict[str, Any],
outputs: Dict[str, Any],
tau: float = gin.REQUIRED):
"""Instance discrimination loss.
This method adds the ID loss term to loss_dict directly.
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary.
outputs: The output dictionary.
    tau: The temperature term in the loss.
"""
# The normalized feature, shape=(B, H/4, W/4, D)
g = outputs["max_deep_lab"]["pixel_space_normalized_feature"]
b, h, w = utilities.resolve_shape(g)[:3]
# The ground-truth masks, shape=(B, N, H, W) --> (B, N, H/4, W/4)
m = labels["masks"]
m = tf.image.resize(
tf.transpose(m, (0, 2, 3, 1)), (h, w),
tf.image.ResizeMethod.NEAREST_NEIGHBOR)
m = tf.transpose(m, (0, 3, 1, 2))
# The number of ground-truth instance (K), shape=(B,)
num = labels["num_instance"]
n = utilities.resolve_shape(m)[1] # max number of predictions
# is_void[b, i] = 1 if instance i in batch b is a padded slot.
is_void = tf.cast(tf.expand_dims(tf.range(n), 0), tf.float32) # (1, n)
is_void = tf.cast(
tf.math.greater_equal(is_void, tf.expand_dims(num, 1)), tf.float32)
# (B, N, D)
t = tf.math.l2_normalize(tf.einsum("bhwd,bnhw->bnd", g, m), axis=-1)
inst_dist_logits = tf.einsum("bhwd,bid->bhwi", g, t) / tau # (B, H, W, N)
inst_dist_logits = inst_dist_logits - 100. * tf.reshape(is_void, (b, 1, 1, n))
mask_id = tf.cast(
tf.einsum("bnhw,n->bhw", m, tf.range(n, dtype=tf.float32)), tf.int32)
loss_map = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=mask_id, logits=inst_dist_logits) # B, H, W
valid_mask = tf.reduce_sum(m, axis=1)
loss_inst_dist = (
(tf.reduce_sum(loss_map * valid_mask, axis=[1, 2]) + EPSILON) /
(tf.reduce_sum(valid_mask, axis=[1, 2]) + EPSILON))
loss_dict["loss_inst_dist"] = tf.reduce_mean(loss_inst_dist)
@gin.configurable
def _paragraph_grouping_loss(
loss_dict: Dict[str, Any],
labels: Dict[str, Any],
outputs: Dict[str, Any],
tau: float = gin.REQUIRED,
loss_mode="vanilla",
fl_alpha: float = 0.25,
fl_gamma: float = 2.,
):
"""Instance discrimination loss.
This method adds the para discrimination loss term to loss_dict directly.
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary.
outputs: The output dictionary.
    tau: The temperature term in the loss.
    loss_mode: The type of loss.
    fl_alpha: Alpha value in the focal loss.
    fl_gamma: Gamma value in the focal loss.
"""
if "paragraph_labels" not in labels:
loss_dict["loss_para"] = 0.
return
# step 1:
# obtain the paragraph labels for each prediction
# (batch, pred, gt)
matched_matrix = outputs["instance_output"]["matched_mask"] # B, N, N
para_label_gt = labels["paragraph_labels"]["paragraph_ids"] # B, N
has_para_label_gt = (
labels["paragraph_labels"]["has_para_ids"][:, tf.newaxis, tf.newaxis])
# '0' means no paragraph labels
pred_label_gt = tf.einsum("bij,bj->bi", matched_matrix,
tf.cast(para_label_gt + 1, tf.float32))
pred_label_gt_pad_col = tf.expand_dims(pred_label_gt, -1) # b,n,1
pred_label_gt_pad_row = tf.expand_dims(pred_label_gt, 1) # b,1,n
gt_affinity = tf.cast(
tf.equal(pred_label_gt_pad_col, pred_label_gt_pad_row), tf.float32)
gt_affinity_mask = (
has_para_label_gt * pred_label_gt_pad_col * pred_label_gt_pad_row)
gt_affinity_mask = tf.cast(tf.not_equal(gt_affinity_mask, 0.), tf.float32)
# step 2:
# get affinity matrix
affinity = outputs["para_affinity"]
# step 3:
# compute loss
loss_fn = tf.keras.losses.BinaryCrossentropy(
from_logits=True,
label_smoothing=0,
axis=-1,
reduction=tf.keras.losses.Reduction.NONE,
name="para_dist")
affinity = tf.reshape(affinity, (-1, 1)) # (b*n*n, 1)
gt_affinity = tf.reshape(gt_affinity, (-1, 1)) # (b*n*n, 1)
gt_affinity_mask = tf.reshape(gt_affinity_mask, (-1,)) # (b*n*n,)
pointwise_loss = loss_fn(gt_affinity, affinity / tau) # (b*n*n,)
if loss_mode == "vanilla":
loss = (
tf.reduce_sum(pointwise_loss * gt_affinity_mask) /
(tf.reduce_sum(gt_affinity_mask) + EPSILON))
elif loss_mode == "balanced":
# pos
pos_mask = gt_affinity_mask * gt_affinity[:, 0]
pos_loss = (
tf.reduce_sum(pointwise_loss * pos_mask) /
(tf.reduce_sum(pos_mask) + EPSILON))
# neg
neg_mask = gt_affinity_mask * (1. - gt_affinity[:, 0])
neg_loss = (
tf.reduce_sum(pointwise_loss * neg_mask) /
(tf.reduce_sum(neg_mask) + EPSILON))
loss = 0.25 * pos_loss + 0.75 * neg_loss
elif loss_mode == "focal":
alpha_wt = fl_alpha * gt_affinity + (1. - fl_alpha) * (1. - gt_affinity)
prob_pos = tf.math.sigmoid(affinity / tau)
pt = prob_pos * gt_affinity + (1. - prob_pos) * (1. - gt_affinity)
fl_loss_pw = tf.stop_gradient(
alpha_wt * tf.pow(1. - pt, fl_gamma))[:, 0] * pointwise_loss
loss = (
tf.reduce_sum(fl_loss_pw * gt_affinity_mask) /
(tf.reduce_sum(gt_affinity_mask) + EPSILON))
else:
raise ValueError(f"Not supported loss mode: {loss_mode}")
loss_dict["loss_para"] = loss
def _mask_id_xent_loss(loss_dict: Dict[str, Any], labels: Dict[str, Any],
outputs: Dict[str, Any]):
"""Mask ID loss.
This method adds the mask ID loss term to loss_dict directly.
Args:
loss_dict: A dictionary for the loss. The values are loss scalars.
labels: The label dictionary.
outputs: The output dictionary.
"""
# (B, N, H, W)
mask_gt = labels["masks"]
# B, H, W, N
mask_id_logits = outputs["instance_output"]["mask_id_logits"]
# B, N, N
matched_matrix = outputs["instance_output"]["matched_mask"]
# B, N
gt_to_pred_id = tf.cast(tf.math.argmax(matched_matrix, axis=1), tf.float32)
# B, H, W
mask_id_labels = tf.cast(
tf.einsum("bnhw,bn->bhw", mask_gt, gt_to_pred_id), tf.int32)
loss_map = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=mask_id_labels, logits=mask_id_logits)
valid_mask = tf.reduce_sum(mask_gt, axis=1)
loss_mask_id = (
(tf.reduce_sum(loss_map * valid_mask, axis=[1, 2]) + EPSILON) /
(tf.reduce_sum(valid_mask, axis=[1, 2]) + EPSILON))
loss_dict["loss_mask_id"] = tf.reduce_mean(loss_mask_id)
| 33,717 | 36.928009 | 82 | py |
models | models-master/official/projects/unified_detector/tasks/ocr_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task definition for ocr."""
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import gin
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.projects.unified_detector.configs import ocr_config
from official.projects.unified_detector.data_loaders import input_reader
from official.projects.unified_detector.tasks import all_models # pylint: disable=unused-import
from official.projects.unified_detector.utils import typing
NestedTensorDict = typing.NestedTensorDict
ModelType = Union[tf.keras.layers.Layer, tf.keras.Model]
@task_factory.register_task_cls(ocr_config.OcrTaskConfig)
@gin.configurable
class OcrTask(base_task.Task):
"""Defining the OCR training task."""
_loss_items = []
def __init__(self,
params: cfg.TaskConfig,
logging_dir: Optional[str] = None,
name: Optional[str] = None,
model_fn: Callable[..., ModelType] = gin.REQUIRED):
super().__init__(params, logging_dir, name)
    self._model_fn = model_fn
  def build_model(self) -> ModelType:
    """Build and return the model, and record the loss items as well."""
    model = self._model_fn()
self._loss_items.extend(model.loss_items)
return model
def build_inputs(
self,
params: cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Build the tf.data.Dataset instance."""
return input_reader.InputFn(is_training=params.is_training)({},
input_context)
def build_metrics(self,
training: bool = True) -> Sequence[tf.keras.metrics.Metric]:
"""Build the metrics (currently, only for loss summaries in TensorBoard)."""
del training
metrics = []
# Add loss items
for name in self._loss_items:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
# TODO(longshangbang): add evaluation metrics
return metrics
def train_step(
self,
inputs: Tuple[NestedTensorDict, NestedTensorDict],
model: ModelType,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[Sequence[tf.keras.metrics.Metric]] = None
) -> Dict[str, tf.Tensor]:
features, labels = inputs
input_dict = {"features": features}
if self.task_config.model_call_needs_labels:
input_dict["labels"] = labels
is_mixed_precision = isinstance(optimizer,
tf.keras.mixed_precision.LossScaleOptimizer)
with tf.GradientTape() as tape:
outputs = model(**input_dict, training=True)
loss, loss_dict = model.compute_losses(labels=labels, outputs=outputs)
loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
if is_mixed_precision:
loss = optimizer.get_scaled_loss(loss)
tvars = model.trainable_variables
grads = tape.gradient(loss, tvars)
if is_mixed_precision:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {"loss": loss}
if metrics:
for m in metrics:
m.update_state(loss_dict[m.name])
return logs
| 3,888 | 34.678899 | 96 | py |
models | models-master/official/projects/unified_detector/tasks/all_models.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import all models.
All model files are imported here so that they can be referenced in Gin. Also,
importing here avoids making ocr_task.py too messy.
"""
# pylint: disable=unused-import
from official.projects.unified_detector.modeling import universal_detector
| 876 | 35.541667 | 78 | py |
models | models-master/official/projects/mae/optimization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized optimizer to match paper results."""
import dataclasses
from typing import List, Optional
from absl import logging
import tensorflow as tf
from official.modeling import optimization
from official.nlp import optimization as nlp_optimization
@dataclasses.dataclass
class ViTAdamWConfig(optimization.AdamWeightDecayConfig):
layer_decay: Optional[float] = 1.0
vars_substr: Optional[List[str]] = None
layers_idx: Optional[List[int]] = None
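  # `vars_substr[i]` identifies (by substring match on variable names) one depth
  # group of variables and `layers_idx[i]` is its depth; that group's learning
  # rate is scaled by layer_decay ** (max(layers_idx) + 1 - layers_idx[i]).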
@dataclasses.dataclass
class OptimizerConfig(optimization.OptimizerConfig):
vit_adamw: ViTAdamWConfig = dataclasses.field(default_factory=ViTAdamWConfig)
@dataclasses.dataclass
class OptimizationConfig(optimization.OptimizationConfig):
"""Configuration for optimizer and learning rate schedule.
Attributes:
optimizer: optimizer oneof config.
    ema: optional exponential moving average optimizer config; if specified, the
      EMA optimizer will be used.
learning_rate: learning rate oneof config.
warmup: warmup oneof config.
"""
optimizer: OptimizerConfig = dataclasses.field(
default_factory=OptimizerConfig
)
# TODO(frederickliu): figure out how to make this configuable.
# TODO(frederickliu): Study if this is needed.
class _ViTAdamW(nlp_optimization.AdamWeightDecay):
"""Custom AdamW to support different lr scaling for backbone.
  The code is copied from AdamWeightDecay and Adam, with layer-wise
  learning-rate scaling added on top.
"""
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay_rate=0.0,
include_in_weight_decay=None,
exclude_from_weight_decay=None,
gradient_clip_norm=1.0,
layer_decay=1.0,
vars_substr=None,
layers_idx=None,
name='ViTAdamWeightDecay',
**kwargs):
super(_ViTAdamW,
self).__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad,
weight_decay_rate, include_in_weight_decay,
exclude_from_weight_decay, gradient_clip_norm, name,
**kwargs)
self._layer_decay = layer_decay
self._vars_substr = vars_substr
self._layers_idx = layers_idx
self._max_idx = max(layers_idx) + 1 if layers_idx is not None else 1
def _resource_apply_dense(self, grad, var, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
apply_state = kwargs['apply_state']
if (
self._layer_decay != 1.0
and self._vars_substr is not None
and self._layers_idx is not None
):
is_decayed = False
for var_substr, idx in zip(self._vars_substr, self._layers_idx):
if var_substr in var.name:
decay_factor = self._layer_decay ** (self._max_idx - idx)
lr_t = lr_t * decay_factor
is_decayed = True
logging.debug(
'Applying layer-wise lr decay: %s: %f', var.name, decay_factor)
break
if not is_decayed:
logging.debug('Ignore layer-wise lr decay: %s', var.name)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
lr = coefficients['lr_t']
if (
self._layer_decay != 1.0
and self._vars_substr is not None
and self._layers_idx is not None
):
for var_substr, idx in zip(self._vars_substr, self._layers_idx):
if var_substr in var.name:
lr = lr * (self._layer_decay ** (self._max_idx - idx))
break
if not self.amsgrad:
return tf.raw_ops.ResourceApplyAdam(
var=var.handle,
m=m.handle,
v=v.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=lr,
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
else:
vhat = self.get_slot(var, 'vhat')
return tf.raw_ops.ResourceApplyAdamWithAmsgrad(
var=var.handle,
m=m.handle,
v=v.handle,
vhat=vhat.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=lr,
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
apply_state = kwargs['apply_state']
if (
self._layer_decay != 1.0
and self._vars_substr is not None
and self._layers_idx is not None
):
is_decayed = False
for var_substr, idx in zip(self._vars_substr, self._layers_idx):
if var_substr in var.name:
decay_factor = self._layer_decay ** (self._max_idx - idx)
lr_t = lr_t * decay_factor
is_decayed = True
logging.debug(
'Applying layer-wise lr decay: %s: %f', var.name, decay_factor)
break
if not is_decayed:
logging.debug('Ignore layer-wise lr decay: %s', var.name)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, 'm')
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m_t = tf.compat.v1.assign(m, m * coefficients['beta_1_t'],
use_locking=self._use_locking)
with tf.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, 'v')
v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
v_t = tf.compat.v1.assign(v, v * coefficients['beta_2_t'],
use_locking=self._use_locking)
with tf.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
lr = coefficients['lr_t']
if (
self._layer_decay != 1.0
and self._vars_substr is not None
and self._layers_idx is not None
):
for var_substr, idx in zip(self._vars_substr, self._layers_idx):
if var_substr in var.name:
lr = lr * (self._layer_decay ** (self._max_idx - idx))
break
if not self.amsgrad:
v_sqrt = tf.sqrt(v_t)
var_update = tf.compat.v1.assign_sub(
var, lr * m_t / (v_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return tf.group(*[var_update, m_t, v_t])
else:
v_hat = self.get_slot(var, 'vhat')
v_hat_t = tf.maximum(v_hat, v_t)
with tf.control_dependencies([v_hat_t]):
v_hat_t = tf.compat.v1.assign(
v_hat, v_hat_t, use_locking=self._use_locking)
v_hat_sqrt = tf.sqrt(v_hat_t)
var_update = tf.compat.v1.assign_sub(
var,
            lr * m_t / (v_hat_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return tf.group(*[var_update, m_t, v_t, v_hat_t])
optimization.register_optimizer_cls('vit_adamw', _ViTAdamW)
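# Once registered, the optimizer is selected from an OptimizationConfig via
# 'optimizer': {'type': 'vit_adamw', 'vit_adamw': {...}} (see configs/vit.py).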
| 8,653 | 37.292035 | 80 | py |
models | models-master/official/projects/mae/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver, register MAE configs."""
from absl import app
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.projects.mae.configs import linear_probe
from official.projects.mae.configs import mae
from official.projects.mae.configs import vit
from official.projects.mae.tasks import image_classification
from official.projects.mae.tasks import linear_probe as linear_probe_task
from official.projects.mae.tasks import masked_ae
# pylint: enable=unused-import
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,261 | 37.242424 | 75 | py |
models | models-master/official/projects/mae/configs/mae.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAE configurations."""
import dataclasses
from typing import Tuple
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.configs import image_classification
@dataclasses.dataclass
class MAEConfig(cfg.TaskConfig):
"""The translation task config."""
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
masking_ratio: float = 0.75
patch_h: int = 14
patch_w: int = 14
num_classes: int = 1000
input_size: Tuple[int, int] = (224, 224)
norm_target: bool = False
@exp_factory.register_config_factory('mae_imagenet')
def mae_imagenet() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 4096
eval_batch_size = 4096
imagenet_size = 1281167
steps_per_epoch = imagenet_size // train_batch_size
config = cfg.ExperimentConfig(
task=MAEConfig(
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=10000,
crop_area_range=(0.2, 1.0),
),
validation_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
)
),
trainer=cfg.TrainerConfig(
train_steps=800 * steps_per_epoch,
validation_steps=24,
steps_per_loop=1000,
summary_interval=1000,
checkpoint_interval=1000,
validation_interval=1000,
max_to_keep=5,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'beta_2': 0.95,
'weight_decay_rate': 0.05,
# Avoid AdamW legacy behavior.
'gradient_clip_norm':
0.0,
'exclude_from_weight_decay': [
'LayerNorm', 'layer_norm', 'bias']
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate':
1.5 * 1e-4 * train_batch_size / 256,
'decay_steps': 800 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 40 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})
),
restrictions=[
'task.train_data.is_training != None',
])
return config
| 3,688 | 33.157407 | 80 | py |
models | models-master/official/projects/mae/configs/linear_probe.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ViT linear probing configurations."""
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.projects.mae.tasks import linear_probe
from official.vision.configs import image_classification
@exp_factory.register_config_factory('vit_imagenet_mae_linear_probe')
def vit_imagenet_mae_linear_probe() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 16384
eval_batch_size = 1024
imagenet_size = 1281167
steps_per_epoch = imagenet_size // train_batch_size
config = cfg.ExperimentConfig(
task=linear_probe.ViTLinearProbeConfig( # pylint: disable=unexpected-keyword-arg
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=20000,
),
validation_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
aug_rand_hflip=False,
),
init_stochastic_depth_rate=0.0,
init_checkpoint='Please provide',
),
trainer=cfg.TrainerConfig(
train_steps=90 * steps_per_epoch,
validation_steps=48,
steps_per_loop=100,
summary_interval=100,
checkpoint_interval=100,
validation_interval=100,
max_to_keep=1,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'lars',
'lars': {
'weight_decay_rate': 0.0,
'momentum': 0.9,
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.1 * train_batch_size / 256,
'decay_steps': 90 * steps_per_epoch,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 10 * steps_per_epoch,
'warmup_learning_rate': 0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
],
)
return config
| 3,174 | 35.079545 | 87 | py |
models | models-master/official/projects/mae/configs/vit.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ViT configurations."""
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.mae import optimization
from official.projects.mae.tasks import image_classification as vit
from official.vision.configs import common
from official.vision.configs import image_classification
vars_substr = [
'token_layer/cls', 'dense_1/kernel', 'vi_t_classifier/dense',
'encoder/layer_normalization', 'encoder/transformer_encoder_block/'
]
layers_idx = [0, 0, 25, 24, 1]
for i in range(1, 24):
vars_substr.append('encoder/transformer_encoder_block_%s/' % str(i))
layers_idx.append(i + 1)
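# Rough depth mapping used for the layer-wise lr decay: depth 0 covers the cls
# token and the patch embedding, depths 1-24 cover the 24 transformer blocks
# (the final encoder layer norm shares depth 24), and depth 25 is the
# classifier head; smaller depths receive smaller learning rates under
# `layer_decay`.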
@exp_factory.register_config_factory('vit_imagenet_mae_finetune')
def vit_imagenet_mae_finetune() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 1024
eval_batch_size = 1024
imagenet_size = 1281167
steps_per_epoch = imagenet_size // train_batch_size
config = cfg.ExperimentConfig(
task=vit.ViTConfig(
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=10000,
aug_type=common.Augmentation(
type='randaug',
randaug=common.RandAugment(
magnitude=9,
magnitude_std=0.5,
exclude_ops=['Cutout', 'Invert'],
),
),
),
validation_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
aug_rand_hflip=False,
),
init_stochastic_depth_rate=0.1,
init_checkpoint='Please provide',
),
trainer=cfg.TrainerConfig(
train_steps=50 * steps_per_epoch,
validation_steps=48,
steps_per_loop=2000,
summary_interval=2000,
checkpoint_interval=2000,
validation_interval=2000,
max_to_keep=1,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'vit_adamw',
'vit_adamw': {
'weight_decay_rate': 0.05,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0,
'beta_2': 0.999,
'layer_decay': 0.75,
'vars_substr': vars_substr,
'layers_idx': layers_idx,
'exclude_from_weight_decay': ['cls'],
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1e-3 * train_batch_size / 256,
'decay_steps': 50 * steps_per_epoch,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 5 * steps_per_epoch,
'warmup_learning_rate': 0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
],
)
return config
@exp_factory.register_config_factory('vit_imagenet_scratch')
def vit_imagenet_scratch() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 4096
eval_batch_size = 1024
imagenet_size = 1281167
steps_per_epoch = imagenet_size // train_batch_size
config = cfg.ExperimentConfig(
task=vit.ViTConfig(
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=10000,
aug_type=common.Augmentation(
type='randaug',
randaug=common.RandAugment(
magnitude=9,
magnitude_std=0.5,
exclude_ops=['Cutout', 'Invert'])
)
),
validation_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
aug_rand_hflip=False,
)
),
trainer=cfg.TrainerConfig(
train_steps=200 * steps_per_epoch,
validation_steps=48,
steps_per_loop=1000,
summary_interval=1000,
checkpoint_interval=1000,
validation_interval=1000,
max_to_keep=1,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'vit_adamw',
'vit_adamw': {
'weight_decay_rate': 0.3,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0,
'beta_2': 0.95,
'exclude_from_weight_decay': ['cls']
}
},
'ema': {
'average_decay': 0.9999,
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate':
1e-4 * train_batch_size / 256,
'decay_steps': 200 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 20 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})
),
restrictions=[
'task.train_data.is_training != None',
])
return config
| 6,741 | 34.114583 | 77 | py |
models | models-master/official/projects/mae/modeling/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for MAE."""
import math
import tensorflow as tf
from official.modeling import tf_utils
# TODO(frederickliu): Move this to vision ops and add tests.
def position_embedding_sine(attention_mask,
num_pos_features=256,
temperature=10000.,
normalize=True,
scale=2 * math.pi):
"""Sine-based positional embeddings for 2D images.
Args:
attention_mask: a `bool` Tensor specifying the size of the input image to
the Transformer and which elements are padded, of size [batch_size,
height, width]
num_pos_features: a `int` specifying the number of positional features,
should be equal to the hidden size of the Transformer network
temperature: a `float` specifying the temperature of the positional
      embedding. Any type that can be converted to a `float` is also accepted.
normalize: a `bool` determining whether the positional embeddings should be
normalized between [0, scale] before application of the sine and cos
functions.
    scale: a `float`, used when `normalize` is True, that scales the embeddings
      before the sine and cosine functions are applied.
Returns:
    embeddings: a `float` tensor of shape [batch_size, height, width,
      num_pos_features] containing the positional embeddings based on sine
      features.
"""
if num_pos_features % 2 != 0:
raise ValueError(
"Number of embedding features (num_pos_features) must be even when "
"column and row embeddings are concatenated.")
num_pos_features = num_pos_features // 2
# Produce row and column embeddings based on total size of the image
# <tf.float>[batch_size, height, width]
attention_mask = tf.cast(attention_mask, tf.float32)
row_embedding = tf.cumsum(attention_mask, 1)
col_embedding = tf.cumsum(attention_mask, 2)
if normalize:
eps = 1e-6
row_embedding = row_embedding / (row_embedding[:, -1:, :] + eps) * scale
col_embedding = col_embedding / (col_embedding[:, :, -1:] + eps) * scale
dim_t = tf.range(num_pos_features, dtype=row_embedding.dtype)
dim_t = tf.pow(temperature, 2 * (dim_t // 2) / num_pos_features)
# Creates positional embeddings for each row and column position
# <tf.float>[batch_size, height, width, num_pos_features]
pos_row = tf.expand_dims(row_embedding, -1) / dim_t
pos_col = tf.expand_dims(col_embedding, -1) / dim_t
pos_row = tf.stack(
[tf.sin(pos_row[:, :, :, 0::2]),
tf.cos(pos_row[:, :, :, 1::2])], axis=4)
pos_col = tf.stack(
[tf.sin(pos_col[:, :, :, 0::2]),
tf.cos(pos_col[:, :, :, 1::2])], axis=4)
# final_shape = pos_row.shape.as_list()[:3] + [-1]
final_shape = tf_utils.get_shape_list(pos_row)[:3] + [-1]
pos_row = tf.reshape(pos_row, final_shape)
pos_col = tf.reshape(pos_col, final_shape)
output = tf.concat([pos_row, pos_col], -1)
embeddings = tf.cast(output, tf.float32)
return embeddings
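# Illustrative usage (sketch): for an all-ones (B, H, W) mask the result has
# shape (B, H, W, num_pos_features), e.g.
#   mask = tf.ones((2, 16, 16))
#   embed = position_embedding_sine(mask, num_pos_features=1024, normalize=False)
#   # embed.shape == (2, 16, 16, 1024)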
| 3,560 | 39.931034 | 80 | py |
models | models-master/official/projects/mae/modeling/vit.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for ViT."""
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.mae.modeling import utils
from official.vision.modeling.backbones import vit
def to_patch(images, patch_height, patch_width):
"""Image (NHWC) to patches (N(H' W')(patch_height patch_width c))."""
batch_size, h, w, c = tf_utils.get_shape_list(images)
num_h = h // patch_height
num_w = w // patch_width
x = tf.reshape(images,
(batch_size, num_h, patch_height, num_w, patch_width, c))
x = tf.einsum('nhpwqc->nhwpqc', x)
x = tf.reshape(x, (batch_size, num_h, num_w, patch_height * patch_width * c))
return x
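# Shape example (sketch): a (2, 224, 224, 3) batch with 14x14 patches becomes
# (2, 16, 16, 14 * 14 * 3) == (2, 16, 16, 588).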
class ViTClassifier(tf.keras.Model):
"""ViT classifier for finetune."""
def __init__(self, encoder, num_classes, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.linear = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=2e-5))
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
encoded = self.encoder({'images': inputs})
return self.linear(encoded[:, 0])
class ViTLinearClassifier(tf.keras.Model):
"""ViT classifier for linear probing."""
def __init__(self, encoder, num_classes, use_sync_bn=True, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.linear = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.01))
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
self.batch_norm = self._norm(
axis=-1, epsilon=1e-6, center=False, scale=False, momentum=0.9)
def call(self, inputs, training=False): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
encoded = self.encoder({'images': inputs})
features = self.batch_norm(encoded[:, 0], training=training)
return self.linear(features)
class VisionTransformer(tf.keras.Model):
"""ViT backbone."""
def __init__(self,
patch_h,
patch_w,
init_stochastic_depth_rate=0.0,
**kwargs):
super().__init__(**kwargs)
self.patch_h = patch_h
self.patch_w = patch_w
self.init_stochastic_depth_rate = init_stochastic_depth_rate
def build(self, input_shape):
self.patch_to_embed = tf.keras.layers.Dense(1024)
# ViT-L
self.encoder = vit.Encoder(
num_layers=24,
mlp_dim=4096,
num_heads=16,
dropout_rate=0.0,
attention_dropout_rate=0.0,
init_stochastic_depth_rate=self.init_stochastic_depth_rate,
add_pos_embed=False,
)
self.token_cls = vit.TokenLayer()
super().build(input_shape)
def to_embed(self, patches):
return self.patch_to_embed(patches)
def insert_cls(self, patch_embeds):
return self.token_cls(patch_embeds)
def add_position_embed(self, patch_embeds):
return patch_embeds + utils.position_embedding_sine(
tf.ones_like(patch_embeds[..., 0]), 1024, normalize=False)
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
if isinstance(inputs, dict):
images = inputs.get('images', None)
patch_embeds = inputs.get('embeddings', None)
else:
raise ValueError('Unexpected inputs type to %s.' % self.__class__)
if images is not None:
patches = to_patch(images, self.patch_h, self.patch_w)
patch_embeds = self.to_embed(patches)
patch_shape = tf.shape(patch_embeds)
patch_embeds = self.add_position_embed(patch_embeds)
patch_embeds = tf.reshape(patch_embeds,
(patch_shape[0], -1, patch_shape[-1]))
patch_embeds = self.insert_cls(patch_embeds)
return self.encoder(patch_embeds)
| 4,523 | 34.904762 | 116 | py |
models | models-master/official/projects/mae/modeling/masked_ae.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for MAE."""
import tensorflow as tf
from official.projects.mae.modeling import utils
from official.vision.modeling.backbones import vit
class MaskedAE(tf.keras.Model):
"""MAE model."""
def __init__(self,
encoder,
name=None,
**kwargs):
super(MaskedAE, self).__init__(name=name, **kwargs)
self.encoder = encoder
self.pixels_per_patch = self.encoder.patch_h * self.encoder.patch_w * 3
def build(self, input_shape):
self.decoder = vit.Encoder(
num_layers=8,
mlp_dim=2048,
num_heads=16,
dropout_rate=0.0,
attention_dropout_rate=0.0,
add_pos_embed=False
)
self.mask = self.add_weight(
'mask', (1, 1, 512),
initializer=tf.keras.initializers.RandomNormal(stddev=0.02))
self.to_pixels = tf.keras.layers.Dense(self.pixels_per_patch)
self.linear = tf.keras.layers.Dense(512)
super().build(input_shape)
def add_position_embed(self, patch_embeds, num_rows, num_cols):
    # patch_embeds is spatially flattened, (N, 1 + H*W, D), including the cls
    # token.
shape = tf.shape(patch_embeds)
position_embedding = utils.position_embedding_sine(
tf.ones((shape[0], num_rows, num_cols), dtype=patch_embeds.dtype),
512, normalize=False)
position_embedding = tf.reshape(
position_embedding, (shape[0], num_rows * num_cols, -1))
return patch_embeds + tf.concat(
[tf.zeros((shape[0], 1, shape[2]), dtype=patch_embeds.dtype),
position_embedding
], axis=1)
def call(self, inputs, training=None, masking=None):
patches = inputs['patches']
masked_indices = tf.cast(inputs['masked_indices'], tf.int32)
unmasked_indices = tf.cast(inputs['unmasked_indices'], tf.int32)
batch_size = tf.shape(patches)[0]
num_h_patches = tf.shape(patches)[1]
num_w_patches = tf.shape(patches)[2]
num_patches = num_h_patches * num_w_patches
num_masks = tf.shape(masked_indices)[1]
patch_embeds = self.encoder.to_embed(patches)
patch_embeds = self.encoder.add_position_embed(patch_embeds)
patch_embeds = tf.reshape(
patch_embeds,
(batch_size, num_patches, -1))
patch_embeds = self.encoder.insert_cls(patch_embeds)
unmasked_indices = tf.concat(
[tf.zeros((batch_size, 1), unmasked_indices.dtype),
unmasked_indices + 1],
axis=1)
masked_indices = masked_indices + 1
unmasked_patch_embeds = tf.gather(
patch_embeds, unmasked_indices, batch_dims=1)
encoded = self.encoder({'embeddings': unmasked_patch_embeds})
encoded = self.linear(encoded)
zeros = tf.zeros((batch_size, num_patches + 1, 512))
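    # Scatter the projected, encoder-visible tokens back to their original
    # sequence positions (masked positions stay zero for now); the learned mask
    # embedding is then added at the masked positions below.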
unmasked_embed = tf.tensor_scatter_nd_add(
zeros,
tf.stack([
tf.tile(
tf.expand_dims(tf.range(batch_size), axis=1),
[1, num_patches + 1 - num_masks]), unmasked_indices
],
axis=-1),
encoded)
mask_embeds = tf.tile(self.mask, [batch_size, num_masks, 1])
full_embed = tf.tensor_scatter_nd_add(
unmasked_embed,
tf.stack([
tf.tile(
tf.expand_dims(tf.range(batch_size), axis=1),
[1, num_masks]), masked_indices
],
axis=-1),
mask_embeds)
full_embed = self.add_position_embed(
full_embed, num_h_patches, num_w_patches)
decoded = self.decoder(full_embed)
pred_pixel_values = self.to_pixels(
tf.gather(decoded, masked_indices, batch_dims=1))
return pred_pixel_values
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(encoder=self.encoder)
return items
| 4,338 | 34.276423 | 75 | py |
models | models-master/official/projects/mae/tasks/linear_probe.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task with ViT and linear probe."""
import dataclasses
from typing import Optional
import tensorflow as tf
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.projects.mae.modeling import vit
from official.projects.mae.tasks import image_classification
from official.vision.dataloaders import classification_input
from official.vision.dataloaders import tfds_factory
@dataclasses.dataclass
class ViTLinearProbeConfig(image_classification.ViTConfig):
"""The LinearProbe task config."""
@task_factory.register_task_cls(ViTLinearProbeConfig)
class ViTLinearProbeTask(base_task.Task):
"""Image classificaiton with ViT and load checkpoint if exists."""
def build_model(self) -> tf.keras.Model:
encoder = vit.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
self.task_config.init_stochastic_depth_rate,
)
# Freeze backbone.
encoder.trainable = False
model = vit.ViTLinearClassifier(encoder, self.task_config.num_classes)
model(tf.ones((1, 224, 224, 3)))
return model
def build_inputs(
self, params, input_context: Optional[tf.distribute.InputContext] = None
):
num_classes = self.task_config.num_classes
input_size = self.task_config.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
color_jitter=params.color_jitter,
random_erasing=params.random_erasing,
dtype=params.dtype,
)
postprocess_fn = lambda images, labels: ( # pylint:disable=g-long-lambda
images,
tf.one_hot(labels, num_classes),
)
reader = input_reader.InputReader(
params=params,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn,
)
dataset = reader.read(input_context=input_context)
return dataset
def initialize(self, model: tf.keras.Model):
"""Load encoder if checkpoint exists.
Args:
model: The keras.Model built or used by this task.
"""
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
checkpoint_items = dict(encoder=model.encoder)
ckpt = tf.train.Checkpoint(**checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
]
return metrics
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
return tf.keras.losses.categorical_crossentropy(
labels, model_outputs, from_logits=True
)
| 3,952 | 33.373913 | 78 | py |
models | models-master/official/projects/mae/tasks/masked_ae.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for masked autoencoder pretraining."""
from typing import Optional
import tensorflow as tf
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.modeling import tf_utils
from official.projects.mae.configs import mae as mae_cfg
from official.projects.mae.modeling import masked_ae
from official.projects.mae.modeling import vit
from official.vision.dataloaders import classification_input
from official.vision.dataloaders import tfds_factory
@task_factory.register_task_cls(mae_cfg.MAEConfig)
class MaskedAETask(base_task.Task):
"""Task for masked autoencoder training."""
def build_model(self) -> tf.keras.Model:
encoder = vit.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
0.0)
# trigger build to be called.
input_size = self.task_config.input_size
encoder({'images': tf.ones((1, input_size[0], input_size[1], 3))})
model = masked_ae.MaskedAE(encoder)
return model
def build_inputs(self,
params,
input_context: Optional[tf.distribute.InputContext] = None):
num_classes = self.task_config.num_classes
input_size = self.task_config.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
color_jitter=params.color_jitter,
random_erasing=params.random_erasing,
dtype=params.dtype,
crop_area_range=params.crop_area_range)
def patch_and_mask(images, labels):
del labels
patches = vit.to_patch(
images, self.task_config.patch_h, self.task_config.patch_w)
batch_size, num_h_patches, num_w_patches = tf_utils.get_shape_list(
patches)[:3]
num_patches = num_h_patches * num_w_patches
num_masked = tf.cast(
self.task_config.masking_ratio * num_patches, dtype=tf.int32)
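      # Per-example random masking: argsort of i.i.d. uniform noise yields an
      # independent random permutation of patch indices for each example; the
      # first `num_masked` indices are masked, the rest are fed to the encoder.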
r = tf.random.uniform((batch_size, num_patches))
rand_indices = tf.argsort(r)
masked_indices = rand_indices[:, :num_masked]
unmasked_indices = rand_indices[:, num_masked:]
patches_1d = tf.reshape(patches, (batch_size, num_patches, -1))
masked_patches = tf.gather(patches_1d, masked_indices, batch_dims=1)
if self.task_config.norm_target:
mean = tf.reduce_mean(masked_patches, axis=-1, keepdims=True)
var = tf.math.reduce_variance(masked_patches, axis=-1, keepdims=True)
std = (var + 1.e-6)**.5
masked_patches = (masked_patches - mean) / std
return {'patches': patches,
'masked_indices': masked_indices,
'unmasked_indices': unmasked_indices}, masked_patches
reader = input_reader.InputReader(
params=params,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=patch_and_mask)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
return tf.keras.metrics.mean_squared_error(
labels, model_outputs)
| 4,154 | 37.831776 | 79 | py |
models | models-master/official/projects/mae/tasks/masked_ae_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for masked_ae."""
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from official.modeling import optimization
from official.projects.mae.configs import mae as mae_cfg
from official.projects.mae.tasks import masked_ae
from official.vision.configs import image_classification
_NUM_EXAMPLES = 10
def _gen_fn():
np.random.seed(0) # Some seed may cause jpeg decoding to fail.
h = np.random.randint(0, 300)
w = np.random.randint(0, 300)
return {
'image': np.ones(shape=(h, w, 3), dtype=np.uint8),
'label': np.random.randint(0, 100),
'file_name': 'test',
}
def _as_dataset(self, *args, **kwargs):
del args
del kwargs
return tf.data.Dataset.from_generator(
lambda: (_gen_fn() for i in range(_NUM_EXAMPLES)),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
class MAETest(tf.test.TestCase):
def test_train_step(self):
config = mae_cfg.MAEConfig(
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=True,
global_batch_size=2,
))
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = masked_ae.MaskedAETask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
opt_cfg = optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.05,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1.5 * 1e-4,
'decay_steps': 5
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 1,
'warmup_learning_rate': 0
}
}
})
optimizer = masked_ae.MaskedAETask.create_optimizer(opt_cfg)
task.train_step(next(iterator), model, optimizer)
if __name__ == '__main__':
tf.test.main()
| 2,881 | 29.020833 | 74 | py |
models | models-master/official/projects/mae/tasks/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_classification."""
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from official.modeling import optimization
from official.projects.mae.tasks import image_classification as vit_cls
from official.vision.configs import image_classification
_NUM_EXAMPLES = 10
def _gen_fn():
h = np.random.randint(0, 300)
w = np.random.randint(0, 300)
return {
'image': np.ones(shape=(h, w, 3), dtype=np.uint8),
'label': np.random.randint(0, 100),
'file_name': 'test',
}
def _as_dataset(self, *args, **kwargs):
del args
del kwargs
return tf.data.Dataset.from_generator(
lambda: (_gen_fn() for i in range(_NUM_EXAMPLES)),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
class ImageClassificationTest(tf.test.TestCase):
def test_train_step(self):
config = vit_cls.ViTConfig(
num_classes=1000,
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=True,
global_batch_size=2,
),
)
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = vit_cls.ViTClassificationTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
opt_cfg = optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.05,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1.5 * 1e-4,
'decay_steps': 5
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 1,
'warmup_learning_rate': 0
}
}
})
optimizer = vit_cls.ViTClassificationTask.create_optimizer(opt_cfg)
task.train_step(next(iterator), model, optimizer)
if __name__ == '__main__':
tf.test.main()
| 2,853 | 28.729167 | 74 | py |
models | models-master/official/projects/mae/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task with ViT."""
import dataclasses
from typing import Optional, Tuple
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import input_reader
from official.core import task_factory
from official.projects.mae.modeling import vit
from official.vision.dataloaders import classification_input
from official.vision.dataloaders import tfds_factory
from official.vision.ops import augment
@dataclasses.dataclass
class ViTConfig(cfg.TaskConfig):
"""The translation task config."""
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
patch_h: int = 14
patch_w: int = 14
num_classes: int = 1000
input_size: Tuple[int, int] = (224, 224)
init_stochastic_depth_rate: float = 0.2
@task_factory.register_task_cls(ViTConfig)
class ViTClassificationTask(base_task.Task):
"""Image classificaiton with ViT and load checkpoint if exists."""
def build_model(self) -> tf.keras.Model:
encoder = vit.VisionTransformer(
self.task_config.patch_h,
self.task_config.patch_w,
self.task_config.init_stochastic_depth_rate)
model = vit.ViTClassifier(encoder, self.task_config.num_classes)
model(tf.ones((1, 224, 224, 3)))
return model
def build_inputs(self,
params,
input_context: Optional[tf.distribute.InputContext] = None):
num_classes = self.task_config.num_classes
input_size = self.task_config.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
color_jitter=params.color_jitter,
random_erasing=params.random_erasing,
dtype=params.dtype)
if params.is_training:
postprocess_fn = augment.MixupAndCutmix(
mixup_alpha=0.8,
cutmix_alpha=1.0,
prob=1.0 if params.is_training else 0.0,
label_smoothing=0.1,
num_classes=num_classes)
else:
postprocess_fn = lambda images, labels: ( # pylint:disable=g-long-lambda
images, tf.one_hot(labels, num_classes))
reader = input_reader.InputReader(
params=params,
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn)
dataset = reader.read(input_context=input_context)
return dataset
def initialize(self, model: tf.keras.Model):
"""Load encoder if checkpoint exists.
Args:
model: The keras.Model built or used by this task.
"""
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
return
checkpoint_items = dict(encoder=model.encoder)
ckpt = tf.train.Checkpoint(**checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
]
return metrics
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
return tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=True)
| 4,476 | 33.705426 | 80 | py |
models | models-master/official/projects/mae/tasks/linear_probe_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_classification."""
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from official.modeling import optimization
from official.projects.mae.tasks import linear_probe
from official.vision.configs import image_classification
_NUM_EXAMPLES = 10
def _gen_fn():
h = np.random.randint(0, 300)
w = np.random.randint(0, 300)
return {
'image': np.ones(shape=(h, w, 3), dtype=np.uint8),
'label': np.random.randint(0, 100),
'file_name': 'test',
}
def _as_dataset(self, *args, **kwargs):
del args
del kwargs
return tf.data.Dataset.from_generator(
lambda: (_gen_fn() for i in range(_NUM_EXAMPLES)),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
class ImageClassificationTest(tf.test.TestCase):
def test_train_step(self):
config = linear_probe.ViTLinearProbeConfig(
num_classes=1000,
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=True,
global_batch_size=2,
))
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = linear_probe.ViTLinearProbeTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
opt_cfg = optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.05,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 1.5 * 1e-4,
'decay_steps': 5
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 1,
'warmup_learning_rate': 0
}
}
})
optimizer = linear_probe.ViTLinearProbeTask.create_optimizer(opt_cfg)
task.train_step(next(iterator), model, optimizer)
if __name__ == '__main__':
tf.test.main()
| 2,848 | 28.989474 | 75 | py |
models | models-master/official/projects/longformer/longformer_encoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer encoder. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
from typing import Any, Callable, List, Optional, Union
from absl import logging
import tensorflow as tf
from official.modeling.tf_utils import get_shape_list
from official.nlp.modeling import layers
from official.projects.longformer.longformer_encoder_block import LongformerEncoderBlock
_Initializer = Union[str, tf.keras.initializers.Initializer]
_approx_gelu = lambda x: tf.keras.activations.gelu(x, approximate=True)
class LongformerEncoder(tf.keras.layers.Layer):
"""LongformerEncoder.
Args:
vocab_size: The size of the token vocabulary.
attention_window: list of ints representing the window size for each layer.
    global_attention_size: the number of leading tokens that use global
      attention.
    pad_token_id: the token id for the pad token.
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network for each transformer.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network for each transformer.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: The dropout rate to use for the attention layers within
the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
output_range: The sequence output range, [0, output_range), by slicing the
target sequence of the last transformer layer. `None` means the entire
target sequence will attend to the source sequence, which yields the full
output.
embedding_width: The width of the word embeddings. If the embedding width is
not equal to hidden size, embedding parameters will be factorized into two
matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
embedding_layer: An optional Layer instance which will be called to generate
embeddings for the input word IDs.
norm_first: Whether to normalize inputs to attention and intermediate dense
layers. If set False, output of attention and intermediate dense layers is
normalized.
"""
def __init__(
self,
vocab_size: int,
attention_window: Union[List[int], int] = 512,
global_attention_size: int = 0,
pad_token_id: int = 1,
hidden_size: int = 768,
num_layers: int = 12,
num_attention_heads: int = 12,
max_sequence_length: int = 512,
type_vocab_size: int = 16,
inner_dim: int = 3072,
inner_activation: Callable[..., Any] = _approx_gelu,
output_dropout: float = 0.1,
attention_dropout: float = 0.1,
initializer: _Initializer = tf.keras.initializers.TruncatedNormal(
stddev=0.02),
output_range: Optional[int] = None,
embedding_width: Optional[int] = None,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
norm_first: bool = False,
**kwargs):
super().__init__(**kwargs)
# Longformer args
self._attention_window = attention_window
self._global_attention_size = global_attention_size
self._pad_token_id = pad_token_id
activation = tf.keras.activations.get(inner_activation)
initializer = tf.keras.initializers.get(initializer)
if embedding_width is None:
embedding_width = hidden_size
if embedding_layer is None:
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
name='word_embeddings')
else:
self._embedding_layer = embedding_layer
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
max_length=max_sequence_length,
name='position_embedding')
self._type_embedding_layer = layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
name='type_embeddings')
self._embedding_norm_layer = tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)
self._embedding_dropout = tf.keras.layers.Dropout(
rate=output_dropout, name='embedding_dropout')
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
self._embedding_projection = None
if embedding_width != hidden_size:
self._embedding_projection = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
kernel_initializer=initializer,
name='embedding_projection')
self._transformer_layers = []
self._attention_mask_layer = layers.SelfAttentionMask(
name='self_attention_mask')
for i in range(num_layers):
layer = LongformerEncoderBlock(
global_attention_size=global_attention_size,
num_attention_heads=num_attention_heads,
inner_dim=inner_dim,
inner_activation=inner_activation,
attention_window=attention_window[i],
layer_id=i,
output_dropout=output_dropout,
attention_dropout=attention_dropout,
norm_first=norm_first,
output_range=output_range if i == num_layers - 1 else None,
kernel_initializer=initializer,
name=f'transformer/layer_{i}')
self._transformer_layers.append(layer)
self._pooler_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
name='pooler_transform')
self._config = {
'vocab_size': vocab_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'inner_dim': inner_dim,
'inner_activation': tf.keras.activations.serialize(activation),
'output_dropout': output_dropout,
'attention_dropout': attention_dropout,
'initializer': tf.keras.initializers.serialize(initializer),
'output_range': output_range,
'embedding_width': embedding_width,
'embedding_layer': embedding_layer,
'norm_first': norm_first,
'attention_window': attention_window,
'global_attention_size': global_attention_size,
'pad_token_id': pad_token_id,
}
self.inputs = dict(
input_word_ids=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_mask=tf.keras.Input(shape=(None,), dtype=tf.int32),
input_type_ids=tf.keras.Input(shape=(None,), dtype=tf.int32))
def call(self, inputs):
word_embeddings = None
if isinstance(inputs, dict):
word_ids = inputs.get('input_word_ids') # input_ids
mask = inputs.get('input_mask') # attention_mask
type_ids = inputs.get('input_type_ids') # token_type_ids
word_embeddings = inputs.get('input_word_embeddings',
None) # input_embeds
else:
raise ValueError(f'Unexpected inputs type to {self.__class__}.')
(
padding_len,
word_ids,
mask,
type_ids,
word_embeddings,
) = self._pad_to_window_size(
word_ids=word_ids,
mask=mask,
type_ids=type_ids,
word_embeddings=word_embeddings,
pad_token_id=self._pad_token_id)
if word_embeddings is None:
word_embeddings = self._embedding_layer(word_ids)
# absolute position embeddings.
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = self._type_embedding_layer(type_ids)
embeddings = word_embeddings + position_embeddings + type_embeddings
embeddings = self._embedding_norm_layer(embeddings)
embeddings = self._embedding_dropout(embeddings)
if self._embedding_projection is not None:
embeddings = self._embedding_projection(embeddings)
batch_size, seq_len = get_shape_list(mask)
# create masks with fixed len global_attention_size
mask = tf.transpose(
tf.concat(
values=[
tf.ones(
(self._global_attention_size, batch_size), tf.int32) * 2,
tf.transpose(mask)[self._global_attention_size:]
],
axis=0))
is_index_masked = tf.math.less(mask, 1)
is_index_global_attn = tf.transpose(
tf.concat(
values=[
tf.ones((self._global_attention_size, batch_size), tf.bool),
tf.zeros((seq_len - self._global_attention_size, batch_size),
tf.bool)
],
axis=0))
# Longformer
attention_mask = mask
extended_attention_mask = tf.reshape(
attention_mask, (tf.shape(mask)[0], tf.shape(mask)[1], 1, 1))
attention_mask = tf.cast(
tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0
encoder_outputs = []
x = embeddings
# TFLongformerEncoder
for layer in self._transformer_layers:
x = layer([x, attention_mask, is_index_masked, is_index_global_attn])
encoder_outputs.append(x)
last_encoder_output = encoder_outputs[-1]
if padding_len > 0:
last_encoder_output = last_encoder_output[:, :-padding_len]
first_token_tensor = last_encoder_output[:, 0, :]
pooled_output = self._pooler_layer(first_token_tensor)
return dict(
sequence_output=last_encoder_output,
pooled_output=pooled_output,
encoder_outputs=encoder_outputs)
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_embedding_layer(self):
return self._embedding_layer
def get_config(self):
return dict(self._config)
@property
def transformer_layers(self):
"""List of Transformer layers in the encoder."""
return self._transformer_layers
@property
def pooler_layer(self):
"""The pooler dense layer after the transformer layers."""
return self._pooler_layer
@classmethod
def from_config(cls, config, custom_objects=None):
if 'embedding_layer' in config and config['embedding_layer'] is not None:
warn_string = (
'You are reloading a model that was saved with a '
          'potentially-shared embedding layer object. If you continue to '
'train this model, the embedding layer will no longer be shared. '
'To work around this, load the model outside of the Keras API.')
print('WARNING: ' + warn_string)
      logging.warning(warn_string)
return cls(**config)
def _pad_to_window_size(
self,
word_ids,
mask,
type_ids,
word_embeddings,
pad_token_id,
):
# padding
attention_window = max(self._attention_window)
assert (attention_window %
            2 == 0), ('`attention_window` should be an even value. '
f'Given {attention_window}')
input_shape = get_shape_list(
word_ids) if word_ids is not None else get_shape_list(word_embeddings)
batch_size, seq_len = input_shape[:2]
if seq_len is not None:
padding_len = (attention_window -
seq_len % attention_window) % attention_window
else:
padding_len = 0
paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]])
if word_ids is not None:
word_ids = tf.pad(word_ids, paddings, constant_values=pad_token_id)
if word_embeddings is not None:
def pad_embeddings():
        word_ids_padding = tf.fill((batch_size, padding_len),
                                   self._pad_token_id)
word_embeddings_padding = self._embedding_layer(word_ids_padding)
return tf.concat([word_embeddings, word_embeddings_padding], axis=-2)
word_embeddings = tf.cond(
tf.math.greater(padding_len, 0), pad_embeddings,
lambda: word_embeddings)
mask = tf.pad(
mask, paddings,
constant_values=False) # no attention on the padding tokens
token_type_ids = tf.pad(
type_ids, paddings, constant_values=0) # pad with token_type_id = 0
return (
padding_len,
word_ids,
mask,
token_type_ids,
word_embeddings,
)
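  # --- Illustrative sketch (added for exposition; not part of the original
  # encoder). `_pad_to_window_size` pads every sequence up to a multiple of the
  # largest attention window, e.g. seq_len=100 with attention_window=64 is
  # padded by (64 - 100 % 64) % 64 = 28 tokens to length 128. The helper below
  # (a hypothetical name) just repeats that arithmetic as a minimal sketch.
  @staticmethod
  def _demo_padding_len(seq_len: int, attention_window: int) -> int:
    return (attention_window - seq_len % attention_window) % attention_window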
| 13,559 | 36.04918 | 88 | py |
models | models-master/official/projects/longformer/longformer_encoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.longformer.longformer_encoder."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.projects.longformer.longformer_encoder import LongformerEncoder
class LongformerEncoderTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(LongformerEncoderTest, self).setUp()
np.random.seed(0)
tf.random.set_seed(0)
@combinations.generate(
combinations.combine(
attention_window=[32, 128], global_attention_size=[0, 1, 2]))
def test_encoder(self, attention_window, global_attention_size):
sequence_length = 128
batch_size = 2
vocab_size = 1024
hidden_size = 256
network = LongformerEncoder(
global_attention_size=global_attention_size,
vocab_size=vocab_size,
attention_window=[attention_window],
hidden_size=hidden_size,
num_layers=1,
num_attention_heads=4,
max_sequence_length=512)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length), dtype=np.int32)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
type_id_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
inputs = {
'input_word_ids': word_id_data,
'input_mask': mask_data,
'input_type_ids': type_id_data,
}
outputs = network(inputs)
self.assertEqual(outputs['sequence_output'].shape,
(batch_size, sequence_length, hidden_size))
@combinations.generate(
combinations.combine(
norm_first=[True, False], global_attention_size=[0, 1, 2]))
def test_norm_first(self, norm_first, global_attention_size):
sequence_length = 128
batch_size = 2
vocab_size = 1024
hidden_size = 256
network = LongformerEncoder(
global_attention_size=global_attention_size,
vocab_size=vocab_size,
attention_window=[32],
hidden_size=hidden_size,
num_layers=1,
num_attention_heads=4,
max_sequence_length=512,
norm_first=norm_first)
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length), dtype=np.int32)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
type_id_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
inputs = {
'input_word_ids': word_id_data,
'input_mask': mask_data,
'input_type_ids': type_id_data,
}
outputs = network(inputs)
self.assertEqual(outputs['sequence_output'].shape,
(batch_size, sequence_length, hidden_size))
if __name__ == '__main__':
tf.test.main()
| 3,454 | 34.255102 | 77 | py |
models | models-master/official/projects/longformer/longformer_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer attention block. Modified From huggingface/transformers."""
# pylint: disable=g-classes-have-attributes
import math
import string
import numpy as np
import tensorflow as tf
from official.modeling.tf_utils import get_shape_list
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.
`bs` and `<non-attention dims>` are treated as `<batch dims>`.
The attention operations can be generalized:
(1) Query-key dot product:
`(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)`
(2) Combination:
`(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)`
Args:
rank: Rank of query, key, value tensors.
attn_axes: List/tuple of axes, `[-1, rank)`, that attention will be applied
to.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = f"{source_notation},{target_notation}->{product_notation}"
attn_scores_rank = len(product_notation)
combine_equation = f"{product_notation},{source_notation}->{target_notation}"
return dot_product_equation, combine_equation, attn_scores_rank
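# Example (added for exposition, derived by hand from the rules above, so treat
# it as a sketch rather than authoritative output): for rank=4 inputs shaped
# `(batch, seq, num_heads, head_dim)` and attn_axes=(1,), the helper returns
#   dot_product_equation == "aecd,abcd->acbe"
#   combine_equation     == "acbe,aecd->abcd"
#   attn_scores_rank     == 4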
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = f"{input_str},{kernel_str}->{output_str}"
return equation, bias_axes, len(output_str)
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
@tf.keras.utils.register_keras_serializable(package="Text")
class LongformerAttention(tf.keras.layers.MultiHeadAttention):
"""LongformerAttention.
Args:
attention_window: int representing the window size for attention.
layer_id: int of the id of the layer.
    global_attention_size: the number of leading tokens that use global
      attention.
"""
def __init__(self, attention_window, layer_id, global_attention_size,
**kwargs):
super().__init__(**kwargs)
self._layer_id = layer_id
self._attention_window = attention_window
assert (self._attention_window % 2 == 0), (
f"`attention_window` for layer {self._layer_id} has to be an even "
f"value. Given {self.attention_window}")
assert (self._attention_window > 0), (
f"`attention_window` for layer {self._layer_id} has to be positive. "
f"Given {self.attention_window}")
self._one_sided_attn_window_size = self._attention_window // 2
self.global_attention_size = global_attention_size
def _build_from_signature(self, query, value, key=None):
"""Builds layers and variables.
Once the method is called, self._built_from_signature will be set to True.
Args:
query: Query tensor or TensorShape.
value: Value tensor or TensorShape.
key: Key tensor or TensorShape.
"""
self._built_from_signature = True
if hasattr(query, "shape"):
self._query_shape = tf.TensorShape(query.shape)
else:
self._query_shape = tf.TensorShape(query)
if hasattr(value, "shape"):
self._value_shape = tf.TensorShape(value.shape)
else:
self._value_shape = tf.TensorShape(value)
if key is None:
self._key_shape = self._value_shape
elif hasattr(key, "shape"):
self._key_shape = tf.TensorShape(key.shape)
else:
self._key_shape = tf.TensorShape(key)
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
# with tf_utils.maybe_init_scope(self):
# TODO(crickwu): check whether tf_utils.maybe_init_scope(self) (keras)
# is needed.
free_dims = self._query_shape.rank - 1
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self._query_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="query",
**common_kwargs)
self._global_query_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="global_query",
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._key_shape.rank - 1, bound_dims=1, output_dims=2)
self._key_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="key",
**common_kwargs)
self._global_key_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="global_key",
**common_kwargs)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
self._value_shape.rank - 1, bound_dims=1, output_dims=2)
self._value_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="value",
**common_kwargs)
self._global_value_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._value_dim]),
bias_axes=bias_axes if self._use_bias else None,
name="global_value",
**common_kwargs)
# Builds the attention computations for multi-head dot product attention.
# These computations could be wrapped into the keras attention layer once
    # it supports multi-head einsum computations.
self._build_attention(output_rank)
self._global_dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
# self._output_dense = self._make_output_dense(
# free_dims, common_kwargs, "attention_output")
self._output_dense = tf.keras.layers.Dense(
units=self._num_heads * self._key_dim, name="dense", **common_kwargs)
def call(self,
hidden_states,
attention_mask=None,
is_index_masked=None,
is_index_global_attn=None,
training=None):
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
hidden_states: inputs for generating query, key and value tensors.
attention_mask: a boolean mask of shape `(B, T, S)`, that prevents
attention to certain positions.
is_index_masked: boolean indicating whether the index is masked.
is_index_global_attn: boolean indicating whether the index is global
attention.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
"""
if not self._built_from_signature:
self._build_from_signature(
query=hidden_states, value=hidden_states, key=hidden_states)
# N = `num_attention_heads`
# H = `size_per_head`
# `query` = [B, T, N ,H]
query = self._query_dense(hidden_states)
# `key` = [B, S, N, H]
key = self._key_dense(hidden_states)
# `value` = [B, S, N, H]
value = self._value_dense(hidden_states)
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
batch_size, seq_len, num_heads, head_dim = get_shape_list(query)
# attn_probs = (batch_size, seq_len, num_heads, window*2+1)
attn_scores = self._sliding_chunks_query_key_matmul(
query, key, self._one_sided_attn_window_size)
# diagonal mask with zeros everywhere and -inf inplace of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
tf.ones(get_shape_list(attention_mask)),
attention_mask,
self._one_sided_attn_window_size,
)
# pad local attention probs
attn_scores += diagonal_mask
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(attn_scores),
[
batch_size, seq_len, self._num_heads,
self._one_sided_attn_window_size * 2 + 1
],
message=f"attn_probs should be of size "
f"({batch_size}, {seq_len}, {num_heads}, "
f"{self._one_sided_attn_window_size * 2 + 1}),"
f" but is of size {get_shape_list(attn_scores)}",
)
# compute global attn indices required through out forward fn
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn,
self.global_attention_size)
# this function is only relevant for global attention
if self.global_attention_size > 0:
attn_scores = self._concat_with_global_key_attn_probs(
attn_scores=attn_scores,
query_vectors=query,
key_vectors=key,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
else:
pass
attn_probs = tf.nn.softmax(attn_scores, axis=-1)
# softmax sometimes inserts NaN if all positions are masked,
# replace them with 0
# Make sure to create a mask with the proper shape:
# if is_global_attn==True => [batch_size, seq_len, self.num_heads,
# self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1]
# if is_global_attn==False => [batch_size, seq_len, self.num_heads,
# self.one_sided_attn_window_size * 2 + 1]
if self.global_attention_size > 0:
masked_index = tf.tile(
is_index_masked[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 +
max_num_global_attn_indices + 1),
)
else:
masked_index = tf.tile(
is_index_masked[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 + 1),
)
attn_probs = tf.where(
masked_index,
tf.zeros(get_shape_list(masked_index), dtype=attn_probs.dtype),
attn_probs,
)
layer_head_mask = None
if layer_head_mask is not None:
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(layer_head_mask),
[self._num_heads],
message=f"Head mask for a single layer should be of size "
f"{(self._num_heads)}, but is "
f"{get_shape_list(layer_head_mask)}",
)
attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs
# apply dropout
attn_probs = self._dropout_layer(attn_probs, training=training)
value_vectors = tf.reshape(
value, (batch_size, seq_len, self._num_heads, self._key_dim))
# if global attention, compute sum of global and local attn
if self.global_attention_size > 0:
attn_output = self._compute_attn_output_with_global_indices(
value_vectors=value_vectors,
attn_probs=attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
else:
attn_output = self._sliding_chunks_matmul_attn_probs_value(
attn_probs, value_vectors, self._one_sided_attn_window_size)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(attn_output),
[batch_size, seq_len, self._num_heads, head_dim],
message="Unexpected size",
)
attn_output = tf.reshape(
attn_output,
(batch_size, seq_len, self._num_heads * self._key_dim)) # FIXME
# compute value for global attention and overwrite to attention output
# TODO(crickwu): remove the redundant computation
if self.global_attention_size > 0:
attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( # pylint: disable=unused-variable
attn_output=attn_output,
hidden_states=hidden_states,
max_num_global_attn_indices=max_num_global_attn_indices,
layer_head_mask=layer_head_mask,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=is_index_masked,
training=training,
)
else:
global_attn_probs = tf.zeros(
(batch_size, self._num_heads, max_num_global_attn_indices, seq_len))
# make sure that local attention probabilities are set to 0 for indices of
# global attn
if self.global_attention_size > 0:
masked_global_attn_index = tf.tile(
is_index_global_attn[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 +
max_num_global_attn_indices + 1),
)
else:
masked_global_attn_index = tf.tile(
is_index_global_attn[:, :, None, None],
(1, 1, self._num_heads, self._one_sided_attn_window_size * 2 + 1),
)
attn_probs = tf.where(
masked_global_attn_index,
tf.zeros(
get_shape_list(masked_global_attn_index), dtype=attn_probs.dtype),
attn_probs,
)
# we can return extra information here
# (attn_output, attn_probs, global_attn_probs)
return attn_output
def get_config(self):
    config = {
        "layer_id": self._layer_id,
        "attention_window": self._attention_window,
        "global_attention_size": self.global_attention_size,
    }
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def _sliding_chunks_query_key_matmul(self, query, key, window_overlap):
"""Matrix multiplication of query and key tensors.
This multiplication uses a sliding window attention pattern.
This implementation splits the input into overlapping chunks of size
2w (e.g. 512 for pretrained Longformer) with an overlap of size
window_overlap.
Args:
query: query tensor.
key: key tensor.
window_overlap: int.
Returns:
diagonal_attention_scores: tensor.
"""
batch_size, seq_len, num_heads, head_dim = get_shape_list(query)
if tf.executing_eagerly():
tf.debugging.assert_equal(
seq_len % (window_overlap * 2),
0,
message=f"Sequence length should be multiple of {window_overlap * 2}. "
f"Given {seq_len}",
)
tf.debugging.assert_equal(
get_shape_list(query),
get_shape_list(key),
message=f"Shape of query and key should be equal, but got query: "
f"{get_shape_list(query)} and key: {get_shape_list(key)}",
)
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one,
# then chunk seq_len into chunks of size window_overlap * 2
query = tf.reshape(
tf.transpose(query, (0, 2, 1, 3)),
(batch_size * num_heads, seq_len, head_dim),
)
key = tf.reshape(
tf.transpose(key, (0, 2, 1, 3)),
(batch_size * num_heads, seq_len, head_dim))
chunked_query = self._chunk(query, window_overlap)
chunked_key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
chunked_query = tf.cast(chunked_query, dtype=chunked_key.dtype)
chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query,
chunked_key) # multiply
# convert diagonals into columns
paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]])
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
chunked_attention_scores, paddings)
# allocate space for the overall attention matrix where the chunks are
# combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns
# are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score
# from each word to itself, then
# followed by window_overlap columns for the upper triangle.
# copy parts from diagonal_chunked_attention_scores into the combined matrix
# of attentions - copying the main diagonal and the upper triangle
# TODO(crickwu): This code is most likely not very efficient and should be
# improved.
diagonal_attn_scores_up_triang = tf.concat(
[
diagonal_chunked_attention_scores[:, :, :window_overlap, :
window_overlap + 1],
diagonal_chunked_attention_scores[:, -1:,
window_overlap:, :window_overlap +
1],
],
axis=1,
)
# - copying the lower triangle
diagonal_attn_scores_low_triang = tf.concat(
[
tf.zeros(
(batch_size * num_heads, 1, window_overlap, window_overlap),
dtype=diagonal_chunked_attention_scores.dtype,
),
diagonal_chunked_attention_scores[:, :, -(window_overlap + 1):-1,
window_overlap + 1:],
],
axis=1,
)
diagonal_attn_scores_first_chunk = tf.concat(
[
tf.roll(
diagonal_chunked_attention_scores,
shift=[1, window_overlap],
axis=[2, 3],
)[:, :, :window_overlap, :window_overlap],
tf.zeros(
(batch_size * num_heads, 1, window_overlap, window_overlap),
dtype=diagonal_chunked_attention_scores.dtype,
),
],
axis=1,
)
first_chunk_mask = (
tf.tile(
tf.range(chunks_count + 1)[None, :, None, None],
(batch_size * num_heads, 1, window_overlap, window_overlap),
) < 1)
diagonal_attn_scores_low_triang = tf.where(
first_chunk_mask,
diagonal_attn_scores_first_chunk,
diagonal_attn_scores_low_triang,
)
# merging upper and lower triangle
diagonal_attention_scores = tf.concat(
[diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang],
axis=-1)
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = tf.transpose(
tf.reshape(
diagonal_attention_scores,
(batch_size, num_heads, seq_len, 2 * window_overlap + 1),
),
(0, 2, 1, 3),
)
diagonal_attention_scores = self._mask_invalid_locations(
diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
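  # Worked example (added for exposition): with seq_len=512 and
  # window_overlap=128 (i.e. an attention window of 256), every query position
  # scores 2 * 128 + 1 = 257 keys (128 to the left, itself, 128 to the right),
  # so the returned tensor has shape (batch_size, 512, num_heads, 257).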
@staticmethod
def _mask_invalid_locations(input_tensor, window_overlap):
# create correct upper triangle bool mask
mask_2d_upper = tf.reverse(
tf.linalg.band_part(
tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0),
axis=[0],
)
# pad to full matrix
padding = tf.convert_to_tensor(
[[0, get_shape_list(input_tensor)[1] - window_overlap],
[0, get_shape_list(input_tensor)[3] - window_overlap - 1]])
# create lower mask
mask_2d = tf.pad(mask_2d_upper, padding)
# combine with upper mask
mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1])
# broadcast to full matrix
mask_4d = tf.tile(mask_2d[None, :, None, :],
(get_shape_list(input_tensor)[0], 1, 1, 1))
# inf tensor used for masking
inf_tensor = -float("inf") * tf.ones_like(input_tensor)
# mask
input_tensor = tf.where(
tf.math.greater(mask_4d, 0), inf_tensor, input_tensor)
return input_tensor
def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value,
window_overlap):
"""Same as _sliding_chunks_query_key_matmul but for attn_probs and value."""
batch_size, seq_len, num_heads, head_dim = get_shape_list(value)
if tf.executing_eagerly():
tf.debugging.assert_equal(
seq_len % (window_overlap * 2),
0,
message="Seq_len has to be multiple of 2 * window_overlap",
)
tf.debugging.assert_equal(
get_shape_list(attn_probs)[:3],
get_shape_list(value)[:3],
message="value and attn_probs must have same dims (except head_dim)",
)
tf.debugging.assert_equal(
get_shape_list(attn_probs)[3],
2 * window_overlap + 1,
message="attn_probs last dim has to be 2 * window_overlap + 1",
)
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len
# into chunks of size 2 window overlap
chunked_attn_probs = tf.reshape(
tf.transpose(attn_probs, (0, 2, 1, 3)),
(
batch_size * num_heads,
seq_len // window_overlap,
window_overlap,
2 * window_overlap + 1,
),
)
# group batch_size and num_heads dimensions into one
value = tf.reshape(
tf.transpose(value, (0, 2, 1, 3)),
(batch_size * num_heads, seq_len, head_dim),
)
# pad seq_len with w at the beginning of the sequence and another window
# overlap at the end
paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap],
[0, 0]])
padded_value = tf.pad(value, paddings, constant_values=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of
# size window overlap
frame_size = 3 * window_overlap * head_dim
frame_hop_size = (get_shape_list(padded_value)[1] * head_dim -
frame_size) // chunks_count
chunked_value = tf.signal.frame(
tf.reshape(padded_value, (batch_size * num_heads, -1)),
frame_size,
frame_hop_size,
)
chunked_value = tf.reshape(
chunked_value,
(batch_size * num_heads, chunks_count + 1, 3 * window_overlap,
head_dim),
)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(chunked_value),
[
batch_size * num_heads, chunks_count + 1, 3 * window_overlap,
head_dim
],
message="Chunked value has the wrong shape",
)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value)
context = tf.transpose(
tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)),
(0, 2, 1, 3),
)
return context
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings):
"""Pads rows and then flips rows and columns."""
hidden_states_padded = tf.pad(
hidden_states_padded, paddings
) # padding value is not important because it will be overwritten
batch_size, chunk_size, seq_length, hidden_dim = get_shape_list(
hidden_states_padded)
hidden_states_padded = tf.reshape(
hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length))
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""Shifts every row 1 step right, converting columns into diagonals.
Example::
chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492,
-1.8348, 0.7672, 0.2986, 0.0285,
-0.7584, 0.4206, -0.0405, 0.1599,
2.0514, -1.1600, 0.5372, 0.2629 ]
window_overlap = num_rows = 4
(pad & diagonalize) =>
[ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
Args:
chunked_hidden_states: tensor.
Returns:
padded_hidden_stategs: tensor.
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = get_shape_list(
chunked_hidden_states)
paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0],
[0, window_overlap + 1]])
chunked_hidden_states = tf.pad(chunked_hidden_states, paddings)
chunked_hidden_states = tf.reshape(chunked_hidden_states,
(total_num_heads, num_chunks, -1))
chunked_hidden_states = chunked_hidden_states[:, :, :-window_overlap]
chunked_hidden_states = tf.reshape(
chunked_hidden_states,
(total_num_heads, num_chunks, window_overlap,
window_overlap + hidden_dim),
)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w."""
batch_size, seq_length, hidden_dim = get_shape_list(hidden_states)
num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1
# define frame size and frame stride (similar to convolution)
frame_hop_size = window_overlap * hidden_dim
frame_size = 2 * frame_hop_size
hidden_states = tf.reshape(hidden_states,
(batch_size, seq_length * hidden_dim))
# chunk with overlap
chunked_hidden_states = tf.signal.frame(hidden_states, frame_size,
frame_hop_size)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(chunked_hidden_states),
[batch_size, num_output_chunks, frame_size],
message=f"Make sure chunking is correctly applied. `Chunked hidden "
f"states should have output dimension"
f" {[batch_size, frame_size, num_output_chunks]}, but got "
f"{get_shape_list(chunked_hidden_states)}.",
)
chunked_hidden_states = tf.reshape(
chunked_hidden_states,
(batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),
)
return chunked_hidden_states
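  # Worked example (added for exposition): with seq_length=8 and
  # window_overlap=2, the frame size is 4 tokens with a hop of 2, giving
  # 2 * (8 // 4) - 1 = 3 overlapping chunks that cover tokens [0:4], [2:6] and
  # [4:8], i.e. an output of shape (batch_size, 3, 4, hidden_dim).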
@staticmethod
def _get_global_attn_indices(is_index_global_attn, global_attention_size):
"""Computes global attn indices required throughout forward pass."""
# All global attention size are fixed through global_attention_size
batch_size, _ = get_shape_list(is_index_global_attn)
max_num_global_attn_indices = global_attention_size
row_indices = tf.range(batch_size)
row_indices = tf.repeat(
tf.expand_dims(row_indices, axis=0),
repeats=[global_attention_size],
axis=0)
row_indices = tf.reshape(row_indices,
(batch_size * global_attention_size, 1))
col_indices = tf.range(global_attention_size)
col_indices = tf.repeat(
tf.expand_dims(col_indices, axis=1), repeats=[batch_size], axis=0)
is_index_global_attn_nonzero = tf.concat((row_indices, col_indices), axis=1)
# this is actually same as `is_index_global_attn_nonzero`,
# since we assume all global attention are the same size
is_local_index_global_attn_nonzero = tf.concat((row_indices, col_indices),
axis=1)
# empty tensor
is_local_index_no_global_attn_nonzero = tf.reshape(
tf.expand_dims(tf.range(0), axis=1), (0, 2))
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
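  # Example (added for exposition): because the global positions are fixed to
  # the first `global_attention_size` tokens, batch_size=2 with
  # global_attention_size=2 yields the index pairs
  # {(0, 0), (0, 1), (1, 0), (1, 1)} in `is_index_global_attn_nonzero` (and, by
  # construction, the same pairs in `is_local_index_global_attn_nonzero`),
  # while `is_local_index_no_global_attn_nonzero` is empty.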
def _concat_with_global_key_attn_probs(
self,
attn_scores,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = get_shape_list(key_vectors)[0]
# select global key vectors
global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero)
# create only global key vectors
key_vectors_only_global = tf.scatter_nd(
is_local_index_global_attn_nonzero,
global_key_vectors,
shape=(
batch_size,
max_num_global_attn_indices,
self._num_heads,
self._key_dim,
),
)
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = tf.einsum("blhd,bshd->blhs", query_vectors,
key_vectors_only_global)
# (batch_size, max_num_global_attn_indices, seq_len, num_heads)
attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key,
(0, 3, 1, 2))
mask_shape = (
get_shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
get_shape_list(attn_probs_from_global_key_trans)[-2:])
mask = tf.ones(mask_shape) * -10000.0
mask = tf.cast(mask, dtype=attn_probs_from_global_key_trans.dtype)
# scatter mask
attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update(
attn_probs_from_global_key_trans,
is_local_index_no_global_attn_nonzero,
mask,
)
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans,
(0, 2, 3, 1))
# concat to attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1)
return attn_scores
def _compute_attn_output_with_global_indices(
self,
value_vectors,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
):
batch_size = get_shape_list(attn_probs)[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices]
# select global value vectors
global_value_vectors = tf.gather_nd(value_vectors,
is_index_global_attn_nonzero)
# create only global value vectors
value_vectors_only_global = tf.scatter_nd(
is_local_index_global_attn_nonzero,
global_value_vectors,
shape=(
batch_size,
max_num_global_attn_indices,
self._num_heads,
self._key_dim,
),
)
# compute attn output only global
attn_output_only_global = tf.einsum("blhs,bshd->blhd",
attn_probs_only_global,
value_vectors_only_global)
# reshape attn probs
attn_probs_without_global = attn_probs[:, :, :,
max_num_global_attn_indices:]
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
attn_probs_without_global, value_vectors,
self._one_sided_attn_window_size)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
attn_output,
hidden_states,
max_num_global_attn_indices,
layer_head_mask,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
training,
):
batch_size, seq_len = get_shape_list(hidden_states)[:2]
# prepare global hidden states
global_attn_hidden_states = tf.gather_nd(hidden_states,
is_index_global_attn_nonzero)
global_attn_hidden_states = tf.scatter_nd(
is_local_index_global_attn_nonzero,
global_attn_hidden_states,
shape=(batch_size, max_num_global_attn_indices,
self._num_heads * self._key_dim),
)
# global key, query, value
global_query_vectors_only_global = self._global_query_dense(
global_attn_hidden_states)
global_key_vectors = self._global_key_dense(hidden_states)
global_value_vectors = self._global_value_dense(hidden_states)
# normalize
global_query_vectors_only_global /= tf.math.sqrt(
tf.cast(self._key_dim, dtype=global_query_vectors_only_global.dtype))
global_query_vectors_only_global = self.reshape_and_transpose(
global_query_vectors_only_global, batch_size)
global_key_vectors = self.reshape_and_transpose(global_key_vectors,
batch_size)
global_value_vectors = self.reshape_and_transpose(global_value_vectors,
batch_size)
# compute attn scores
global_attn_scores = tf.matmul(
global_query_vectors_only_global, global_key_vectors, transpose_b=True)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(global_attn_scores),
[batch_size * self._num_heads, max_num_global_attn_indices, seq_len],
message=f"global_attn_scores have the wrong size. Size should be"
f"{(batch_size * self._num_heads, max_num_global_attn_indices, seq_len)}, "
f"but is {get_shape_list(global_attn_scores)}.",
)
global_attn_scores = tf.reshape(
global_attn_scores,
(batch_size, self._num_heads, max_num_global_attn_indices, seq_len),
)
global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3))
mask_shape = (get_shape_list(is_local_index_no_global_attn_nonzero)[0],
) + tuple(get_shape_list(global_attn_scores_trans)[-2:])
global_attn_mask = tf.ones(mask_shape) * -10000.0
global_attn_mask = tf.cast(
global_attn_mask, dtype=global_attn_scores_trans.dtype)
# scatter mask
global_attn_scores_trans = tf.tensor_scatter_nd_update(
global_attn_scores_trans,
is_local_index_no_global_attn_nonzero,
global_attn_mask,
)
global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3))
# mask global attn scores
attn_mask = tf.tile(is_index_masked[:, None, None, :],
(1, get_shape_list(global_attn_scores)[1], 1, 1))
global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores)
global_attn_scores = tf.reshape(
global_attn_scores,
(batch_size * self._num_heads, max_num_global_attn_indices, seq_len),
)
# compute global attn probs
global_attn_probs_float = tf.nn.softmax(global_attn_scores, axis=-1)
# apply layer head masking
if layer_head_mask is not None:
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(layer_head_mask),
[self._num_heads],
message=f"Head mask for a single layer should be of size "
f"{(self._num_heads)}, but is {get_shape_list(layer_head_mask)}",
)
global_attn_probs_float = tf.reshape(
layer_head_mask,
(1, -1, 1, 1)) * tf.reshape(global_attn_probs_float,
(batch_size, self._num_heads,
max_num_global_attn_indices, seq_len))
global_attn_probs_float = tf.reshape(
global_attn_probs_float,
(batch_size * self._num_heads, max_num_global_attn_indices, seq_len))
# dropout
global_attn_probs = self._global_dropout_layer(
global_attn_probs_float, training=training)
# global attn output
global_attn_output = tf.matmul(global_attn_probs, global_value_vectors)
if tf.executing_eagerly():
tf.debugging.assert_equal(
get_shape_list(global_attn_output),
[
batch_size * self._num_heads, max_num_global_attn_indices,
self._key_dim
],
message=f"global_attn_output tensor has the wrong size. Size should be "
f"{(batch_size * self._num_heads, max_num_global_attn_indices, self._key_dim)}, "
f"but is {get_shape_list(global_attn_output)}.",
)
global_attn_output = tf.reshape(
global_attn_output,
(batch_size, self._num_heads, max_num_global_attn_indices,
self._key_dim),
)
# get only non zero global attn output
nonzero_global_attn_output = tf.gather_nd(
tf.transpose(global_attn_output, (0, 2, 1, 3)),
is_local_index_global_attn_nonzero,
)
nonzero_global_attn_output = tf.reshape(
nonzero_global_attn_output,
(get_shape_list(is_local_index_global_attn_nonzero)[0], -1),
)
# overwrite values with global attention
attn_output = tf.tensor_scatter_nd_update(attn_output,
is_index_global_attn_nonzero,
nonzero_global_attn_output)
global_attn_probs = tf.reshape(
global_attn_probs,
(batch_size, self._num_heads, max_num_global_attn_indices, seq_len))
attn_output = self._output_dense(attn_output)
return attn_output, global_attn_probs
def reshape_and_transpose(self, vector, batch_size):
return tf.reshape(
tf.transpose(
tf.reshape(vector,
(batch_size, -1, self._num_heads, self._key_dim)),
(0, 2, 1, 3),
),
(batch_size * self._num_heads, -1, self._key_dim),
)
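if __name__ == "__main__":
  # Minimal, self-contained sketch (illustrative only, not part of the layer):
  # the head-splitting reshape performed by `reshape_and_transpose` above maps
  # (batch, seq, num_heads * key_dim) -> (batch * num_heads, seq, key_dim) so
  # the per-head global-attention matmuls can run as one batched matmul.
  _batch, _seq, _heads, _dim = 2, 6, 3, 4
  _vec = tf.random.normal((_batch, _seq, _heads * _dim))
  _split = tf.reshape(
      tf.transpose(
          tf.reshape(_vec, (_batch, _seq, _heads, _dim)), (0, 2, 1, 3)),
      (_batch * _heads, _seq, _dim))
  print(_split.shape)  # (6, 6, 4)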
| 41,493 | 37.313943 | 119 | py |
models | models-master/official/projects/longformer/longformer_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer experiments."""
# pylint: disable=g-doc-return-or-yield,line-too-long
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.tasks import masked_lm
from official.nlp.tasks import sentence_prediction
from official.projects.longformer.longformer import LongformerEncoderConfig
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
@dataclasses.dataclass
class LongformerOptimizationConfig(optimization.OptimizationConfig):
"""Longformer optimization configuration."""
optimizer: optimization.OptimizerConfig = optimization.OptimizerConfig(
type='adamw',
adamw=AdamWeightDecay(
weight_decay_rate=0.01,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
epsilon=1e-6))
learning_rate: optimization.LrConfig = optimization.LrConfig(
type='polynomial',
polynomial=PolynomialLr(
initial_learning_rate=1e-4,
decay_steps=1000000,
end_learning_rate=0.0))
warmup: optimization.WarmupConfig = optimization.WarmupConfig(
type='polynomial', polynomial=PolynomialWarmupConfig(warmup_steps=10000))
@exp_factory.register_config_factory('longformer/pretraining')
def longformer_pretraining() -> cfg.ExperimentConfig:
"""Longformer pretraining experiment."""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=masked_lm.MaskedLMConfig(
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
type='any', any=LongformerEncoderConfig()),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
]),
train_data=pretrain_dataloader.BertPretrainDataConfig(
use_v2_feature_names=True),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
use_v2_feature_names=True, is_training=False)),
trainer=cfg.TrainerConfig(
optimizer_config=LongformerOptimizationConfig(), train_steps=1000000),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('longformer/glue')
def longformer_glue() -> cfg.ExperimentConfig:
"""Longformer glue fine-tuning."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
type='any', any=LongformerEncoderConfig())),
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
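if __name__ == '__main__':
  # Minimal smoke check (illustrative only): the experiments registered above
  # can be fetched through the experiment factory once this module is imported.
  for _name in ('longformer/pretraining', 'longformer/glue'):
    _config = exp_factory.get_exp_config(_name)
    print(_name, type(_config.task).__name__)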
| 4,844 | 38.072581 | 80 | py |
models | models-master/official/projects/longformer/longformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer attention layer. Modified From huggingface/transformers."""
import tensorflow as tf
from official.projects.longformer.longformer_attention import LongformerAttention
@tf.keras.utils.register_keras_serializable(package="Text")
class LongformerEncoderBlock(tf.keras.layers.Layer):
"""LongformerEncoderBlock.
Args:
    global_attention_size: Number of tokens that use global attention.
    attention_window: Size of the local sliding attention window used by this
      block.
    layer_id: Index of this block within the encoder stack.
    num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
    **kwargs: keyword arguments.
"""
def __init__(
self,
global_attention_size,
num_attention_heads,
inner_dim,
inner_activation,
# Longformer
attention_window,
layer_id=0,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
super().__init__(**kwargs)
self.global_attention_size = global_attention_size
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
# Longformer
self._attention_window = attention_window
self._layer_id = layer_id
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = self._kernel_initializer
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
f"The type of input shape argument is not supported, got: "
f"{type(input_shape)}")
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
f"The input size ({hidden_size}) is not a multiple of the number of attention "
f"heads ({self._num_heads})")
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# TFLongformerSelfAttention + TFLongformerSelfOutput.dense
self._attention_layer = LongformerAttention(
# Longformer
layer_id=self._layer_id,
global_attention_size=self.global_attention_size,
attention_window=self._attention_window,
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
# TFLongformerSelfOutput.dropout
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
# TFLongformerSelfOutput.Layernorm
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
# TFLongformerIntermediate
# TFLongformerIntermediate.dense
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=self._kernel_initializer,
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
# TFLongformerIntermediate.intermediate_act_fn
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
# TFLongformerOutput
# TFLongformerOutput.dense
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=self._kernel_initializer,
**common_kwargs)
# TFLongformerOutput.dropout
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# TFLongformerOutput.layernorm
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes":
self._attention_axes,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
      inputs: a single tensor or a list of tensors. `input tensor` as the
        single sequence of embeddings, or [`input tensor`, `attention mask`,
        `is_index_masked`, `is_index_global_attn`] to also provide the
        attention mask and the boolean masks that mark padded positions and
        positions with global attention.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 4:
(
input_tensor,
attention_mask,
is_index_masked,
is_index_global_attn,
) = inputs
key_value = None
elif len(inputs) == 5:
        raise ValueError(
            "A separate key/value stream (a 5-element input list) is not "
            "supported by this Longformer encoder block.")
else:
raise ValueError(
f"Unexpected inputs to {self.__class__} with length at {len(inputs)}"
)
else:
input_tensor = inputs
attention_mask = None
is_index_masked = None
is_index_global_attn = None
key_value = None
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
if is_index_masked is not None:
is_index_masked = is_index_masked[:, 0:self._output_range]
if is_index_global_attn is not None:
is_index_global_attn = is_index_global_attn[:, 0:self._output_range]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
hidden_states=target_tensor,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
)
# TFLongformerAttention.TFLongformerSelfOutput.* - {.dense}
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
# TFLongformerIntermediate
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
# TFLongformerOutput
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
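if __name__ == "__main__":
  # Minimal sketch (illustrative only, not part of the layer): build a block
  # and round-trip it through the Keras config machinery. Sizes are arbitrary;
  # the hidden dimension of any real input must be divisible by
  # num_attention_heads.
  _block = LongformerEncoderBlock(
      global_attention_size=1,
      num_attention_heads=4,
      inner_dim=128,
      inner_activation="gelu",
      attention_window=8)
  _clone = LongformerEncoderBlock.from_config(_block.get_config())
  print(_clone.get_config()["attention_window"])  # 8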
| 13,814 | 39.513196 | 89 | py |
models | models-master/official/projects/longformer/longformer_attention_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.projects.longformer.longformer_attention."""
import numpy as np
import tensorflow as tf
from official.modeling.tf_utils import get_shape_list
from official.projects.longformer import longformer_attention
def _create_mock_attention_data(num_heads,
key_dim,
value_dim,
q_seq_length,
kv_seq_length,
batch_size,
include_mask=False):
"""Creates mock testing data.
Args:
num_heads: `int`, Number of attention heads.
key_dim: `int`, Size of query head.
value_dim: `int`, Size of key, value dim.
q_seq_length: `int`, query sequence length of the input.
kv_seq_length: `int`, key, value sequence length of the input.
batch_size: `int`, the batch size.
include_mask: optional `bool`, whether or not to include mask data.
Returns:
A dictionary with `str` as keys and `Tensor` as values.
"""
query_shape = (batch_size, q_seq_length, key_dim)
value_shape = (batch_size, kv_seq_length, value_dim)
data = dict(
query=tf.random.normal(shape=query_shape),
value=tf.random.normal(shape=value_shape),
key=tf.random.normal(shape=value_shape))
total_seq_length = kv_seq_length
if include_mask:
mask_shape = (batch_size, num_heads, q_seq_length, total_seq_length)
mask_data = np.random.randint(2, size=mask_shape).astype('float32')
mask_data = dict(attention_mask=mask_data)
data.update(mask_data)
return data
class LongformerAttentionTest(tf.test.TestCase):
def setUp(self):
super(LongformerAttentionTest, self).setUp()
np.random.seed(0)
tf.random.set_seed(0)
def _get_hidden_states(self):
return tf.convert_to_tensor(
[[
[
4.98332758e-01,
2.69175139e00,
-7.08081422e-03,
1.04915401e00,
-1.83476661e00,
7.67220476e-01,
2.98580543e-01,
2.84803992e-02,
],
[
-7.58357372e-01,
4.20635998e-01,
-4.04739919e-02,
1.59924145e-01,
2.05135748e00,
-1.15997978e00,
5.37166397e-01,
2.62873606e-01,
],
[
-1.69438001e00,
4.17574660e-01,
-1.49196962e00,
-1.76483717e00,
-1.94566312e-01,
-1.71183858e00,
7.72903565e-01,
-1.11557056e00,
],
[
5.44028163e-01,
2.05466114e-01,
-3.63045868e-01,
2.41865062e-01,
3.20348382e-01,
-9.05611176e-01,
-1.92690727e-01,
-1.19917547e00,
],
]],
dtype=tf.float32,
)
def test_diagonalize(self):
hidden_states = self._get_hidden_states()
hidden_states = tf.reshape(hidden_states,
(1, 8, 4)) # set seq length = 8, hidden dim = 4
chunked_hidden_states = longformer_attention.LongformerAttention._chunk(
hidden_states, window_overlap=2)
window_overlap_size = get_shape_list(chunked_hidden_states)[2]
self.assertEqual(window_overlap_size, 4)
padded_hidden_states = longformer_attention.LongformerAttention._pad_and_diagonalize(
chunked_hidden_states)
self.assertEqual(
get_shape_list(padded_hidden_states)[-1],
get_shape_list(chunked_hidden_states)[-1] + window_overlap_size - 1)
# first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000]
tf.debugging.assert_near(
padded_hidden_states[0, 0, 0, :4],
chunked_hidden_states[0, 0, 0],
rtol=1e-3)
tf.debugging.assert_near(
padded_hidden_states[0, 0, 0, 4:],
tf.zeros((3,), dtype=tf.dtypes.float32),
rtol=1e-3)
# last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629]
tf.debugging.assert_near(
padded_hidden_states[0, 0, -1, 3:],
chunked_hidden_states[0, 0, -1],
rtol=1e-3)
tf.debugging.assert_near(
padded_hidden_states[0, 0, -1, :3],
tf.zeros((3,), dtype=tf.dtypes.float32),
rtol=1e-3)
def test_pad_and_transpose_last_two_dims(self):
hidden_states = self._get_hidden_states()
    self.assertEqual(get_shape_list(hidden_states), [1, 4, 8])
# pad along seq length dim
paddings = tf.constant([[0, 0], [0, 0], [0, 1], [0, 0]],
dtype=tf.dtypes.int32)
hidden_states = longformer_attention.LongformerAttention._chunk(
hidden_states, window_overlap=2)
padded_hidden_states = longformer_attention.LongformerAttention._pad_and_transpose_last_two_dims(
hidden_states, paddings)
self.assertEqual(get_shape_list(padded_hidden_states), [1, 1, 8, 5])
expected_added_dim = tf.zeros((5,), dtype=tf.dtypes.float32)
tf.debugging.assert_near(
expected_added_dim, padded_hidden_states[0, 0, -1, :], rtol=1e-6)
tf.debugging.assert_near(
hidden_states[0, 0, -1, :],
tf.reshape(padded_hidden_states, (1, -1))[0, 24:32],
rtol=1e-6)
def test_mask_invalid_locations(self):
hidden_states = self._get_hidden_states()
batch_size = 1
seq_length = 8
hidden_size = 4
hidden_states = tf.reshape(hidden_states,
(batch_size, seq_length, hidden_size))
hidden_states = longformer_attention.LongformerAttention._chunk(
hidden_states, window_overlap=2)
hid_states_1 = longformer_attention.LongformerAttention._mask_invalid_locations(
hidden_states, 1)
hid_states_2 = longformer_attention.LongformerAttention._mask_invalid_locations(
hidden_states, 2)
hid_states_3 = longformer_attention.LongformerAttention._mask_invalid_locations(
hidden_states[:, :, :, :3], 2)
hid_states_4 = longformer_attention.LongformerAttention._mask_invalid_locations(
hidden_states[:, :, 2:, :], 2)
self.assertEqual(
tf.math.reduce_sum(
tf.cast(tf.math.is_inf(hid_states_1), tf.dtypes.int32)), 8)
self.assertEqual(
tf.math.reduce_sum(
tf.cast(tf.math.is_inf(hid_states_2), tf.dtypes.int32)), 24)
self.assertEqual(
tf.math.reduce_sum(
tf.cast(tf.math.is_inf(hid_states_3), tf.dtypes.int32)), 24)
self.assertEqual(
tf.math.reduce_sum(
tf.cast(tf.math.is_inf(hid_states_4), tf.dtypes.int32)), 12)
def test_chunk(self):
hidden_states = self._get_hidden_states()
batch_size = 1
seq_length = 8
hidden_size = 4
hidden_states = tf.reshape(hidden_states,
(batch_size, seq_length, hidden_size))
chunked_hidden_states = longformer_attention.LongformerAttention._chunk(
hidden_states, window_overlap=2)
# expected slices across chunk and seq length dim
expected_slice_along_seq_length = tf.convert_to_tensor(
[0.4983, -0.7584, -1.6944], dtype=tf.dtypes.float32)
expected_slice_along_chunk = tf.convert_to_tensor(
[0.4983, -1.8348, -0.7584, 2.0514], dtype=tf.dtypes.float32)
self.assertEqual(get_shape_list(chunked_hidden_states), [1, 3, 4, 4])
tf.debugging.assert_near(
chunked_hidden_states[0, :, 0, 0],
expected_slice_along_seq_length,
rtol=1e-3)
tf.debugging.assert_near(
chunked_hidden_states[0, 0, :, 0],
expected_slice_along_chunk,
rtol=1e-3)
def test_layer_local_attn(self):
hidden_states = self._get_hidden_states()
batch_size, seq_length, _ = hidden_states.shape
layer = longformer_attention.LongformerAttention(
num_heads=2,
key_dim=4,
value_dim=4,
layer_id=0,
attention_window=4,
global_attention_size=0,
)
attention_mask = tf.zeros((batch_size, seq_length), dtype=tf.dtypes.float32)
is_index_global_attn = tf.math.greater(attention_mask, 1)
attention_mask = tf.where(
tf.range(4)[None, :, None, None] > 1, -10000.0,
attention_mask[:, :, None, None])
is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0)
output_hidden_states = layer(
hidden_states=hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
)[0]
self.assertTrue(output_hidden_states.shape, (1, 4, 8))
def test_layer_global_attn(self):
layer = longformer_attention.LongformerAttention(
num_heads=2,
key_dim=4,
value_dim=4,
layer_id=0,
attention_window=4,
global_attention_size=1,
)
hidden_states = self._get_hidden_states()
hidden_states = tf.concat(
[self._get_hidden_states(),
self._get_hidden_states() - 0.5], axis=0)
_, seq_length, _ = hidden_states.shape
# create attn mask
attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32)
attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32)
attention_mask_1 = tf.where(
tf.range(4)[None, :, None, None] == 0, 10000.0, attention_mask_1)
attention_mask_1 = tf.where(
tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1)
attention_mask_2 = tf.where(
tf.range(4)[None, :, None, None] == 0, 10000.0, attention_mask_2)
attention_mask = tf.concat([attention_mask_1, attention_mask_2], axis=0)
is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0)
is_index_global_attn = tf.math.greater(attention_mask[:, :, 0, 0], 0)
output_hidden_states = layer(
hidden_states=hidden_states,
attention_mask=-tf.math.abs(attention_mask),
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
)[0]
self.assertTrue(output_hidden_states.shape, (2, 4, 8))
if __name__ == '__main__':
tf.test.main()
| 10,784 | 34.130293 | 101 | py |
models | models-master/official/projects/longformer/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A customized training library for the specific task."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.longformer import longformer_experiments # pylint: disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
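# Example invocation (illustrative only; flag values are placeholders):
#
#   python3 -m official.projects.longformer.train \
#     --experiment=longformer/glue \
#     --mode=train_and_eval \
#     --model_dir=/tmp/longformer_glue \
#     --config_file=/path/to/overrides.yaml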
| 2,570 | 35.728571 | 96 | py |
models | models-master/official/projects/longformer/longformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer model configurations and instantiation methods."""
import dataclasses
from typing import List
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.projects.longformer.longformer_encoder import LongformerEncoder
@dataclasses.dataclass
class LongformerEncoderConfig(encoders.BertEncoderConfig):
"""Extra paramerters for Longformer configs.
Attributes:
attention_window: list of ints representing the window size for each layer.
global_attention_size: the size of global attention used for each token.
pad_token_id: the token id for the pad token
"""
attention_window: List[int] = dataclasses.field(default_factory=list)
global_attention_size: int = 0
pad_token_id: int = 1
@base_config.bind(LongformerEncoderConfig)
def get_encoder(encoder_cfg: LongformerEncoderConfig):
"""Gets a 'LongformerEncoder' object.
Args:
encoder_cfg: A 'LongformerEncoderConfig'.
Returns:
    An encoder object.
"""
encoder = LongformerEncoder(
attention_window=encoder_cfg.attention_window,
global_attention_size=encoder_cfg.global_attention_size,
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first)
return encoder
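if __name__ == '__main__':
  # Minimal sketch (illustrative only): build a small encoder through the bound
  # factory above. Field values are arbitrary; `attention_window` needs one
  # entry per transformer layer. This assumes the decorated `get_encoder`
  # remains directly callable.
  _cfg = LongformerEncoderConfig(
      vocab_size=100,
      hidden_size=16,
      num_layers=2,
      num_attention_heads=2,
      intermediate_size=32,
      attention_window=[4, 4],
      global_attention_size=1)
  _encoder = get_encoder(_cfg)
  print(type(_encoder).__name__)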
| 2,615 | 36.371429 | 79 | py |
models | models-master/official/projects/longformer/utils/longformer_tokenizer_to_tfrecord.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Longformer training examples to Tfrecord."""
import collections
import os
import datasets
import tensorflow as tf
import transformers
pretrained_lm = "allenai/longformer-base-4096"
task_name = "mnli"
save_path = "./"
raw_datasets = datasets.load_dataset("glue", task_name, cache_dir=None)
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
tokenizer = transformers.AutoTokenizer.from_pretrained(
pretrained_lm,
use_fast=True,
)
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
sentence1_key, sentence2_key = task_to_keys[task_name]
padding = "max_length"
# Make sure this matches the model input sequence length.
max_seq_length = 512
def preprocess_function(examples):
# Tokenize the texts
args = ((examples[sentence1_key],) if sentence2_key is None else
(examples[sentence1_key], examples[sentence2_key]))
result = tokenizer(
*args, padding=padding, max_length=max_seq_length, truncation=True)
return result
raw_datasets = raw_datasets.map(
preprocess_function,
batched=True,
desc="Running tokenizer on dataset",
)
train_dataset = raw_datasets["train"]
eval_dataset = raw_datasets["validation_matched" if task_name ==
"mnli" else "validation"]
print("train_dataset", train_dataset[0])
print("eval_dataset", eval_dataset[0])
def file_based_convert_examples_to_features(examples, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
tf.io.gfile.makedirs(os.path.dirname(output_file))
writer = tf.io.TFRecordWriter(output_file)
for ex_index, example in enumerate(examples):
if ex_index % 10000 == 0:
print(f"Writing example {ex_index} of {len(examples)}")
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(example["input_ids"])
features["input_mask"] = create_int_feature(example["attention_mask"])
features["segment_ids"] = create_int_feature([0] *
len(example["attention_mask"]))
features["label_ids"] = create_int_feature([example["label"]])
features["is_real_example"] = create_int_feature([1])
features["example_id"] = create_int_feature([example["idx"]])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
file_based_convert_examples_to_features(
train_dataset,
os.path.join(save_path,
f"{pretrained_lm.replace('/', '_')}_train.tf_record"))
file_based_convert_examples_to_features(
eval_dataset,
os.path.join(save_path,
f"{pretrained_lm.replace('/', '_')}_eval.tf_record"))
| 3,750 | 32.19469 | 80 | py |
models | models-master/official/projects/longformer/utils/convert_pretrained_pytorch_checkpoint_to_tf.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts pre-trained pytorch checkpoint into a tf encoder checkpoint."""
import os
from absl import app
import numpy as np
import tensorflow as tf
import transformers
from official.modeling import tf_utils
from official.projects.longformer.longformer import LongformerEncoderConfig
from official.projects.longformer.longformer_encoder import LongformerEncoder
def _get_pytorch_longformer_model():
pretrained_lm = "allenai/longformer-base-4096"
model = transformers.AutoModel.from_pretrained(pretrained_lm)
return {n: p.data.numpy() for n, p in model.named_parameters()}
def _create_longformer_model():
"""Creates a Longformer model."""
encoder_cfg = LongformerEncoderConfig
encoder_cfg.vocab_size = 50265
encoder_cfg.max_position_embeddings = 4098
encoder_cfg.attention_window = [2] * encoder_cfg.num_layers
encoder_cfg.global_attention_size = 1
encoder = LongformerEncoder(
attention_window=encoder_cfg.attention_window,
global_attention_size=encoder_cfg.global_attention_size,
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
norm_first=encoder_cfg.norm_first)
return encoder
# pylint: disable=protected-access
def convert(encoder, allenai_model):
"""Convert AllenAI Longformer to the one in the codebase."""
num_layers = encoder._config["num_layers"]
num_attention_heads = encoder._config["num_attention_heads"]
hidden_size = encoder._config["hidden_size"]
head_size = hidden_size // num_attention_heads
assert head_size * num_attention_heads == hidden_size
encoder._embedding_layer.set_weights(
[allenai_model["embeddings.word_embeddings.weight"]])
encoder._embedding_norm_layer.set_weights([
allenai_model["embeddings.LayerNorm.weight"],
allenai_model["embeddings.LayerNorm.bias"]
])
encoder._type_embedding_layer.set_weights([
np.repeat(
allenai_model["embeddings.token_type_embeddings.weight"], 2, axis=0)
])
encoder._position_embedding_layer.set_weights(
[allenai_model["embeddings.position_embeddings.weight"]])
encoder._pooler_layer.set_weights([
allenai_model["pooler.dense.weight"], allenai_model["pooler.dense.bias"]
])
for layer_num in range(num_layers):
encoder._transformer_layers[
layer_num]._attention_layer._global_key_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.key_global.weight"].T
.reshape(
(hidden_size, num_attention_heads, head_size)), allenai_model[
f"encoder.layer.{layer_num}.attention.self.key_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._global_query_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query_global.weight"]
.T.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._global_value_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value_global.weight"]
.T.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._key_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.key.weight"].T
.reshape(
(hidden_size, num_attention_heads, head_size)), allenai_model[
f"encoder.layer.{layer_num}.attention.self.key_global.bias"]
.reshape((num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._query_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query.weight"].T
.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.query.bias"].reshape(
(num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._value_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value.weight"].T
.reshape((hidden_size, num_attention_heads, head_size)),
allenai_model[
f"encoder.layer.{layer_num}.attention.self.value.bias"].reshape(
(num_attention_heads, head_size))
])
encoder._transformer_layers[
layer_num]._attention_layer._output_dense.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.output.dense.weight"].T,
allenai_model[
f"encoder.layer.{layer_num}.attention.output.dense.bias"]
])
encoder._transformer_layers[layer_num]._attention_layer_norm.set_weights([
allenai_model[
f"encoder.layer.{layer_num}.attention.output.LayerNorm.weight"],
allenai_model[
f"encoder.layer.{layer_num}.attention.output.LayerNorm.bias"]
])
encoder._transformer_layers[layer_num]._intermediate_dense.set_weights([
allenai_model[f"encoder.layer.{layer_num}.intermediate.dense.weight"].T,
allenai_model[f"encoder.layer.{layer_num}.intermediate.dense.bias"]
])
encoder._transformer_layers[layer_num]._output_dense.set_weights([
allenai_model[f"encoder.layer.{layer_num}.output.dense.weight"].T,
allenai_model[f"encoder.layer.{layer_num}.output.dense.bias"]
])
encoder._transformer_layers[layer_num]._output_layer_norm.set_weights([
allenai_model[f"encoder.layer.{layer_num}.output.LayerNorm.weight"],
allenai_model[f"encoder.layer.{layer_num}.output.LayerNorm.bias"]
])
def convert_checkpoint(output_path):
"""Converts and save the checkpoint."""
output_dir, _ = os.path.split(output_path)
tf.io.gfile.makedirs(output_dir)
encoder = _create_longformer_model()
allenai_model = _get_pytorch_longformer_model()
sequence_length = 128
batch_size = 2
word_id_data = np.random.randint(
10, size=(batch_size, sequence_length), dtype=np.int32)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
type_id_data = np.random.randint(
2, size=(batch_size, sequence_length), dtype=np.int32)
inputs = {
"input_word_ids": word_id_data,
"input_mask": mask_data,
"input_type_ids": type_id_data,
}
encoder(inputs)
convert(encoder, allenai_model)
tf.train.Checkpoint(encoder=encoder).write(output_path)
def main(_):
convert_checkpoint("longformer-4096/longformer")
if __name__ == "__main__":
app.run(main)
| 8,403 | 40.810945 | 80 | py |
models | models-master/official/projects/qat/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/pretrained_checkpoint_converter.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A converter for BERT pretrained checkpoint to QAT BERT checkpoint."""
import tempfile
# Import libraries
from absl import app
from absl import flags
import tensorflow as tf
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.core import task_factory
from official.modeling import hyperparams
from official.projects.qat.nlp import registry_imports as qat_registry_imports # pylint: disable=unused-import
FLAGS = flags.FLAGS
_EXPERIMENT = flags.DEFINE_string(
'experiment', default=None,
help='The experiment type registered for the pretrained model.')
_CONFIG_FILE = flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override',
default=None,
help='a YAML/JSON string or a YAML file which specifies additional '
'overrides over the default parameters and those specified in '
'`--config_file`. Note that this is supposed to be used only to override '
'the model parameters, but not the parameters like TPU specific flags. '
'One canonical use case of `--config_file` and `--params_override` is '
'users first define a template config file using `--config_file`, then '
'use `--params_override` to adjust the minimal set of tuning parameters, '
'for example setting up different `train_batch_size`. The final override '
'order of parameters: default_model_params --> params from config_file '
'--> params in params_override. See also the help message of '
'`--config_file`.')
_PRETRAINED_CHECKPOINT = flags.DEFINE_string(
'pretrained_checkpoint',
default=None,
help='The path of pretrained checkpoint for the original bert model.')
_EXPERIMENT_QAT = flags.DEFINE_string(
    'experiment_qat', default=None,
    help='The experiment type registered for the QAT model.')
_CONFIG_FILE_QAT = flags.DEFINE_multi_string(
'config_file_qat',
default=None,
help='config_file flag for the qat model.')
_PARAMS_OVERRIDE_QAT = flags.DEFINE_string(
'params_override_qat',
default=None,
help='params_override flag for the qat model.')
_OUTPUT_CHECKPOINT = flags.DEFINE_string(
'output_checkpoint',
default=None,
help='The output checkpoint path for QAT applied BERT model.')
def _build_model(experiment, config_file, params_override):
"""Build the model."""
params = exp_factory.get_exp_config(experiment)
for config_file in config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if params_override:
params = hyperparams.override_params_dict(
params, params_override, is_strict=True)
task = task_factory.get_task(params.task, logging_dir=tempfile.mkdtemp())
return task.build_model()
def _set_weights_to_qat(model_from, model_to):
"""Set pretrained weight to QAT applied model."""
name_to_index = {}
for index, weight in enumerate(model_to.weights):
origin_name = weight.name.replace('quant_', '').replace(
'mobile_bert_embedding_1', 'mobile_bert_embedding')
name_to_index[origin_name] = index
model_to_weights = model_to.get_weights()
for weight, value in zip(model_from.weights, model_from.get_weights()):
index = name_to_index[weight.name]
model_to_weights[index] = value
model_to.set_weights(model_to_weights)
def main(_):
model = _build_model(
_EXPERIMENT.value, _CONFIG_FILE.value, _PARAMS_OVERRIDE.value)
if _PRETRAINED_CHECKPOINT.value is not None:
ckpt = tf.train.Checkpoint(model=model)
status = ckpt.restore(_PRETRAINED_CHECKPOINT.value)
status.expect_partial().assert_existing_objects_matched()
model_qat = _build_model(
      _EXPERIMENT_QAT.value, _CONFIG_FILE_QAT.value, _PARAMS_OVERRIDE_QAT.value)
_set_weights_to_qat(model, model_qat)
if hasattr(model_qat, 'checkpoint_items'):
checkpoint_items = model_qat.checkpoint_items
else:
checkpoint_items = {}
ckpt_qat = tf.train.Checkpoint(
model=model_qat,
**checkpoint_items)
ckpt_qat.save(_OUTPUT_CHECKPOINT.value)
if __name__ == '__main__':
app.run(main)
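# Example invocation (illustrative only; experiment names, config files and
# paths are placeholders):
#
#   python3 -m official.projects.qat.nlp.pretrained_checkpoint_converter \
#     --experiment=bert/squad \
#     --config_file=/path/to/bert_config.yaml \
#     --pretrained_checkpoint=/path/to/pretrained/bert-ckpt \
#     --experiment_qat=bert/squad_qat \
#     --config_file_qat=/path/to/bert_config.yaml \
#     --output_checkpoint=/path/to/output/qat-ckpt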
| 5,163 | 35.624113 | 111 | py |
models | models-master/official/projects/qat/nlp/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration on qat project."""
# pylint: disable=unused-import
from official.projects.qat.nlp import configs
from official.projects.qat.nlp.modeling.layers import mobile_bert_layers
from official.projects.qat.nlp.modeling.layers import multi_head_attention
from official.projects.qat.nlp.modeling.layers import transformer_encoder_block
from official.projects.qat.nlp.tasks import question_answering
| 1,039 | 46.272727 | 79 | py |
models | models-master/official/projects/qat/nlp/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM common training driver."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import registry_imports # pylint: disable=unused-import
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.qat.nlp import registry_imports as qat_registry_imports # pylint: disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 2,636 | 37.217391 | 111 | py |
models | models-master/official/projects/qat/nlp/configs/finetuning_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning configuration definition."""
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.nlp.configs import finetuning_experiments
from official.projects.qat.nlp.tasks import question_answering
@exp_factory.register_config_factory('bert/squad_qat')
def bert_squad() -> cfg.ExperimentConfig:
"""BERT Squad V1/V2 with QAT."""
config = finetuning_experiments.bert_squad()
task = question_answering.QuantizedModelQAConfig.from_args(
**config.task.as_dict())
# Copy QADataConfig objects.
task.train_data = config.task.train_data
task.validation_data = config.task.validation_data
config.task = task
return config
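# Minimal usage sketch (illustrative only): once this module is imported, the
# QAT variant registered above is retrievable like any other experiment, e.g.
#
#   params = exp_factory.get_exp_config('bert/squad_qat')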
| 1,303 | 35.222222 | 74 | py |
models | models-master/official/projects/qat/nlp/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.projects.qat.nlp.configs import finetuning_experiments
| 713 | 38.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/quantization/schemes.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization schemes."""
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.nlp.modeling.layers import mobile_bert_layers
from official.projects.qat.nlp.modeling.layers import transformer_encoder_block
from official.projects.qat.nlp.quantization import configs
keras = tf.keras
default_8bit_transforms = tfmot.quantization.keras.default_8bit.default_8bit_transforms
LayerNode = tfmot.quantization.keras.graph_transformations.transforms.LayerNode
LayerPattern = tfmot.quantization.keras.graph_transformations.transforms.LayerPattern
class TransformerEncoderBlockQuantize(
tfmot.quantization.keras.graph_transformations.transforms.Transform):
"""Add QAT support for Keras Custom layer."""
_QUANTIZATION_AWARE_TRAINING_WEIGHT_NAMES = frozenset({
'optimizer_step',
'output_max', 'output_min',
'kernel_min', 'kernel_max',
'depthwise_kernel_min', 'depthwise_kernel_max',
'query_min', 'query_max',
'attention_scores_min', 'attention_scores_max',
'attention_output_min', 'attention_output_max',
'masked_softmax_attention_mask_min',
'masked_softmax_attention_mask_max',
'masked_softmax_sub1_min', 'masked_softmax_sub1_max',
'masked_softmax_mask1_min', 'masked_softmax_mask1_max',
'masked_softmax_sub2_min', 'masked_softmax_sub2_max',
'masked_softmax_clamp_min', 'masked_softmax_clamp_max',
'masked_softmax_mask2_min', 'masked_softmax_mask2_max',
'masked_softmax_adder_sub_min', 'masked_softmax_adder_sub_max',
'masked_softmax_adder_mul_min', 'masked_softmax_adder_mul_max',
'masked_softmax_add_min', 'masked_softmax_add_max',
'post_activation_min', 'post_activation_max',
'word_embedding_out_min', 'word_embedding_out_max',
'pos_embedding_out_min', 'pos_embedding_out_max',
'type_embedding_out_min', 'type_embedding_out_max',
'bias_min', 'bias_max'
})
  _SUPPORTED_MODEL_WEIGHT_NAMES = frozenset({
'kernel', 'depthwise_kernel', 'bias',
'gamma', 'beta', 'moving_mean', 'moving_variance',
'embeddings'
})
def __init__(self):
super().__init__()
self._original_layer_pattern = 'modeling>TransformerEncoderBlock'
self._quantized_layer_class = transformer_encoder_block.TransformerEncoderBlockQuantized
def pattern(self) -> LayerPattern:
"""See base class."""
return LayerPattern(self._original_layer_pattern)
def _is_quantization_weight_name(self, name):
simple_name = name.split('/')[-1].split(':')[0]
if simple_name in self._QUANTIZATION_AWARE_TRAINING_WEIGHT_NAMES:
return True
    if simple_name in self._SUPPORTED_MODEL_WEIGHT_NAMES:
return False
raise ValueError('Variable name {} is not supported on '
'CustomLayerQuantize({}) transform.'.format(
simple_name,
self._original_layer_pattern))
def replacement(self, match_layer: LayerNode) -> LayerNode:
"""See base class."""
bottleneck_layer = match_layer.layer
bottleneck_config = bottleneck_layer['config']
bottleneck_names_and_weights = list(match_layer.names_and_weights)
quantized_layer = self._quantized_layer_class(
**bottleneck_config)
quantized_layer_config = quantized_layer.get_config()
if 'hidden_size' in quantized_layer_config:
dummy_input_shape = [
1, 1, quantized_layer_config['hidden_size']]
quantized_layer.compute_output_shape(dummy_input_shape)
elif 'num_attention_heads' in quantized_layer_config:
dummy_input_shape = [
1, 1, quantized_layer_config['num_attention_heads']]
quantized_layer.compute_output_shape(dummy_input_shape)
else:
dummy_input_shape = [1, 1]
quantized_layer(np.zeros(shape=dummy_input_shape, dtype=np.int32),
np.zeros(shape=dummy_input_shape, dtype=np.int32),
training=False)
quantized_names_and_weights = zip(
[weight.name for weight in quantized_layer.weights],
quantized_layer.get_weights())
match_idx = 0
names_and_weights = []
for name_and_weight in quantized_names_and_weights:
if not self._is_quantization_weight_name(name=name_and_weight[0]):
name_and_weight = bottleneck_names_and_weights[match_idx]
match_idx = match_idx + 1
names_and_weights.append(name_and_weight)
if match_idx != len(bottleneck_names_and_weights):
      raise ValueError(
          'Only {}/{} of the bottleneck weights were transformed.'.format(
              match_idx, len(bottleneck_names_and_weights)))
quantized_layer_config = tf_utils.serialize_layer(
quantized_layer, use_legacy_format=True
)
quantized_layer_config['name'] = quantized_layer_config['config']['name']
layer_metadata = {
'quantize_config':
configs.NoQuantizeConfig()}
return LayerNode(
quantized_layer_config,
metadata=layer_metadata,
names_and_weights=names_and_weights)
class MobileBertTransformerQuantize(TransformerEncoderBlockQuantize):
def __init__(self):
super().__init__()
self._original_layer_pattern = 'Text>MobileBertTransformer'
self._quantized_layer_class = mobile_bert_layers.MobileBertTransformerQuantized
class MobileBertEmbeddingQuantize(TransformerEncoderBlockQuantize):
def __init__(self):
super().__init__()
self._original_layer_pattern = 'Text>MobileBertEmbedding'
self._quantized_layer_class = mobile_bert_layers.MobileBertEmbeddingQuantized
class QuantizeLayoutTransform(
tfmot.quantization.keras.QuantizeLayoutTransform):
"""Default model transformations."""
def apply(self, model, layer_quantize_map):
"""Implement default 8-bit transforms.
Currently this means the following.
      1. Pull activations into layers, and fuse activations. (TODO)
      2. Modify range in incoming layers for Concat. (TODO)
      3. Fuse Conv2D/DepthwiseConv2D + BN into a single layer.
Args:
model: Keras model to be quantized.
layer_quantize_map: Map with keys as layer names, and values as dicts
containing custom `QuantizeConfig`s which may have been passed with
layers.
Returns:
(Transformed Keras model to better match TensorFlow Lite backend, updated
layer quantize map.)
"""
transforms = [
default_8bit_transforms.SeparableConv1DQuantize(),
default_8bit_transforms.SeparableConvQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormReLUQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormActivationQuantize(),
default_8bit_transforms.Conv2DBatchNormReLUQuantize(),
default_8bit_transforms.Conv2DBatchNormActivationQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormQuantize(),
default_8bit_transforms.Conv2DBatchNormQuantize(),
default_8bit_transforms.ConcatTransform6Inputs(),
default_8bit_transforms.ConcatTransform5Inputs(),
default_8bit_transforms.ConcatTransform4Inputs(),
default_8bit_transforms.ConcatTransform3Inputs(),
default_8bit_transforms.ConcatTransform(),
default_8bit_transforms.LayerReLUQuantize(),
default_8bit_transforms.LayerReluActivationQuantize(),
TransformerEncoderBlockQuantize(),
MobileBertTransformerQuantize(),
MobileBertEmbeddingQuantize(),
]
return tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer(
model, transforms,
set(layer_quantize_map.keys()), layer_quantize_map).transform()
class Default8BitQuantizeScheme(
tfmot.quantization.keras.default_8bit.Default8BitQuantizeScheme):
def get_layout_transformer(self):
return QuantizeLayoutTransform()
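# Example (illustrative sketch, not part of the original file; assumes an
# already-annotated Keras model named `annotated_model`):
#
#   with tfmot.quantization.keras.quantize_scope():
#     qat_model = tfmot.quantization.keras.quantize_apply(
#         annotated_model, scheme=Default8BitQuantizeScheme())
#
# During quantize_apply, the layout transform above swaps
# TransformerEncoderBlock and the MobileBERT layers for their quantized
# counterparts defined in this project.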
| 8,465 | 39.507177 | 93 | py |
models | models-master/official/projects/qat/nlp/quantization/helper.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization helpers."""
import tensorflow_model_optimization as tfmot
class LayerQuantizerHelper(object):
"""Helper class that handles quantizers."""
def __init__(self, *args, **kwargs):
self._quantizers = {}
self._quantizer_vars = {}
super().__init__(*args, **kwargs)
def _all_value_quantizer(self):
return tfmot.quantization.keras.quantizers.AllValuesQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def _moving_average_quantizer(self):
return tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def _add_quantizer(self, name, all_value_quantizer=False):
if all_value_quantizer:
self._quantizers[name] = self._all_value_quantizer()
else:
self._quantizers[name] = self._moving_average_quantizer()
def _apply_quantizer(self, name, inputs, training, **kwargs):
return self._quantizers[name](
inputs, training, self._quantizer_vars[name], **kwargs)
def _build_quantizer_vars(self):
for name in self._quantizers:
self._quantizer_vars[name] = self._quantizers[name].build(
tensor_shape=None, name=name, layer=self)
| 1,835 | 35.72 | 74 | py |
models | models-master/official/projects/qat/nlp/quantization/wrappers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization Wrappers."""
import tensorflow_model_optimization as tfmot
class MultiHeadAttentionQuantizeWrapper(
tfmot.quantization.keras.QuantizeWrapperV2):
"""Custom quantize wrapper for the MultiHeadAttention layer."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._first_call_built = False
def build(self, input_shape):
self.layer.build(input_shape)
def call(self,
query,
value,
key=None,
attention_mask=None,
return_attention_scores=False,
training=None):
if not self._first_call_built:
# pylint: disable=protected-access
self.layer._build_from_signature(query=query, value=value, key=key)
# pylint: enable=protected-access
self.layer.call(
query, value, key=key, attention_mask=attention_mask,
return_attention_scores=return_attention_scores,
training=training)
super().build(input_shape=None)
self._first_call_built = True
return super().call(
query, value=value, key=key, attention_mask=attention_mask,
return_attention_scores=return_attention_scores,
training=training
)
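# Example (illustrative sketch, not part of the original file): the wrapper is
# paired with an attention-aware quantize config, mirroring how the quantized
# transformer layers in this project construct their attention blocks.
# `MultiHeadAttentionQuantized` and `DefaultMultiHeadAttentionQuantizeConfig`
# come from the sibling `multi_head_attention` and `configs` modules.
#
#   attention = MultiHeadAttentionQuantizeWrapper(
#       MultiHeadAttentionQuantized(num_heads=8, key_dim=64),
#       DefaultMultiHeadAttentionQuantizeConfig())
#   outputs = attention(query, value, attention_mask=mask)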
| 1,820 | 33.358491 | 74 | py |
models | models-master/official/projects/qat/nlp/quantization/configs_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for configs.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.nlp.quantization import configs
class _TestHelper(object):
def _convert_list(self, list_of_tuples):
"""Transforms a list of 2-tuples to a tuple of 2 lists.
`QuantizeConfig` methods return a list of 2-tuples in the form
[(weight1, quantizer1), (weight2, quantizer2)]. This function converts
    it into a 2-tuple of lists: ([weight1, weight2], [quantizer1, quantizer2]).
Args:
list_of_tuples: List of 2-tuples.
Returns:
2-tuple of lists.
"""
list1 = []
list2 = []
for a, b in list_of_tuples:
list1.append(a)
list2.append(b)
return list1, list2
# TODO(pulkitb): Consider asserting on full equality for quantizers.
def _assert_weight_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.LastValueQuantizer)
def _assert_activation_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.MovingAverageQuantizer)
def _assert_kernel_equality(self, a, b):
self.assertAllEqual(a.numpy(), b.numpy())
class Default8BitQuantizeConfigTest(tf.test.TestCase, _TestHelper):
def _simple_dense_layer(self):
layer = tf.keras.layers.Dense(2)
layer.build(input_shape=(3,))
return layer
def testGetsQuantizeWeightsAndQuantizers(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
(weights, weight_quantizers) = self._convert_list(
quantize_config.get_weights_and_quantizers(layer))
self._assert_weight_quantizers(weight_quantizers)
self.assertEqual([layer.kernel], weights)
def testGetsQuantizeActivationsAndQuantizers(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
(activations, activation_quantizers) = self._convert_list(
quantize_config.get_activations_and_quantizers(layer))
self._assert_activation_quantizers(activation_quantizers)
self.assertEqual([layer.activation], activations)
def testSetsQuantizeWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
quantize_config.set_quantize_weights(layer, [quantize_kernel])
self._assert_kernel_equality(layer.kernel, quantize_kernel)
def testSetsQuantizeActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
quantize_config.set_quantize_activations(layer, [quantize_activation])
self.assertEqual(layer.activation, quantize_activation)
def testSetsQuantizeWeights_ErrorOnWrongNumberOfWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer,
[quantize_kernel, quantize_kernel])
def testSetsQuantizeWeights_ErrorOnWrongShapeOfWeight(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(np.ones([1, 2]))
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [quantize_kernel])
def testSetsQuantizeActivations_ErrorOnWrongNumberOfActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(
layer, [quantize_activation, quantize_activation])
def testGetsResultQuantizers_ReturnsQuantizer(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
[], [], True)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertLen(output_quantizers, 1)
self._assert_activation_quantizers(output_quantizers)
def testGetsResultQuantizers_EmptyWhenFalse(self):
layer = self._simple_dense_layer()
quantize_config = configs.Default8BitQuantizeConfig(
[], [], False)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertEqual([], output_quantizers)
def testSerialization(self):
quantize_config = configs.Default8BitQuantizeConfig(
['kernel'], ['activation'], False)
expected_config = {
'class_name': 'Default8BitQuantizeConfig',
'config': {
'weight_attrs': ['kernel'],
'activation_attrs': ['activation'],
'quantize_output': False
}
}
serialized_quantize_config = tf_utils.serialize_keras_object(
quantize_config
)
self.assertEqual(expected_config, serialized_quantize_config)
quantize_config_from_config = (
tf_utils.deserialize_keras_object(
serialized_quantize_config,
module_objects=globals(),
custom_objects=configs._types_dict(),
)
)
self.assertEqual(quantize_config, quantize_config_from_config)
@parameterized.parameters(
configs.LastValueQuantizer,
configs.MovingAverageQuantizer,
configs.NoQuantizer)
class QuantizersTest(tf.test.TestCase, parameterized.TestCase):
def _simple_dense_layer(self):
layer = tf.keras.layers.Dense(2)
layer.build(input_shape=(3,))
return layer
def _get_quant_params(self, quantizer_type):
if quantizer_type == configs.NoQuantizer:
return {}
return {
'num_bits': 8,
'per_axis': False,
'symmetric': False,
'narrow_range': False
}
def _test_quantizer(self, quantizer):
inputs = tf.Variable(
np.array([[-1.0, 0.5], [0.0, 1.0]]),
name='inputs',
dtype=tf.dtypes.float32)
min_var = tf.Variable(0.0)
max_var = tf.Variable(0.0)
weights = {'min_var': min_var, 'max_var': max_var}
quant_tensor = quantizer(inputs, training=True, weights=weights)
results = self.evaluate(quant_tensor)
min_max_values = self.evaluate([min_var, max_var])
# TODO(pulkitb): Assert on expected values for testing.
# Since the underlying code is already tested in quant_ops_test.py, this
# just ensures the Quantizers code is wired properly.
print('Result: ', results)
print('min_var: ', min_max_values[0])
print('max_var: ', min_max_values[1])
layer = self._simple_dense_layer()
weights = quantizer.build(tf.TensorShape([1, 1, 1]), 'test', layer)
if isinstance(quantizer, (
configs.LastValueQuantizer, configs.MovingAverageQuantizer)):
self.assertLen(weights, 2)
self.assertFalse(weights['min_var'].trainable)
self.assertFalse(weights['max_var'].trainable)
elif isinstance(quantizer, configs.NoQuantizer):
self.assertEmpty(weights)
def testQuantizer(self, quantizer_type):
quantizer = quantizer_type(**self._get_quant_params(quantizer_type))
self._test_quantizer(quantizer)
def testSerialization(self, quantizer_type):
quantizer = quantizer_type(**self._get_quant_params(quantizer_type))
expected_config = {
'class_name': quantizer_type.__name__,
'config': self._get_quant_params(quantizer_type),
}
serialized_quantizer = tf_utils.serialize_keras_object(
quantizer
)
self.assertEqual(expected_config, serialized_quantizer)
quantizer_from_config = tf_utils.deserialize_keras_object(
serialized_quantizer,
module_objects=globals(),
custom_objects=configs._types_dict(),
)
self.assertEqual(quantizer, quantizer_from_config)
if __name__ == '__main__':
tf.test.main()
| 9,371 | 31.541667 | 79 | py |
models | models-master/official/projects/qat/nlp/quantization/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/quantization/configs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom quantize configs."""
from typing import Sequence, Callable, Tuple, Any, Dict
import tensorflow as tf
import tensorflow_model_optimization as tfmot
Quantizer = tfmot.quantization.keras.quantizers.Quantizer
Layer = tf.keras.layers.Layer
Activation = Callable[[tf.Tensor], tf.Tensor]
WeightAndQuantizer = Tuple[tf.Variable, Quantizer]
ActivationAndQuantizer = Tuple[Activation, Quantizer]
class _QuantizeHelper(object):
"""Mixin with helper functions for quantizers."""
def _add_range_weights(self, layer, name, per_axis=False, tensor_shape=None):
"""Add min and max vars to layer."""
    # Add a naming index to avoid duplicated variable names.
if hasattr(layer, 'quantize_helper_weight_idx'):
layer.quantize_helper_weight_idx += 1
name = '{}/{}'.format(layer.quantize_helper_weight_idx, name)
else:
layer.quantize_helper_weight_idx = 0
shape = None
if per_axis and tensor_shape is not None:
shape = (tensor_shape[-1])
min_weight = layer.add_weight(
name + '_min',
initializer=tf.keras.initializers.Constant(-6.0),
trainable=False,
shape=shape)
max_weight = layer.add_weight(
name + '_max',
initializer=tf.keras.initializers.Constant(6.0),
trainable=False,
shape=shape)
return {'min_var': min_weight, 'max_var': max_weight}
class LastValueQuantizer(
_QuantizeHelper,
tfmot.quantization.keras.quantizers.LastValueQuantizer):
pass
class MovingAverageQuantizer(
_QuantizeHelper,
tfmot.quantization.keras.quantizers.MovingAverageQuantizer):
pass
class NoQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
"""Dummy quantizer do nothing."""
def __call__(self, inputs, training, weights, **kwargs):
return tf.identity(inputs)
def get_config(self):
return {}
def build(self, tensor_shape, name, layer):
return {}
def __eq__(self, other):
if not isinstance(other, NoQuantizer):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class DefaultEinsumDenseQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for EinsumDense layer."""
# Configure how to quantize weights.
def get_weights_and_quantizers(self, layer):
return [(layer.kernel, LastValueQuantizer(
num_bits=8, symmetric=True, narrow_range=False, per_axis=False))]
# Configure how to quantize activations.
def get_activations_and_quantizers(self, layer):
return [(layer.activation, MovingAverageQuantizer(
num_bits=8, symmetric=False, narrow_range=False, per_axis=False))]
def set_quantize_weights(self, layer, quantize_weights):
    # Add this line for each item returned in `get_weights_and_quantizers`,
    # in the same order.
layer.kernel = quantize_weights[0]
def set_quantize_activations(self, layer, quantize_activations):
    # Add this line for each item returned in `get_activations_and_quantizers`,
    # in the same order.
layer.activation = quantize_activations[0]
# Configure how to quantize outputs (may be equivalent to activations).
def get_output_quantizers(self, layer):
return []
def get_config(self):
return {}
# pylint: disable=protected-access
class DefaultMultiHeadAttentionQuantizeConfig(
tfmot.quantization.keras.QuantizeConfig):
"""Default quantize config for MultiHeadAttention layer.
  It only quantizes child EinsumDense layers. It should be applied to the
  MultiHeadAttentionQuantized layer.
"""
def __init__(self):
self.einsum_dense_config = DefaultEinsumDenseQuantizeConfig()
self.num_weight_per_einsum_dense = 1
self.num_activation_per_einsum_dense = 1
def _get_einsum_dense_layers(self, layer):
return [
layer._query_dense,
layer._key_dense,
layer._value_dense,
layer._output_dense]
def get_weights_and_quantizers(self, layer):
ret = []
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
ret += self.einsum_dense_config.get_weights_and_quantizers(
einsum_dense_layer)
return ret
def get_activations_and_quantizers(self, layer):
ret = []
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
ret += self.einsum_dense_config.get_activations_and_quantizers(
einsum_dense_layer)
return ret
def set_quantize_weights(self, layer, quantize_weights):
idx = 0
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
self.einsum_dense_config.set_quantize_weights(
einsum_dense_layer,
quantize_weights[idx:idx+self.num_weight_per_einsum_dense])
idx += self.num_weight_per_einsum_dense
def set_quantize_activations(self, layer, quantize_activations):
idx = 0
for einsum_dense_layer in self._get_einsum_dense_layers(layer):
self.einsum_dense_config.set_quantize_activations(
einsum_dense_layer,
quantize_activations[idx:idx+self.num_activation_per_einsum_dense])
idx += self.num_activation_per_einsum_dense
def get_output_quantizers(self, layer):
return []
def get_config(self):
return {}
# pylint: enable=protected-access
class Default8BitOutputQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig which only quantizes the output from a layer."""
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
return []
def set_quantize_weights(self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
pass
def set_quantize_activations(self,
layer: Layer,
quantize_activations: Sequence[Activation]):
pass
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
return [
MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
]
def get_config(self) -> Dict[str, Any]:
return {}
class Default8BitActivationQuantizeConfig(
tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for keras.layers.Activation.
`keras.layers.Activation` needs a separate `QuantizeConfig` since the
decision to quantize depends on the specific activation type.
"""
def _assert_activation_layer(self, layer: Layer):
if not isinstance(layer, tf.keras.layers.Activation):
raise RuntimeError(
'Default8BitActivationQuantizeConfig can only be used with '
'`keras.layers.Activation`.')
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
"""See base class."""
self._assert_activation_layer(layer)
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
"""See base class."""
self._assert_activation_layer(layer)
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
"""See base class."""
self._assert_activation_layer(layer)
if not hasattr(layer.activation, '__name__'):
raise ValueError('Activation {} not supported by '
'Default8BitActivationQuantizeConfig.'.format(
layer.activation))
    # This code is copied from the TFMOT repo, with relu6 added to support
    # MobileNet.
if layer.activation.__name__ in ['relu', 'relu6']:
# 'relu' should generally get fused into the previous layer.
return [MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]
elif layer.activation.__name__ in ['linear', 'softmax', 'sigmoid']:
return []
raise ValueError('Activation {} not supported by '
'Default8BitActivationQuantizeConfig.'.format(
layer.activation))
def get_config(self) -> Dict[str, Any]:
"""Get a config for this quantizer config."""
return {}
class NoQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""Empty quantize config."""
# Configure how to quantize weights.
def get_weights_and_quantizers(self, layer):
return []
# Configure how to quantize activations.
def get_activations_and_quantizers(self, layer):
return []
def set_quantize_weights(self, layer, quantize_weights):
    # Add this line for each item returned in `get_weights_and_quantizers`,
    # in the same order.
pass
def set_quantize_activations(self, layer, quantize_activations):
    # Add this line for each item returned in `get_activations_and_quantizers`,
    # in the same order.
pass
# Configure how to quantize outputs (may be equivalent to activations).
def get_output_quantizers(self, layer):
return []
def get_config(self):
return {}
class Default8BitQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for non recurrent Keras layers."""
def __init__(self, weight_attrs, activation_attrs, quantize_output):
self.weight_attrs = weight_attrs
self.activation_attrs = activation_attrs
self.quantize_output = quantize_output
# TODO(pulkitb): For some layers such as Conv2D, per_axis should be True.
# Add mapping for which layers support per_axis.
self.weight_quantizer = LastValueQuantizer(
num_bits=8, per_axis=False, symmetric=True, narrow_range=True)
self.activation_quantizer = MovingAverageQuantizer(
num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
def get_weights_and_quantizers(self, layer):
return [(getattr(layer, weight_attr), self.weight_quantizer)
for weight_attr in self.weight_attrs]
def get_activations_and_quantizers(self, layer):
return [(getattr(layer, activation_attr), self.activation_quantizer)
for activation_attr in self.activation_attrs]
def set_quantize_weights(self, layer, quantize_weights):
if len(self.weight_attrs) != len(quantize_weights):
raise ValueError(
'`set_quantize_weights` called on layer {} with {} '
'weight parameters, but layer expects {} values.'.format(
layer.name, len(quantize_weights), len(self.weight_attrs)))
for weight_attr, weight in zip(self.weight_attrs, quantize_weights):
current_weight = getattr(layer, weight_attr)
if current_weight.shape != weight.shape:
        raise ValueError('Existing layer weight shape {} is incompatible with '
'provided weight shape {}'.format(
current_weight.shape, weight.shape))
setattr(layer, weight_attr, weight)
def set_quantize_activations(self, layer, quantize_activations):
if len(self.activation_attrs) != len(quantize_activations):
raise ValueError(
'`set_quantize_activations` called on layer {} with {} '
'activation parameters, but layer expects {} values.'.format(
layer.name, len(quantize_activations),
len(self.activation_attrs)))
for activation_attr, activation in zip(
self.activation_attrs, quantize_activations):
setattr(layer, activation_attr, activation)
def get_output_quantizers(self, layer):
if self.quantize_output:
return [self.activation_quantizer]
return []
@classmethod
def from_config(cls, config):
"""Instantiates a `Default8BitQuantizeConfig` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `Default8BitQuantizeConfig` instance.
"""
return cls(**config)
def get_config(self):
# TODO(pulkitb): Add weight and activation quantizer to config.
# Currently it's created internally, but ideally the quantizers should be
# part of the constructor and passed in from the registry.
return {
'weight_attrs': self.weight_attrs,
'activation_attrs': self.activation_attrs,
'quantize_output': self.quantize_output
}
def __eq__(self, other):
if not isinstance(other, Default8BitQuantizeConfig):
return False
return (self.weight_attrs == other.weight_attrs and
            self.activation_attrs == other.activation_attrs and
self.weight_quantizer == other.weight_quantizer and
self.activation_quantizer == other.activation_quantizer and
self.quantize_output == other.quantize_output)
def __ne__(self, other):
return not self.__eq__(other)
def _types_dict():
return {
'NoQuantizer':
NoQuantizer,
'LastValueQuantizer':
LastValueQuantizer,
'MovingAverageQuantizer':
MovingAverageQuantizer,
'DefaultEinsumDenseQuantizeConfig':
DefaultEinsumDenseQuantizeConfig,
'DefaultMultiHeadAttentionQuantizeConfig':
DefaultMultiHeadAttentionQuantizeConfig,
'Default8BitOutputQuantizeConfig':
Default8BitOutputQuantizeConfig,
'Default8BitActivationQuantizeConfig':
Default8BitActivationQuantizeConfig,
'NoQuantizeConfig':
NoQuantizeConfig,
'Default8BitQuantizeConfig':
Default8BitQuantizeConfig,
}
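# Example (illustrative sketch, not part of the original file): a quantize
# config is attached to a layer through a quantize wrapper, and _types_dict()
# supplies the custom objects needed when such configs are deserialized.
#
#   wrapped_dense = tfmot.quantization.keras.QuantizeWrapperV2(
#       tf.keras.layers.Dense(2),
#       Default8BitQuantizeConfig(['kernel'], ['activation'], False))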
| 14,177 | 32.597156 | 80 | py |
models | models-master/official/projects/qat/nlp/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/modeling/networks/span_labeling.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Span labeling network."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.nlp.quantization import configs
def _apply_paragraph_mask(logits, paragraph_mask):
"""Applies a position mask to calculated logits."""
masked_logits = logits * (paragraph_mask) - 1e30 * (1 - paragraph_mask)
return tf.nn.log_softmax(masked_logits, -1), masked_logits
@tf.keras.utils.register_keras_serializable(package='Text')
class SpanLabelingQuantized(tf.keras.Model):
"""Span labeling network head for BERT modeling.
This network implements a simple single-span labeler based on a dense layer.
*Note* that the network is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
input_width: The innermost dimension of the input tensor to this network.
activation: The activation, if any, for the dense layer in this network.
initializer: The initializer for the dense layer in this network. Defaults
to a Glorot uniform initializer.
output: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
input_width,
activation=None,
initializer='glorot_uniform',
output='logits',
**kwargs):
sequence_data = tf.keras.layers.Input(
shape=(None, input_width), name='sequence_data', dtype=tf.float32)
logits_layer = tf.keras.layers.Dense(
2, # This layer predicts start location and end location.
activation=activation,
kernel_initializer=initializer,
name='predictions/transform/logits')
logits_layer = tfmot.quantization.keras.QuantizeWrapperV2(
logits_layer,
configs.Default8BitQuantizeConfig(['kernel'], ['activation'], False))
intermediate_logits = logits_layer(sequence_data)
start_logits, end_logits = self._split_output_tensor(intermediate_logits)
start_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(
start_logits)
end_predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(end_logits)
if output == 'logits':
output_tensors = [start_logits, end_logits]
elif output == 'predictions':
output_tensors = [start_predictions, end_predictions]
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super().__init__(
inputs=[sequence_data], outputs=output_tensors, **kwargs)
config_dict = {
'input_width': input_width,
'activation': activation,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.start_logits = start_logits
self.end_logits = end_logits
def _split_output_tensor(self, tensor):
transposed_tensor = tf.transpose(tensor, [2, 0, 1])
return tf.unstack(transposed_tensor)
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
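# Example (illustrative sketch, not part of the original file):
#
#   network = SpanLabelingQuantized(input_width=768, output='logits')
#   sequence_output = tf.ones((2, 16, 768))  # [batch, seq_length, width]
#   start_logits, end_logits = network(sequence_output)
#   # Each output has shape [batch, seq_length].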
| 4,622 | 38.512821 | 80 | py |
models | models-master/official/projects/qat/nlp/modeling/networks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/modeling/models/bert_span_labeler.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Question Answering model."""
# pylint: disable=g-classes-have-attributes
import collections
import tensorflow as tf
from official.projects.qat.nlp.modeling.networks import span_labeling
@tf.keras.utils.register_keras_serializable(package='Text')
class BertSpanLabelerQuantized(tf.keras.Model):
"""Span labeler model based on a BERT-style transformer-based encoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertSpanLabeler allows a user to pass in a transformer encoder, and
instantiates a span labeling network based on a single dense layer.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Args:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a `get_embedding_table` method.
initializer: The initializer (if any) to use in the span labeling network.
Defaults to a Glorot uniform initializer.
    output: The output style for this network. Can be either `logits` or
`predictions`.
"""
def __init__(self,
network,
initializer='glorot_uniform',
output='logits',
**kwargs):
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a handle to the network inputs for use
# when we construct the Model object at the end of init.
inputs = network.inputs
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
outputs = network(inputs)
if isinstance(outputs, list):
sequence_output = outputs[0]
else:
sequence_output = outputs['sequence_output']
# The input network (typically a transformer model) may get outputs from all
# layers. When this case happens, we retrieve the last layer output.
if isinstance(sequence_output, list):
sequence_output = sequence_output[-1]
# This is an instance variable for ease of access to the underlying task
# network.
span_labeling_quantized = span_labeling.SpanLabelingQuantized(
input_width=sequence_output.shape[-1],
initializer=initializer,
output=output,
name='span_labeling')
start_logits, end_logits = span_labeling_quantized(sequence_output)
# Use identity layers wrapped in lambdas to explicitly name the output
# tensors. This allows us to use string-keyed dicts in Keras fit/predict/
# evaluate calls.
start_logits = tf.keras.layers.Lambda(
tf.identity, name='start_positions')(
start_logits)
end_logits = tf.keras.layers.Lambda(
tf.identity, name='end_positions')(
end_logits)
logits = [start_logits, end_logits]
# b/164516224
# Once we've created the network using the Functional API, we call
# super().__init__ as though we were invoking the Functional API Model
# constructor, resulting in this object having all the properties of a model
# created using the Functional API. Once super().__init__ is called, we
# can assign attributes to `self` - note that all `self` assignments are
# below this line.
super().__init__(
inputs=inputs, outputs=logits, **kwargs)
self._network = network
config_dict = {
'network': network,
'initializer': initializer,
'output': output,
}
# We are storing the config dict as a namedtuple here to ensure checkpoint
# compatibility with an earlier version of this model which did not track
# the config dict attribute. TF does not track immutable attrs which
# do not contain Trackables, so by creating a config namedtuple instead of
# a dict we avoid tracking it.
config_cls = collections.namedtuple('Config', config_dict.keys())
self._config = config_cls(**config_dict)
self.span_labeling = span_labeling_quantized
@property
def checkpoint_items(self):
return dict(encoder=self._network)
def get_config(self):
return dict(self._config._asdict())
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
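# Example (illustrative sketch, not part of the original file; assumes a
# BERT-style encoder such as official.nlp.modeling.networks.BertEncoder, and
# int32 tensors `word_ids`, `mask`, `type_ids` of shape [batch, seq_length]):
#
#   encoder = networks.BertEncoder(vocab_size=30522, num_layers=2)
#   model = BertSpanLabelerQuantized(network=encoder)
#   start_logits, end_logits = model(dict(
#       input_word_ids=word_ids, input_mask=mask, input_type_ids=type_ids))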
| 5,056 | 39.134921 | 80 | py |
models | models-master/official/projects/qat/nlp/modeling/models/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/transformer_encoder_block.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based TransformerEncoder block layer."""
from typing import Optional
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.nlp.modeling.layers.multi_head_attention import MultiHeadAttentionQuantized
from official.projects.qat.nlp.quantization import configs
from official.projects.qat.nlp.quantization import wrappers
def _quantized_multi_head_attention(*args, **kwargs):
layer = MultiHeadAttentionQuantized(*args, **kwargs)
return wrappers.MultiHeadAttentionQuantizeWrapper(
layer, configs.DefaultMultiHeadAttentionQuantizeConfig())
def _quantized_einsum_dense(*args, **kwargs):
layer = tf.keras.layers.EinsumDense(*args, **kwargs)
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.DefaultEinsumDenseQuantizeConfig())
def _output_quantize(layer):
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.Default8BitOutputQuantizeConfig())
class TransformerEncoderBlockQuantized(tf.keras.layers.Layer):
"""TransformerEncoderBlock layer.
This layer implements the Transformer Encoder from
"Attention Is All You Need". (https://arxiv.org/abs/1706.03762),
which combines a `tf.keras.layers.MultiHeadAttention` layer with a
two-layer feedforward network.
References:
[Attention Is All You Need](https://arxiv.org/abs/1706.03762)
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805)
"""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
"""Initializes `TransformerEncoderBlock`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
      **kwargs: keyword arguments.
"""
super().__init__(**kwargs)
if output_range is not None:
logging.warning("`output_range` is available as an argument for `call()`."
"The `output_range` as __init__ argument is deprecated.")
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = self._kernel_initializer
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
if len(input_tensor_shape.as_list()) != 3:
raise ValueError("TransformerEncoderBlock expects a three-dimensional "
"input of shape [batch, sequence, width].")
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = _quantized_multi_head_attention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = _output_quantize(
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = _quantized_einsum_dense(
"abc,cd->abd",
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=self._kernel_initializer,
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = _output_quantize(
tf.keras.layers.Activation(
self._inner_activation, dtype=policy))
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = _quantized_einsum_dense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=self._kernel_initializer,
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = _output_quantize(
tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._add = _output_quantize(tf.keras.layers.Add())
self._output_add = tf.keras.layers.Add()
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"inner_dim":
self._inner_dim,
"inner_activation":
self._inner_activation,
"output_dropout":
self._output_dropout_rate,
"attention_dropout":
self._attention_dropout_rate,
"output_range":
self._output_range,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint),
"use_bias":
self._use_bias,
"norm_first":
self._norm_first,
"norm_epsilon":
self._norm_epsilon,
"inner_dropout":
self._inner_dropout,
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer),
"attention_axes": self._attention_axes,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, output_range: Optional[tf.Tensor] = None):
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`query tensor`, `key value tensor`,
`attention mask`] to have separate input streams for the query, and
key/value to the multi-head attention.
output_range: the sequence output range, [0, output_range) for slicing the
target sequence. `None` means the target sequence is not sliced. If you
would like to have no change to the model training, it is better to only
set the `output_range` for serving.
Returns:
      An output tensor with the same dimensions as the input/query tensor.
"""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError("Unexpected inputs to %s with length at %d" %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if output_range is None:
output_range = self._output_range
if output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor, value=key_value, attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = self._add([source_tensor, attention_output])
else:
attention_output = self._attention_layer_norm(
self._add([target_tensor, attention_output]))
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return self._output_add([source_attention_output, layer_output])
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(
self._output_add([layer_output, attention_output]))
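# Example (illustrative sketch, not part of the original file):
#
#   block = TransformerEncoderBlockQuantized(
#       num_attention_heads=8, inner_dim=3072, inner_activation='gelu')
#   embeddings = tf.ones((2, 16, 512))     # [batch, seq_length, hidden_size]
#   attention_mask = tf.ones((2, 16, 16))  # [batch, seq_length, seq_length]
#   outputs = block([embeddings, attention_mask])  # same shape as embeddings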
| 14,705 | 41.626087 | 102 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/mobile_bert_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileBERT embedding and transformer layers."""
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.nlp import modeling
from official.projects.qat.nlp.modeling.layers.multi_head_attention import MultiHeadAttentionQuantized
from official.projects.qat.nlp.quantization import configs
from official.projects.qat.nlp.quantization import helper
from official.projects.qat.nlp.quantization import wrappers
def _quantized_multi_head_attention(*args, **kwargs):
layer = MultiHeadAttentionQuantized(*args, **kwargs)
return wrappers.MultiHeadAttentionQuantizeWrapper(
layer, configs.DefaultMultiHeadAttentionQuantizeConfig())
def _quantized_einsum_dense(*args, **kwargs):
layer = tf.keras.layers.EinsumDense(*args, **kwargs)
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.DefaultEinsumDenseQuantizeConfig())
def _output_quantize(layer):
return tfmot.quantization.keras.QuantizeWrapperV2(
layer, configs.Default8BitOutputQuantizeConfig())
@tf.keras.utils.register_keras_serializable(package='Text')
class NoNormQuantized(tf.keras.layers.Layer):
"""Apply element-wise linear transformation to the last dimension."""
def __init__(self, name=None):
super().__init__(name=name)
def build(self, shape):
    kernel_size = shape[-1]
    self.bias = self.add_weight('beta',
                                shape=[kernel_size],
                                initializer='zeros')
    self.scale = self.add_weight('gamma',
                                 shape=[kernel_size],
                                 initializer='ones')
self.multiply = _output_quantize(
tf.keras.layers.Multiply())
def call(self, feature):
broadcast_shape = tf.shape(feature)
scale = tf.broadcast_to(self.scale, broadcast_shape)
output = self.multiply([feature, scale])
return output + self.bias
def _get_norm_layer(normalization_type='no_norm', name=None):
"""Get normlization layer.
Args:
    normalization_type: String. The type of normalization; only `no_norm`
      and `layer_norm` are supported.
name: Name for the norm layer.
Returns:
layer norm class.
"""
if normalization_type == 'no_norm':
layer = NoNormQuantized(name=name)
elif normalization_type == 'layer_norm':
layer = tf.keras.layers.LayerNormalization(
name=name,
axis=-1,
epsilon=1e-12,
dtype=tf.float32)
else:
raise NotImplementedError('Only "no_norm" and "layer_norm" are supported.')
return layer
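# Example (illustrative, not part of the original file): picking the
# normalization layer. 'no_norm' returns the element-wise NoNormQuantized
# transformation (feature * gamma + beta), while 'layer_norm' returns a
# regular Keras LayerNormalization; the input shape below is an assumption.
#
#   norm = _get_norm_layer('no_norm', name='embedding_norm')
#   output = norm(tf.ones([2, 8, 16]))  # same shape as the input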
class MobileBertEmbeddingQuantized(helper.LayerQuantizerHelper,
tf.keras.layers.Layer):
"""Performs an embedding lookup for MobileBERT.
This layer includes word embedding, token type embedding, position embedding.
"""
def __init__(self,
word_vocab_size,
word_embed_size,
type_vocab_size,
output_embed_size,
max_sequence_length=512,
normalization_type='no_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
dropout_rate=0.1,
**kwargs):
"""Class initialization.
Args:
word_vocab_size: Number of words in the vocabulary.
word_embed_size: Word embedding size.
type_vocab_size: Number of word types.
output_embed_size: Embedding size for the final embedding output.
max_sequence_length: Maximum length of input sequence.
      normalization_type: String. The type of normalization; only `no_norm`
        and `layer_norm` are supported.
initializer: The initializer to use for the embedding weights and linear
projection weights.
dropout_rate: Dropout rate.
**kwargs: keyword arguments.
"""
super().__init__(**kwargs)
self.word_vocab_size = word_vocab_size
self.word_embed_size = word_embed_size
self.type_vocab_size = type_vocab_size
self.output_embed_size = output_embed_size
self.max_sequence_length = max_sequence_length
self.normalization_type = normalization_type
self.initializer = tf.keras.initializers.get(initializer)
self.dropout_rate = dropout_rate
self.word_embedding = modeling.layers.OnDeviceEmbedding(
self.word_vocab_size,
self.word_embed_size,
initializer=initializer,
name='word_embedding')
self.type_embedding = modeling.layers.OnDeviceEmbedding(
self.type_vocab_size,
self.output_embed_size,
initializer=initializer,
name='type_embedding')
self.pos_embedding = modeling.layers.PositionEmbedding(
max_length=max_sequence_length,
initializer=initializer,
name='position_embedding')
self.word_embedding_proj = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.output_embed_size],
kernel_initializer=initializer,
bias_axes='d',
name='embedding_projection')
self.embedding_out_add_pos = _output_quantize(tf.keras.layers.Add())
self.layer_norm = _output_quantize(
_get_norm_layer(normalization_type, 'embedding_norm'))
self.dropout_layer = tf.keras.layers.Dropout(
self.dropout_rate,
name='embedding_dropout')
self.embedding_out_add_type = _output_quantize(tf.keras.layers.Add())
def build(self, input_shape):
self._add_quantizer('word_embedding_out')
self._add_quantizer('pos_embedding_out')
self._add_quantizer('type_embedding_out')
self._build_quantizer_vars()
def get_config(self):
config = {
'word_vocab_size': self.word_vocab_size,
'word_embed_size': self.word_embed_size,
'type_vocab_size': self.type_vocab_size,
'output_embed_size': self.output_embed_size,
'max_sequence_length': self.max_sequence_length,
'normalization_type': self.normalization_type,
'initializer': tf.keras.initializers.serialize(self.initializer),
'dropout_rate': self.dropout_rate
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, input_ids, token_type_ids=None, training=None):
word_embedding_out = self.word_embedding(input_ids)
word_embedding_out = self._apply_quantizer(
'word_embedding_out', word_embedding_out, training)
word_embedding_out = tf.concat(
[tf.pad(word_embedding_out[:, 1:], ((0, 0), (0, 1), (0, 0))),
word_embedding_out,
tf.pad(word_embedding_out[:, :-1], ((0, 0), (1, 0), (0, 0)))],
axis=2)
word_embedding_out = self.word_embedding_proj(word_embedding_out)
pos_embedding_out = self.pos_embedding(word_embedding_out)
pos_embedding_out = self._apply_quantizer(
'pos_embedding_out', pos_embedding_out, training)
embedding_out = self.embedding_out_add_pos([
word_embedding_out, pos_embedding_out])
if token_type_ids is not None:
type_embedding_out = self.type_embedding(token_type_ids)
type_embedding_out = self._apply_quantizer(
'type_embedding_out', type_embedding_out, training)
embedding_out = self.embedding_out_add_type([
embedding_out, type_embedding_out])
embedding_out = self.layer_norm(embedding_out)
embedding_out = self.dropout_layer(embedding_out)
return embedding_out
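# Example usage (illustrative sketch, not part of the original file); the
# vocabulary and embedding sizes below are arbitrary assumptions.
#
#   embedding = MobileBertEmbeddingQuantized(
#       word_vocab_size=100, word_embed_size=64, type_vocab_size=2,
#       output_embed_size=128)
#   input_ids = tf.ones([2, 16], dtype=tf.int32)
#   token_type_ids = tf.zeros([2, 16], dtype=tf.int32)
#   output = embedding(input_ids, token_type_ids)  # -> [2, 16, 128]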
class MobileBertTransformerQuantized(tf.keras.layers.Layer):
"""Transformer block for MobileBERT.
An implementation of one layer (block) of Transformer with bottleneck and
  inverted-bottleneck for MobileBERT.
Original paper for MobileBERT:
https://arxiv.org/pdf/2004.02984.pdf
"""
def __init__(self,
hidden_size=512,
num_attention_heads=4,
intermediate_size=512,
intermediate_act_fn='relu',
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
intra_bottleneck_size=128,
use_bottleneck_attention=False,
key_query_shared_bottleneck=True,
num_feedforward_networks=4,
normalization_type='no_norm',
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
**kwargs):
"""Class initialization.
Args:
hidden_size: Hidden size for the Transformer input and output tensor.
num_attention_heads: Number of attention heads in the Transformer.
intermediate_size: The size of the "intermediate" (a.k.a., feed forward)
layer.
intermediate_act_fn: The non-linear activation function to apply to the
output of the intermediate/feed-forward layer.
hidden_dropout_prob: Dropout probability for the hidden layers.
attention_probs_dropout_prob: Dropout probability of the attention
probabilities.
intra_bottleneck_size: Size of bottleneck.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: Whether to share linear transformation for
keys and queries.
num_feedforward_networks: Number of stacked feed-forward networks.
      normalization_type: The type of normalization; only `no_norm` and
`layer_norm` are supported. `no_norm` represents the element-wise linear
transformation for the student model, as suggested by the original
MobileBERT paper. `layer_norm` is used for the teacher model.
initializer: The initializer to use for the embedding weights and linear
projection weights.
**kwargs: keyword arguments.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_act_fn = intermediate_act_fn
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.intra_bottleneck_size = intra_bottleneck_size
self.use_bottleneck_attention = use_bottleneck_attention
self.key_query_shared_bottleneck = key_query_shared_bottleneck
self.num_feedforward_networks = num_feedforward_networks
self.normalization_type = normalization_type
self.initializer = tf.keras.initializers.get(initializer)
if intra_bottleneck_size % num_attention_heads != 0:
raise ValueError(
(f'The bottleneck size {intra_bottleneck_size} is not a multiple '
f'of the number of attention heads {num_attention_heads}.'))
attention_head_size = int(intra_bottleneck_size / num_attention_heads)
self.block_layers = {}
# add input bottleneck
dense_layer_2d = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=initializer,
name='bottleneck_input/dense')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='bottleneck_input/norm'))
self.block_layers['bottleneck_input'] = [dense_layer_2d,
layer_norm]
if self.key_query_shared_bottleneck:
dense_layer_2d = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=initializer,
name='kq_shared_bottleneck/dense')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='kq_shared_bottleneck/norm'))
self.block_layers['kq_shared_bottleneck'] = [dense_layer_2d,
layer_norm]
# add attention layer
attention_layer = _quantized_multi_head_attention(
num_heads=self.num_attention_heads,
key_dim=attention_head_size,
value_dim=attention_head_size,
dropout=self.attention_probs_dropout_prob,
output_shape=self.intra_bottleneck_size,
kernel_initializer=initializer,
name='attention')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='attention/norm'))
self.block_layers['attention'] = [attention_layer,
layer_norm]
# add stacked feed-forward networks (ffn)
self.block_layers['ffn'] = []
self.ffn_add_layers = []
for ffn_layer_idx in range(self.num_feedforward_networks):
layer_prefix = f'ffn_layer_{ffn_layer_idx}'
layer_name = layer_prefix + '/intermediate_dense'
intermediate_layer = _quantized_einsum_dense(
'abc,cd->abd',
activation=self.intermediate_act_fn,
output_shape=[None, self.intermediate_size],
bias_axes='d',
kernel_initializer=initializer,
name=layer_name)
layer_name = layer_prefix + '/output_dense'
output_layer = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.intra_bottleneck_size],
bias_axes='d',
kernel_initializer=initializer,
name=layer_name)
layer_name = layer_prefix + '/norm'
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name=layer_name))
self.block_layers['ffn'].append([intermediate_layer,
output_layer,
layer_norm])
self.ffn_add_layers.append(_output_quantize(
tf.keras.layers.Add()))
# add output bottleneck
bottleneck = _quantized_einsum_dense(
'abc,cd->abd',
output_shape=[None, self.hidden_size],
activation=None,
bias_axes='d',
kernel_initializer=initializer,
name='bottleneck_output/dense')
dropout_layer = tf.keras.layers.Dropout(
self.hidden_dropout_prob,
name='bottleneck_output/dropout')
layer_norm = _output_quantize(
_get_norm_layer(self.normalization_type,
name='bottleneck_output/norm'))
self.block_layers['bottleneck_output'] = [bottleneck,
dropout_layer,
layer_norm]
self.attention_output_add = _output_quantize(
tf.keras.layers.Add())
self.output_add = _output_quantize(
tf.keras.layers.Add())
def get_config(self):
config = {
'hidden_size': self.hidden_size,
'num_attention_heads': self.num_attention_heads,
'intermediate_size': self.intermediate_size,
'intermediate_act_fn': self.intermediate_act_fn,
'hidden_dropout_prob': self.hidden_dropout_prob,
'attention_probs_dropout_prob': self.attention_probs_dropout_prob,
'intra_bottleneck_size': self.intra_bottleneck_size,
'use_bottleneck_attention': self.use_bottleneck_attention,
'key_query_shared_bottleneck': self.key_query_shared_bottleneck,
'num_feedforward_networks': self.num_feedforward_networks,
'normalization_type': self.normalization_type,
'initializer': tf.keras.initializers.serialize(self.initializer),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
input_tensor,
attention_mask=None,
return_attention_scores=False):
"""Implementes the forward pass.
Args:
input_tensor: Float tensor of shape `(batch_size, seq_length,
hidden_size)`.
attention_mask: (optional) int32 tensor of shape `(batch_size, seq_length,
seq_length)`, with 1 for positions that can be attended to and 0 in
positions that should not be.
      return_attention_scores: Whether to return the attention scores.
Returns:
layer_output: Float tensor of shape
`(batch_size, seq_length, hidden_size)`.
      attention_scores: (Optional) The attention scores; only returned when
        return_attention_scores is True.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
input_width = input_tensor.shape.as_list()[-1]
if input_width != self.hidden_size:
raise ValueError(
(f'The width of the input tensor {input_width} != '
f'hidden size {self.hidden_size}'))
prev_output = input_tensor
# input bottleneck
dense_layer = self.block_layers['bottleneck_input'][0]
layer_norm = self.block_layers['bottleneck_input'][1]
layer_input = dense_layer(prev_output)
layer_input = layer_norm(layer_input)
if self.use_bottleneck_attention:
key_tensor = layer_input
query_tensor = layer_input
value_tensor = layer_input
elif self.key_query_shared_bottleneck:
dense_layer = self.block_layers['kq_shared_bottleneck'][0]
layer_norm = self.block_layers['kq_shared_bottleneck'][1]
shared_attention_input = dense_layer(prev_output)
shared_attention_input = layer_norm(shared_attention_input)
key_tensor = shared_attention_input
query_tensor = shared_attention_input
value_tensor = prev_output
else:
key_tensor = prev_output
query_tensor = prev_output
value_tensor = prev_output
# attention layer
attention_layer = self.block_layers['attention'][0]
layer_norm = self.block_layers['attention'][1]
attention_output, attention_scores = attention_layer(
query_tensor,
value_tensor,
key_tensor,
attention_mask,
return_attention_scores=True,
)
attention_output = layer_norm(
self.attention_output_add([attention_output, layer_input]))
# stacked feed-forward networks
layer_input = attention_output
for ffn_idx in range(self.num_feedforward_networks):
intermediate_layer = self.block_layers['ffn'][ffn_idx][0]
output_layer = self.block_layers['ffn'][ffn_idx][1]
layer_norm = self.block_layers['ffn'][ffn_idx][2]
intermediate_output = intermediate_layer(layer_input)
layer_output = output_layer(intermediate_output)
layer_output = layer_norm(
self.ffn_add_layers[ffn_idx]([layer_output, layer_input]))
layer_input = layer_output
# output bottleneck
bottleneck = self.block_layers['bottleneck_output'][0]
dropout_layer = self.block_layers['bottleneck_output'][1]
layer_norm = self.block_layers['bottleneck_output'][2]
layer_output = bottleneck(layer_output)
layer_output = dropout_layer(layer_output)
layer_output = layer_norm(self.output_add([layer_output, prev_output]))
if return_attention_scores:
return layer_output, attention_scores
else:
return layer_output
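# Example usage (illustrative sketch, not part of the original file): a single
# quantized MobileBERT transformer block. The last dimension of the input must
# equal `hidden_size`; all sizes below are assumptions.
#
#   block = MobileBertTransformerQuantized(
#       hidden_size=512, num_attention_heads=4, intermediate_size=512)
#   hidden_states = tf.ones([2, 16, 512])
#   mask = tf.ones([2, 16, 16], dtype=tf.int32)
#   output = block(hidden_states, attention_mask=mask)  # -> [2, 16, 512]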
| 19,438 | 38.997942 | 102 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/multi_head_attention.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantized multi head attention layer."""
import math
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from official.projects.qat.nlp.quantization import helper
# The mask adder before softmax is -6 in the int8 model (e^-6 < 1/256).
_MASK_CONSTANT_FOR_INT8_QUANTIZATION = 6
class MultiHeadAttentionQuantized(helper.LayerQuantizerHelper,
tf.keras.layers.MultiHeadAttention):
"""Quantized multi head attention layer.
  This layer only quantizes the _compute_attention part. EinsumDense child
  layers should be quantized via the QuantizeConfig.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._compute_attention_first_call = True
def _build_from_signature(self, *args, **kwargs):
super()._build_from_signature( # pytype: disable=attribute-error # typed-keras
*args, **kwargs)
self._add_quantizer('query')
self._add_quantizer('attention_scores')
self._add_quantizer('attention_output')
self._add_quantizer('masked_softmax_attention_mask',
all_value_quantizer=True)
self._add_quantizer('masked_softmax_sub1')
self._add_quantizer('masked_softmax_mask1')
self._add_quantizer('masked_softmax_sub2')
self._add_quantizer('masked_softmax_clamp', all_value_quantizer=True)
self._add_quantizer('masked_softmax_mask2', all_value_quantizer=True)
self._add_quantizer('masked_softmax_adder_sub', all_value_quantizer=True)
self._add_quantizer('masked_softmax_adder_mul', all_value_quantizer=True)
self._add_quantizer('masked_softmax_add', all_value_quantizer=True)
def _masked_softmax(
self, attention_scores, attention_mask=None, training=None):
"""Normalize the attention scores to probabilities."""
# `attention_scores` = [B, N, T, S]
if attention_mask is None:
return self._softmax(attention_scores)
# The expand dim happens starting from the `num_heads` dimension,
# (<batch_dims>, num_heads, <query_attention_dims, key_attention_dims>)
mask_expansion_axes = [-len(self._attention_axes) * 2 - 1]
for _ in range(len(attention_scores.shape) - len(attention_mask.shape)):
attention_mask = array_ops.expand_dims(
attention_mask, axis=mask_expansion_axes)
if attention_scores.dtype != attention_mask.dtype:
attention_mask = tf.cast(attention_mask, attention_scores.dtype)
attention_mask = self._apply_quantizer(
'masked_softmax_attention_mask', attention_mask, training)
    # Make attention_scores >= 0 to avoid the masked maximum value being 0.
attention_scores -= math_ops.reduce_min(
attention_scores, axis=-1, keepdims=True)
attention_scores = self._apply_quantizer(
'masked_softmax_sub1', attention_scores, training)
attention_scores *= attention_mask
attention_scores = self._apply_quantizer(
'masked_softmax_mask1', attention_scores, training)
    # Make attention_scores <= 0, so that the maximum value becomes 0.
attention_scores -= math_ops.reduce_max(
attention_scores, axis=-1, keepdims=True)
attention_scores = self._apply_quantizer(
'masked_softmax_sub2', attention_scores, training)
# Clip the range of values [-6, 0].
attention_scores = tf.clip_by_value(
attention_scores, clip_value_min=-6, clip_value_max=0)
attention_scores = self._apply_quantizer(
'masked_softmax_clamp', attention_scores, training)
    # We essentially hard-code the to-be-masked-out part to -6.
    # The maximum value is 0. This is reasonable for 8-bit quantization because
    # e^(-6) / e^(0) < 1/256.
attention_scores *= attention_mask
attention_scores = self._apply_quantizer(
'masked_softmax_mask2', attention_scores, training)
adder = attention_mask - 1.0
adder = self._apply_quantizer('masked_softmax_adder_sub', adder, training)
adder *= _MASK_CONSTANT_FOR_INT8_QUANTIZATION
adder = self._apply_quantizer('masked_softmax_adder_mul', adder, training)
attention_scores += adder
attention_scores = self._apply_quantizer(
'masked_softmax_add', attention_scores, training)
return self._softmax(attention_scores)
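  # Worked numbers behind the -6 constant above (added note, not in the
  # original file): e^(-6) ~= 0.0025, while one quantization step of an 8-bit
  # softmax output in [0, 1] is 1/256 ~= 0.0039. Masked positions therefore
  # contribute less than one quantization level after the softmax.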
def _compute_attention(self,
query,
key,
value,
attention_mask=None,
training=None):
"""Applies Dot-product attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.
      key: Projected key `Tensor` of shape `[B, S, N, key_dim]`.
      value: Projected value `Tensor` of shape `[B, S, N, value_dim]`.
attention_mask: a boolean mask of shape `[B, T, S]`, that prevents
attention to certain positions.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
if self._compute_attention_first_call:
self._build_quantizer_vars()
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = math_ops.multiply(query, 1.0 / math.sqrt(float(self._key_dim)))
query = self._apply_quantizer('query', query, training)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = special_math_ops.einsum(self._dot_product_equation, key,
query)
attention_scores = self._apply_quantizer(
'attention_scores', attention_scores, training)
attention_scores = self._masked_softmax(
attention_scores, attention_mask, training)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_scores_dropout = self._dropout_layer(
attention_scores, training=training)
# `context_layer` = [B, T, N, H]
attention_output = special_math_ops.einsum(self._combine_equation,
attention_scores_dropout, value)
attention_output = self._apply_quantizer(
'attention_output', attention_output, training)
self._compute_attention_first_call = False
return attention_output, attention_scores
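# Example usage (illustrative sketch, not part of the original file): the layer
# is a drop-in replacement for tf.keras.layers.MultiHeadAttention, with the
# extra quantizers built lazily on the first call. Shapes are assumptions.
#
#   layer = MultiHeadAttentionQuantized(num_heads=2, key_dim=64)
#   query = tf.ones([2, 4, 16])
#   value = tf.ones([2, 8, 16])
#   output = layer(query, value)  # -> [2, 4, 16]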
| 7,445 | 42.8 | 84 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/transformer_encoder_block_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Keras-based quantized transformer block layer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.qat.nlp.modeling.layers.transformer_encoder_block import TransformerEncoderBlockQuantized
@parameterized.named_parameters(
('base', TransformerEncoderBlockQuantized))
class TransformerEncoderBlockQuantizedLayerTest(
tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerEncoderBlockQuantizedLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
def test_layer_creation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_invocation(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_output_range(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048, inner_activation='relu')
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_without_mask(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
output_tensor = test_layer(input_data)
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer(input_data, output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer(input_data, output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_layer_output_range_with_pre_norm(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10, inner_dim=2048,
inner_activation='relu', norm_first=True)
sequence_length = 21
width = 80
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
output_tensor = test_layer([input_data, mask_data])
# The layer only attends to the first token and outputs the first token
# embedding.
new_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
norm_first=True)
_ = new_layer([input_data, mask_data], output_range=1)
new_layer.set_weights(test_layer.get_weights())
new_output_tensor = new_layer([input_data, mask_data], output_range=1)
self.assertAllClose(
new_output_tensor, output_tensor[:, 0:1, :], atol=5e-5, rtol=0.003)
def test_transform_with_initializer(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
def test_dynamic_layer_sequence(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
width = 30
input_tensor = tf.keras.Input(shape=(None, width))
output_tensor = test_layer(input_tensor)
model = tf.keras.Model(input_tensor, output_tensor)
input_length = 17
input_data = np.ones((1, input_length, width))
output_data = model.predict(input_data)
self.assertAllEqual([1, input_length, width], output_data.shape)
def test_separate_qkv(self, transformer_cls):
test_layer = transformer_cls(
num_attention_heads=2,
inner_dim=128,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Forward path.
q_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)
kv_tensor = tf.zeros([2, 8, 16], dtype=tf.float32)
dummy_mask = tf.zeros([2, 4, 8], dtype=tf.float32)
inputs = [q_tensor, kv_tensor, dummy_mask]
output = test_layer(inputs)
self.assertEqual(output.shape, q_tensor.shape)
if __name__ == '__main__':
tf.test.main()
| 9,288 | 39.920705 | 112 | py |
models | models-master/official/projects/qat/nlp/modeling/layers/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/tasks/question_answering.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Question/Answering configuration definition."""
import dataclasses
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.core import task_factory
from official.nlp import modeling
from official.nlp.tasks import question_answering
from official.projects.qat.nlp.modeling.layers import mobile_bert_layers
from official.projects.qat.nlp.modeling.layers import transformer_encoder_block
from official.projects.qat.nlp.modeling.models import bert_span_labeler
from official.projects.qat.nlp.quantization import configs
from official.projects.qat.nlp.quantization import schemes
@dataclasses.dataclass
class QuantizedModelQAConfig(question_answering.QuestionAnsweringConfig):
pass
@task_factory.register_task_cls(QuantizedModelQAConfig)
class QuantizedModelQATask(question_answering.QuestionAnsweringTask):
"""Task object for question answering with QAT."""
def build_model(self):
model = super(QuantizedModelQATask, self).build_model()
# pylint: disable=protected-access
encoder_network = model._network
# pylint: enable=protected-access
with tfmot.quantization.keras.quantize_scope({
'TruncatedNormal':
tf.keras.initializers.TruncatedNormal,
'MobileBertTransformerQuantized':
mobile_bert_layers.MobileBertTransformerQuantized,
'MobileBertEmbeddingQuantized':
mobile_bert_layers.MobileBertEmbeddingQuantized,
'TransformerEncoderBlockQuantized':
transformer_encoder_block.TransformerEncoderBlockQuantized,
'NoQuantizeConfig':
configs.NoQuantizeConfig,
}):
def quantize_annotate_layer(layer):
if isinstance(layer, (tf.keras.layers.LayerNormalization)):
return tfmot.quantization.keras.quantize_annotate_layer(
layer, configs.Default8BitOutputQuantizeConfig())
if isinstance(layer, (tf.keras.layers.Dense,
tf.keras.layers.Dropout)):
return tfmot.quantization.keras.quantize_annotate_layer(layer)
if isinstance(layer, (modeling.layers.TransformerEncoderBlock,
modeling.layers.MobileBertTransformer,
modeling.layers.MobileBertEmbedding)):
return tfmot.quantization.keras.quantize_annotate_layer(
layer, configs.NoQuantizeConfig())
return layer
annotated_encoder_network = tf.keras.models.clone_model(
encoder_network,
clone_function=quantize_annotate_layer,
)
quantized_encoder_network = tfmot.quantization.keras.quantize_apply(
annotated_encoder_network, scheme=schemes.Default8BitQuantizeScheme())
encoder_cfg = self.task_config.model.encoder.get()
model = bert_span_labeler.BertSpanLabelerQuantized(
network=quantized_encoder_network,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range))
return model
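# Example usage (illustrative sketch, not part of the original file), following
# the accompanying unit test. `encoders` refers to official.nlp.configs.encoders
# and `question_answering_dataloader` to official.nlp.data; the encoder and
# data settings below are arbitrary assumptions.
#
#   config = QuantizedModelQAConfig(
#       model=question_answering.ModelConfig(
#           encoder=encoders.EncoderConfig(
#               bert=encoders.BertEncoderConfig(vocab_size=30522,
#                                               num_layers=1))),
#       train_data=question_answering_dataloader.QADataConfig(
#           input_path='dummy', seq_length=128, global_batch_size=1))
#   task = QuantizedModelQATask(config)
#   model = task.build_model()  # a span labeler with a quantized encoder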
| 3,587 | 41.211765 | 80 | py |
models | models-master/official/projects/qat/nlp/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/nlp/tasks/question_answering_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.tasks.question_answering."""
import json
import os
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.configs import encoders
from official.nlp.data import question_answering_dataloader
from official.nlp.tasks import question_answering as qa_cfg
from official.projects.qat.nlp.tasks import question_answering
class QuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(QuestionAnsweringTaskTest, self).setUp()
self._encoder_config = encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1))
self._train_data_config = question_answering_dataloader.QADataConfig(
input_path="dummy", seq_length=128, global_batch_size=1)
val_data = {
"version":
"1.1",
"data": [{
"paragraphs": [{
"context":
"Sky is blue.",
"qas": [{
"question":
"What is blue?",
"id":
"1234",
"answers": [{
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}, {
"text": "Sky",
"answer_start": 0
}]
}]
}]
}]
}
self._val_input_path = os.path.join(self.get_temp_dir(), "val_data.json")
with tf.io.gfile.GFile(self._val_input_path, "w") as writer:
writer.write(json.dumps(val_data, indent=4) + "\n")
self._test_vocab = os.path.join(self.get_temp_dir(), "vocab.txt")
with tf.io.gfile.GFile(self._test_vocab, "w") as writer:
writer.write("[PAD]\n[UNK]\n[CLS]\n[SEP]\n[MASK]\nsky\nis\nblue\n")
def _get_validation_data_config(self, version_2_with_negative=False):
return question_answering_dataloader.QADataConfig(
is_training=False,
input_path=self._val_input_path,
input_preprocessed_data_path=self.get_temp_dir(),
seq_length=128,
global_batch_size=1,
version_2_with_negative=version_2_with_negative,
vocab_file=self._test_vocab,
tokenization="WordPiece",
do_lower_case=True)
@parameterized.named_parameters(("squad1", False), ("squad2", True))
def test_predict(self, version_2_with_negative):
validation_data = self._get_validation_data_config(
version_2_with_negative=version_2_with_negative)
config = question_answering.QuantizedModelQAConfig(
model=qa_cfg.ModelConfig(encoder=self._encoder_config),
train_data=self._train_data_config,
validation_data=validation_data)
task = question_answering.QuantizedModelQATask(config)
model = task.build_model()
all_predictions, all_nbest, scores_diff = qa_cfg.predict(
task, validation_data, model)
self.assertLen(all_predictions, 1)
self.assertLen(all_nbest, 1)
if version_2_with_negative:
self.assertLen(scores_diff, 1)
else:
self.assertEmpty(scores_diff)
if __name__ == "__main__":
tf.test.main()
| 3,848 | 34.971963 | 77 | py |
models | models-master/official/projects/qat/vision/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration on qat project."""
# pylint: disable=unused-import
from official.projects.qat.vision import configs
from official.projects.qat.vision.modeling import layers
from official.projects.qat.vision.tasks import image_classification
from official.projects.qat.vision.tasks import retinanet
from official.projects.qat.vision.tasks import semantic_segmentation
| 1,002 | 44.590909 | 74 | py |
models | models-master/official/projects/qat/vision/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/vision/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver, including QAT configs.."""
from absl import app
from official.common import flags as tfm_flags
from official.projects.qat.vision import registry_imports # pylint: disable=unused-import
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 960 | 34.592593 | 90 | py |
models | models-master/official/projects/qat/vision/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE = XX
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.qat.vision import registry_imports # pylint: disable=unused-import
from official.projects.qat.vision.serving import export_module
from official.vision import configs
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
_EXPERIMENT = flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
_EXPORT_DIR = flags.DEFINE_string('export_dir', None, 'The export directory.')
_CHECKPOINT_PATH = flags.DEFINE_string('checkpoint_path', None,
'Checkpoint path.')
_CONFIG_FILE = flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', None, 'The batch size.')
_IMAGE_TYPE = flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example` and `tflite`.')
_INPUT_IMAGE_SIZE = flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
_EXPORT_CHECKPOINT_SUBDIR = flags.DEFINE_string(
'export_checkpoint_subdir', 'checkpoint',
'The subdirectory for checkpoints.')
_EXPORT_SAVED_MODEL_SUBDIR = flags.DEFINE_string(
'export_saved_model_subdir', 'saved_model',
'The subdirectory for saved model.')
_LOG_MODEL_FLOPS_AND_PARAMS = flags.DEFINE_bool(
'log_model_flops_and_params', False,
'If true, logs model flops and parameters.')
_INPUT_NAME = flags.DEFINE_string(
'input_name', None,
'Input tensor name in signature def. Default at None which'
'produces input tensor name `inputs`.')
def main(_):
params = exp_factory.get_exp_config(_EXPERIMENT.value)
for config_file in _CONFIG_FILE.value or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if _PARAMS_OVERRIDE.value:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=True)
params.validate()
params.lock()
input_image_size = [int(x) for x in _INPUT_IMAGE_SIZE.value.split(',')]
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module_cls = export_module.ClassificationModule
elif isinstance(params.task, configs.retinanet.RetinaNetTask):
export_module_cls = export_module.DetectionModule
elif isinstance(params.task,
configs.semantic_segmentation.SemanticSegmentationTask):
export_module_cls = export_module.SegmentationModule
else:
raise TypeError(f'Export module for {type(params.task)} is not supported.')
module = export_module_cls(
params=params,
batch_size=_BATCH_SIZE.value,
input_image_size=input_image_size,
input_type=_IMAGE_TYPE.value,
num_channels=3)
export_saved_model_lib.export_inference_graph(
input_type=_IMAGE_TYPE.value,
batch_size=_BATCH_SIZE.value,
input_image_size=input_image_size,
params=params,
checkpoint_path=_CHECKPOINT_PATH.value,
export_dir=_EXPORT_DIR.value,
export_checkpoint_subdir=_EXPORT_CHECKPOINT_SUBDIR.value,
export_saved_model_subdir=_EXPORT_SAVED_MODEL_SUBDIR.value,
export_module=module,
log_model_flops_and_params=_LOG_MODEL_FLOPS_AND_PARAMS.value,
input_name=_INPUT_NAME.value)
if __name__ == '__main__':
app.run(main)
| 5,358 | 37.553957 | 90 | py |
models | models-master/official/projects/qat/vision/serving/export_module.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export modules for QAT model serving/inference."""
import tensorflow as tf
from official.projects.qat.vision.modeling import factory as qat_factory
from official.vision import configs
from official.vision.serving import detection
from official.vision.serving import image_classification
from official.vision.serving import semantic_segmentation
class ClassificationModule(image_classification.ClassificationModule):
"""Classification Module."""
def _build_model(self):
model = super()._build_model()
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
return qat_factory.build_qat_classification_model(
model, self.params.task.quantization, input_specs,
self.params.task.model)
class SegmentationModule(semantic_segmentation.SegmentationModule):
"""Segmentation Module."""
def _build_model(self):
model = super()._build_model()
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
return qat_factory.build_qat_segmentation_model(
model, self.params.task.quantization, input_specs)
class DetectionModule(detection.DetectionModule):
"""Detection Module."""
def _build_model(self):
model = super()._build_model()
if isinstance(self.params.task.model, configs.retinanet.RetinaNet):
model = qat_factory.build_qat_retinanet(model,
self.params.task.quantization,
self.params.task.model)
else:
raise ValueError('Detection module not implemented for {} model.'.format(
type(self.params.task.model)))
return model
| 2,395 | 37.031746 | 79 | py |
models | models-master/official/projects/qat/vision/serving/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/qat/vision/serving/export_tflite.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to convert a saved model to TFLite model for the QAT model."""
from absl import app
from official.projects.qat.vision import registry_imports # pylint: disable=unused-import
from official.vision.serving import export_tflite
if __name__ == '__main__':
app.run(export_tflite.main)
| 904 | 36.708333 | 90 | py |
models | models-master/official/projects/qat/vision/configs/retinanet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for retinanet."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import retinanet as qat_exp_cfg
from official.vision.configs import retinanet as exp_cfg
class RetinaNetConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('retinanet_mobile_coco_qat',),
)
def test_retinanet_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, qat_exp_cfg.RetinaNetTask)
self.assertIsInstance(config.task.model, exp_cfg.RetinaNet)
self.assertIsInstance(config.task.quantization, common.Quantization)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,827 | 37.083333 | 77 | py |
models | models-master/official/projects/qat/vision/configs/retinanet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet configuration definition."""
import dataclasses
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.vision.configs import retinanet
from official.vision.configs import backbones
@dataclasses.dataclass
class RetinaNetTask(retinanet.RetinaNetTask):
quantization: Optional[common.Quantization] = None
@exp_factory.register_config_factory('retinanet_mobile_coco_qat')
def retinanet_mobile_coco() -> cfg.ExperimentConfig:
"""Generates a config for COCO OD RetinaNet for mobile with QAT."""
config = retinanet.retinanet_spinenet_mobile_coco()
task = RetinaNetTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
task.model.backbone = backbones.Backbone(
type='spinenet_mobile',
spinenet_mobile=backbones.SpineNetMobile(
model_id='49',
stochastic_depth_drop_rate=0.2,
min_level=3,
max_level=7,
use_keras_upsampling_2d=True))
config.task = task
return config
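# Example (illustrative, not part of the original file): the registered QAT
# experiment can be retrieved through the experiment factory and overridden
# like any other config.
#
#   config = exp_factory.get_exp_config('retinanet_mobile_coco_qat')
#   config.task.quantization.quantize_detection_head = True
#   config.validate()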
| 1,721 | 34.875 | 74 | py |
models | models-master/official/projects/qat/vision/configs/semantic_segmentation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for retinanet."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import semantic_segmentation as qat_exp_cfg
from official.vision.configs import semantic_segmentation as exp_cfg
class SemanticSegmentationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('mnv2_deeplabv3_pascal_qat',),
('mnv2_deeplabv3_cityscapes_qat',),
('mnv2_deeplabv3plus_cityscapes_qat'))
def test_semantic_segmentation_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, qat_exp_cfg.SemanticSegmentationTask)
self.assertIsInstance(config.task.model, exp_cfg.SemanticSegmentationModel)
self.assertIsInstance(config.task.quantization, common.Quantization)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 2,021 | 41.125 | 85 | py |
models | models-master/official/projects/qat/vision/configs/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet configuration definition."""
import dataclasses
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.vision.configs import semantic_segmentation
@dataclasses.dataclass
class SemanticSegmentationTask(semantic_segmentation.SemanticSegmentationTask):
quantization: Optional[common.Quantization] = None
@exp_factory.register_config_factory('mnv2_deeplabv3_pascal_qat')
def mnv2_deeplabv3_pascal() -> cfg.ExperimentConfig:
"""Generates a config for MobileNet v2 + deeplab v3 with QAT."""
config = semantic_segmentation.mnv2_deeplabv3_pascal()
task = SemanticSegmentationTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
config.task = task
return config
@exp_factory.register_config_factory('mnv2_deeplabv3_cityscapes_qat')
def mnv2_deeplabv3_cityscapes() -> cfg.ExperimentConfig:
"""Generates a config for MobileNet v2 + deeplab v3 with QAT."""
config = semantic_segmentation.mnv2_deeplabv3_cityscapes()
task = SemanticSegmentationTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
config.task = task
return config
@exp_factory.register_config_factory('mnv2_deeplabv3plus_cityscapes_qat')
def mnv2_deeplabv3plus_cityscapes() -> cfg.ExperimentConfig:
"""Generates a config for MobileNet v2 + deeplab v3+ with QAT."""
config = semantic_segmentation.mnv2_deeplabv3plus_cityscapes()
task = SemanticSegmentationTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
config.task = task
return config
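# Usage sketch added for illustration; it is not part of the original file.
# The factories above register experiment names with `exp_factory`, so a QAT
# experiment config can be fetched by name and adjusted before training. The
# 4-bit override below is an assumption chosen only for the example.
def _example_get_qat_experiment() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('mnv2_deeplabv3_pascal_qat')
  config.task.quantization.change_num_bits = True
  config.task.quantization.num_bits_weight = 4
  config.task.quantization.num_bits_activation = 4
  return config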
| 2,280 | 38.327586 | 79 | py |
models | models-master/official/projects/qat/vision/configs/common.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification configuration definition."""
import dataclasses
from typing import Optional
from official.modeling import hyperparams
@dataclasses.dataclass
class Quantization(hyperparams.Config):
"""Quantization parameters.
Attributes:
    pretrained_original_checkpoint: A `str` indicating the pretrained checkpoint
      location.
    change_num_bits: A `bool` indicating whether to manually allocate num_bits.
    num_bits_weight: An `int` number of bits for weight. Default to 8.
    num_bits_activation: An `int` number of bits for activation. Default to 8.
    quantize_detection_decoder: A `bool` indicating whether to quantize the
      detection decoder. It only works for detection models.
    quantize_detection_head: A `bool` indicating whether to quantize the
      detection head. It only works for detection models.
"""
pretrained_original_checkpoint: Optional[str] = None
change_num_bits: bool = False
num_bits_weight: int = 8
num_bits_activation: int = 8
quantize_detection_decoder: bool = False
quantize_detection_head: bool = False
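# Illustrative sketch (not part of the original module): constructing a
# Quantization config that manually lowers the bit widths. Field names come
# from the dataclass above; the 4/4 values are assumptions for the example.
def _example_quantization_config() -> Quantization:
  return Quantization(
      change_num_bits=True,
      num_bits_weight=4,
      num_bits_activation=4)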
| 1,674 | 37.068182 | 80 | py |
models | models-master/official/projects/qat/vision/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import image_classification
from official.projects.qat.vision.configs import retinanet
from official.projects.qat.vision.configs import semantic_segmentation
| 899 | 44 | 74 | py |
models | models-master/official/projects/qat/vision/configs/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_classification."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import image_classification as qat_exp_cfg
from official.vision.configs import image_classification as exp_cfg
class ImageClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('resnet_imagenet_qat',),
('mobilenet_imagenet_qat',),
)
def test_image_classification_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, qat_exp_cfg.ImageClassificationTask)
self.assertIsInstance(config.task.model,
exp_cfg.ImageClassificationModel)
self.assertIsInstance(config.task.quantization, common.Quantization)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,939 | 38.591837 | 84 | py |
models | models-master/official/projects/qat/vision/configs/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification configuration definition."""
import dataclasses
from typing import Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.vision.configs import image_classification
@dataclasses.dataclass
class ImageClassificationTask(image_classification.ImageClassificationTask):
quantization: Optional[common.Quantization] = None
@exp_factory.register_config_factory('resnet_imagenet_qat')
def image_classification_imagenet() -> cfg.ExperimentConfig:
"""Builds an image classification config for the resnet with QAT."""
config = image_classification.image_classification_imagenet()
task = ImageClassificationTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
config.task = task
runtime = cfg.RuntimeConfig(enable_xla=False)
config.runtime = runtime
return config
@exp_factory.register_config_factory('mobilenet_imagenet_qat')
def image_classification_imagenet_mobilenet() -> cfg.ExperimentConfig:
"""Builds an image classification config for the mobilenetV2 with QAT."""
config = image_classification.image_classification_imagenet_mobilenet()
task = ImageClassificationTask.from_args(
quantization=common.Quantization(), **config.task.as_dict())
config.task = task
return config
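# Illustrative sketch only (not in the original file): fetching a registered
# QAT experiment by name and validating it, mirroring what the config tests
# for this module do.
def _example_resnet_qat_experiment() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('resnet_imagenet_qat')
  config.validate()
  return config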
| 1,986 | 36.490566 | 76 | py |
models | models-master/official/projects/qat/vision/n_bit/schemes.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization schemes."""
from typing import Type
# Import libraries
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.n_bit import configs
from official.projects.qat.vision.n_bit import nn_blocks
keras = tf.keras
default_n_bit_transforms = tfmot.quantization.keras.experimental.default_n_bit.default_n_bit_transforms
_LayerNode = tfmot.quantization.keras.graph_transformations.transforms.LayerNode
_LayerPattern = tfmot.quantization.keras.graph_transformations.transforms.LayerPattern
_ModelTransformer = tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer
_QUANTIZATION_WEIGHT_NAMES = [
'output_max', 'output_min', 'optimizer_step',
'kernel_min', 'kernel_max',
'depthwise_kernel_min', 'depthwise_kernel_max',
'reduce_mean_quantizer_vars_min', 'reduce_mean_quantizer_vars_max']
_ORIGINAL_WEIGHT_NAME = [
'kernel', 'depthwise_kernel',
'gamma', 'beta', 'moving_mean', 'moving_variance',
'bias']
class CustomLayerQuantize(
tfmot.quantization.keras.graph_transformations.transforms.Transform):
"""Add QAT support for Keras Custom layer."""
def __init__(self,
original_layer_pattern: str,
quantized_layer_class: Type[keras.layers.Layer],
num_bits_weight: int = 8,
num_bits_activation: int = 8):
super().__init__()
self._original_layer_pattern = original_layer_pattern
self._quantized_layer_class = quantized_layer_class
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
def pattern(self) -> _LayerPattern:
"""See base class."""
return _LayerPattern(self._original_layer_pattern)
def _is_quantization_weight_name(self, name):
simple_name = name.split('/')[-1].split(':')[0]
if simple_name in _QUANTIZATION_WEIGHT_NAMES:
return True
if simple_name in _ORIGINAL_WEIGHT_NAME:
return False
raise ValueError(f'Variable name {simple_name} is not supported on '
                     f'CustomLayerQuantize({self._original_layer_pattern}) '
'transform.')
def replacement(self, match_layer: _LayerNode) -> _LayerNode:
"""See base class."""
bottleneck_layer = match_layer.layer
bottleneck_config = bottleneck_layer['config']
bottleneck_config['num_bits_weight'] = self._num_bits_weight
bottleneck_config['num_bits_activation'] = self._num_bits_activation
bottleneck_names_and_weights = list(match_layer.names_and_weights)
quantized_layer = self._quantized_layer_class(
**bottleneck_config)
dummy_input_shape = [1, 1, 1, 1]
quantized_layer.compute_output_shape(dummy_input_shape)
quantized_names_and_weights = zip(
[weight.name for weight in quantized_layer.weights],
quantized_layer.get_weights())
match_idx = 0
names_and_weights = []
for name_and_weight in quantized_names_and_weights:
if not self._is_quantization_weight_name(name=name_and_weight[0]):
name_and_weight = bottleneck_names_and_weights[match_idx]
match_idx = match_idx + 1
names_and_weights.append(name_and_weight)
if match_idx != len(bottleneck_names_and_weights):
      raise ValueError(
          'Only {}/{} of the bottleneck weights were transformed.'.format(
              match_idx, len(bottleneck_names_and_weights)))
quantized_layer_config = keras.layers.serialize(quantized_layer)
quantized_layer_config['name'] = quantized_layer_config['config']['name']
layer_metadata = {
'quantize_config':
configs.DefaultNBitOutputQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation)}
return _LayerNode(
quantized_layer_config,
metadata=layer_metadata,
names_and_weights=names_and_weights)
class QuantizeLayoutTransform(
tfmot.quantization.keras.QuantizeLayoutTransform):
"""Default model transformations."""
def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
def apply(self, model, layer_quantize_map):
"""Implement default 8-bit transforms.
Currently this means the following.
1. Pull activations into layers, and apply fuse activations. (TODO)
2. Modify range in incoming layers for Concat. (TODO)
3. Fuse Conv2D/DepthwiseConv2D + BN into single layer.
Args:
model: Keras model to be quantized.
layer_quantize_map: Map with keys as layer names, and values as dicts
containing custom `QuantizeConfig`s which may have been passed with
layers.
Returns:
(Transformed Keras model to better match TensorFlow Lite backend, updated
layer quantize map.)
"""
transforms = [
default_n_bit_transforms.InputLayerQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.SeparableConv1DQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.SeparableConvQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.Conv2DReshapeBatchNormReLUQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.Conv2DReshapeBatchNormActivationQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.Conv2DBatchNormReLUQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.Conv2DBatchNormActivationQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.Conv2DReshapeBatchNormQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.Conv2DBatchNormQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.ConcatTransform6Inputs(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.ConcatTransform5Inputs(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.ConcatTransform4Inputs(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.ConcatTransform3Inputs(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.ConcatTransform(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.LayerReLUQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
default_n_bit_transforms.LayerReluActivationQuantize(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
CustomLayerQuantize(
'Vision>BottleneckBlock',
nn_blocks.BottleneckBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
CustomLayerQuantize(
'Vision>InvertedBottleneckBlock',
nn_blocks.InvertedBottleneckBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation),
CustomLayerQuantize(
'Vision>Conv2DBNBlock',
nn_blocks.Conv2DBNBlockNBitQuantized,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation)
]
return _ModelTransformer(model, transforms, set(layer_quantize_map.keys()),
layer_quantize_map).transform()
class DefaultNBitQuantizeScheme(tfmot.quantization.keras.experimental
.default_n_bit.DefaultNBitQuantizeScheme):
"""Default N-bit Scheme."""
def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
super(DefaultNBitQuantizeScheme, self).__init__(
num_bits_weight=num_bits_weight,
num_bits_activation=num_bits_activation)
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
def get_layout_transformer(self):
return QuantizeLayoutTransform(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation)
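# Usage sketch (not in the original module): the scheme above is intended to
# be passed to the standard TFMOT `quantize_apply` entry point. The `model`
# argument and the 4/4 bit widths are illustrative assumptions.
def _example_apply_n_bit_scheme(model: tf.keras.Model) -> tf.keras.Model:
  annotated = tfmot.quantization.keras.quantize_annotate_model(model)
  return tfmot.quantization.keras.quantize_apply(
      annotated,
      scheme=DefaultNBitQuantizeScheme(
          num_bits_weight=4, num_bits_activation=4))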
| 9,733 | 42.455357 | 103 | py |
models | models-master/official/projects/qat/vision/n_bit/nn_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for neural networks."""
from typing import Any, Callable, Dict, Union
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.n_bit import configs
from official.vision.modeling.layers import nn_layers
# Type annotations.
States = Dict[str, tf.Tensor]
Activation = Union[str, Callable]
class NoOpActivation:
"""No-op activation which simply returns the incoming tensor.
  This activation is required to distinguish it from `keras.activations.linear`,
  which does the same thing. The main difference is that NoOpActivation should
not have any quantize operation applied to it.
"""
def __call__(self, x: tf.Tensor) -> tf.Tensor:
return x
def get_config(self) -> Dict[str, Any]:
"""Get a config of this object."""
return {}
def __eq__(self, other: Any) -> bool:
return isinstance(other, NoOpActivation)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def _quantize_wrapped_layer(cls, quantize_config):
def constructor(*arg, **kwargs):
return tfmot.quantization.keras.QuantizeWrapperV2(
cls(*arg, **kwargs),
quantize_config)
return constructor
@tf.keras.utils.register_keras_serializable(package='Vision')
class SqueezeExcitationNBitQuantized(tf.keras.layers.Layer):
"""Creates a squeeze and excitation layer."""
def __init__(self,
in_filters,
out_filters,
se_ratio,
divisible_by=1,
use_3d_input=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
gating_activation='sigmoid',
num_bits_weight=8,
num_bits_activation=8,
**kwargs):
"""Initializes a squeeze and excitation layer.
Args:
in_filters: An `int` number of filters of the input tensor.
out_filters: An `int` number of filters of the output tensor.
se_ratio: A `float` or None. If not None, se ratio for the squeeze and
excitation layer.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
use_3d_input: A `bool` of whether input is 2D or 3D image.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
gating_activation: A `str` name of the activation function for final
gating function.
num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation. Default to 8.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._in_filters = in_filters
self._out_filters = out_filters
self._se_ratio = se_ratio
self._divisible_by = divisible_by
self._use_3d_input = use_3d_input
self._activation = activation
self._gating_activation = gating_activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
if tf.keras.backend.image_data_format() == 'channels_last':
if not use_3d_input:
self._spatial_axis = [1, 2]
else:
self._spatial_axis = [1, 2, 3]
else:
if not use_3d_input:
self._spatial_axis = [2, 3]
else:
self._spatial_axis = [2, 3, 4]
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(activation, use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
self._gating_activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(gating_activation, use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
def build(self, input_shape):
conv2d_quantized = _quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.DefaultNBitConvQuantizeConfig(
['kernel'], ['activation'], False,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
conv2d_quantized_output_quantized = _quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.DefaultNBitConvQuantizeConfig(
['kernel'], ['activation'], True,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
num_reduced_filters = nn_layers.make_divisible(
max(1, int(self._in_filters * self._se_ratio)),
divisor=self._divisible_by)
self._se_reduce = conv2d_quantized(
filters=num_reduced_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._se_expand = conv2d_quantized_output_quantized(
filters=self._out_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._multiply = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Multiply(),
configs.DefaultNBitQuantizeConfig(
[], [], True, num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
self._reduce_mean_quantizer = (
tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=self._num_bits_activation, per_axis=False,
symmetric=False, narrow_range=False)) # activation/output
self._reduce_mean_quantizer_vars = self._reduce_mean_quantizer.build(
None, 'reduce_mean_quantizer_vars', self)
super().build(input_shape)
def get_config(self):
config = {
'in_filters': self._in_filters,
'out_filters': self._out_filters,
'se_ratio': self._se_ratio,
'divisible_by': self._divisible_by,
'use_3d_input': self._use_3d_input,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'gating_activation': self._gating_activation,
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
x = tf.reduce_mean(inputs, self._spatial_axis, keepdims=True)
x = self._reduce_mean_quantizer(
x, training, self._reduce_mean_quantizer_vars)
x = self._activation_layer(self._se_reduce(x))
x = self._gating_activation_layer(self._se_expand(x))
x = self._multiply([x, inputs])
return x
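# Shape-check sketch (not part of the original module; the filter sizes and
# the 4-D channels-last input are assumptions): builds the quantized SE layer
# above and applies it to a symbolic input.
def _example_squeeze_excitation() -> tf.Tensor:
  inputs = tf.keras.Input(shape=(32, 32, 64), batch_size=1)
  layer = SqueezeExcitationNBitQuantized(
      in_filters=64, out_filters=64, se_ratio=0.25)
  return layer(inputs)  # Same shape as `inputs`, gated channel-wise.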
| 8,359 | 37.703704 | 80 | py |
models | models-master/official/projects/qat/vision/n_bit/nn_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn_blocks."""
from typing import Any, Iterable, Tuple
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.qat.vision.n_bit import nn_blocks
def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]:
"""Returns the combinations of end-to-end tests to run."""
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
)
class NNBlocksTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(nn_blocks.BottleneckBlockNBitQuantized, 1, False, 0.0, None, 4, 4),
(nn_blocks.BottleneckBlockNBitQuantized, 2, True, 0.2, 0.25, 4, 4),
)
def test_bottleneck_block_creation(self, block_fn, strides, use_projection,
stochastic_depth_drop_rate, se_ratio,
num_bits_weight, num_bits_activation):
input_size = 128
filter_size = 256
inputs = tf.keras.Input(
shape=(input_size, input_size, filter_size * 4), batch_size=1)
block = block_fn(
filter_size,
strides,
use_projection=use_projection,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
num_bits_weight=num_bits_weight,
num_bits_activation=num_bits_activation)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, filter_size * 4],
features.shape.as_list())
@parameterized.parameters(
(nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 1, None, None, 4, 4),
(nn_blocks.InvertedBottleneckBlockNBitQuantized, 6, 1, None, None, 4, 4),
(nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 2, None, None, 4, 4),
(nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 1, 0.2, None, 4, 4),
(nn_blocks.InvertedBottleneckBlockNBitQuantized, 1, 1, None, 0.2, 4, 4),
)
def test_invertedbottleneck_block_creation(
self, block_fn, expand_ratio, strides, se_ratio,
stochastic_depth_drop_rate, num_bits_weight, num_bits_activation):
input_size = 128
in_filters = 24
out_filters = 40
inputs = tf.keras.Input(
shape=(input_size, input_size, in_filters), batch_size=1)
block = block_fn(
in_filters=in_filters,
out_filters=out_filters,
expand_ratio=expand_ratio,
strides=strides,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
num_bits_weight=num_bits_weight,
num_bits_activation=num_bits_activation)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, out_filters],
features.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 3,653 | 35.54 | 79 | py |
models | models-master/official/projects/qat/vision/n_bit/configs_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for configs.py."""
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.n_bit import configs
class _TestHelper(object):
def _convert_list(self, list_of_tuples):
"""Transforms a list of 2-tuples to a tuple of 2 lists.
`QuantizeConfig` methods return a list of 2-tuples in the form
[(weight1, quantizer1), (weight2, quantizer2)]. This function converts
    it into a 2-tuple of lists: ([weight1, weight2], [quantizer1, quantizer2]).
Args:
list_of_tuples: List of 2-tuples.
Returns:
2-tuple of lists.
"""
list1 = []
list2 = []
for a, b in list_of_tuples:
list1.append(a)
list2.append(b)
return list1, list2
# TODO(pulkitb): Consider asserting on full equality for quantizers.
def _assert_weight_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.LastValueQuantizer)
def _assert_activation_quantizers(self, quantizer_list):
for quantizer in quantizer_list:
self.assertIsInstance(
quantizer,
tfmot.quantization.keras.quantizers.MovingAverageQuantizer)
def _assert_kernel_equality(self, a, b):
self.assertAllEqual(a.numpy(), b.numpy())
class DefaultNBitQuantizeConfigTest(tf.test.TestCase, _TestHelper):
def _simple_dense_layer(self):
layer = tf.keras.layers.Dense(2)
layer.build(input_shape=(3,))
return layer
def testGetsQuantizeWeightsAndQuantizers(self):
layer = self._simple_dense_layer()
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
(weights, weight_quantizers) = self._convert_list(
quantize_config.get_weights_and_quantizers(layer))
self._assert_weight_quantizers(weight_quantizers)
self.assertEqual([layer.kernel], weights)
def testGetsQuantizeActivationsAndQuantizers(self):
layer = self._simple_dense_layer()
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
(activations, activation_quantizers) = self._convert_list(
quantize_config.get_activations_and_quantizers(layer))
self._assert_activation_quantizers(activation_quantizers)
self.assertEqual([layer.activation], activations)
def testSetsQuantizeWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
quantize_config.set_quantize_weights(layer, [quantize_kernel])
self._assert_kernel_equality(layer.kernel, quantize_kernel)
def testSetsQuantizeActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
quantize_config.set_quantize_activations(layer, [quantize_activation])
self.assertEqual(layer.activation, quantize_activation)
def testSetsQuantizeWeights_ErrorOnWrongNumberOfWeights(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(
np.ones(layer.kernel.shape.as_list()))
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer,
[quantize_kernel, quantize_kernel])
def testSetsQuantizeWeights_ErrorOnWrongShapeOfWeight(self):
layer = self._simple_dense_layer()
quantize_kernel = tf.keras.backend.variable(np.ones([1, 2]))
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
with self.assertRaises(ValueError):
quantize_config.set_quantize_weights(layer, [quantize_kernel])
def testSetsQuantizeActivations_ErrorOnWrongNumberOfActivations(self):
layer = self._simple_dense_layer()
quantize_activation = tf.keras.activations.relu
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(layer, [])
with self.assertRaises(ValueError):
quantize_config.set_quantize_activations(
layer, [quantize_activation, quantize_activation])
def testGetsResultQuantizers_ReturnsQuantizer(self):
layer = self._simple_dense_layer()
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
[], [], True, num_bits_weight, num_bits_activation)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertLen(output_quantizers, 1)
self._assert_activation_quantizers(output_quantizers)
def testGetsResultQuantizers_EmptyWhenFalse(self):
layer = self._simple_dense_layer()
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
[], [], False, num_bits_weight, num_bits_activation)
output_quantizers = quantize_config.get_output_quantizers(layer)
self.assertEqual([], output_quantizers)
def testSerialization(self):
num_bits_weight = 4
num_bits_activation = 4
quantize_config = configs.DefaultNBitQuantizeConfig(
['kernel'], ['activation'], False, num_bits_weight, num_bits_activation)
expected_config = {
'class_name': 'DefaultNBitQuantizeConfig',
'config': {
'weight_attrs': ['kernel'],
'activation_attrs': ['activation'],
'quantize_output': False,
'num_bits_weight': 4,
'num_bits_activation': 4
}
}
serialized_quantize_config = tf_utils.serialize_keras_object(
quantize_config
)
self.assertEqual(expected_config, serialized_quantize_config)
quantize_config_from_config = (
tf_utils.deserialize_keras_object(
serialized_quantize_config,
module_objects=globals(),
custom_objects=configs._types_dict(),
)
)
self.assertEqual(quantize_config, quantize_config_from_config)
if __name__ == '__main__':
tf.test.main()
| 7,762 | 32.752174 | 80 | py |
models | models-master/official/projects/qat/vision/n_bit/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.projects.qat.vision.n_bit import configs
from official.projects.qat.vision.n_bit import schemes
from official.projects.qat.vision.n_bit.nn_blocks import BottleneckBlockNBitQuantized
from official.projects.qat.vision.n_bit.nn_blocks import Conv2DBNBlockNBitQuantized
from official.projects.qat.vision.n_bit.nn_blocks import InvertedBottleneckBlockNBitQuantized
| 1,018 | 45.318182 | 93 | py |
models | models-master/official/projects/qat/vision/n_bit/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains quantized neural blocks for the QAT."""
from typing import Any, Dict, Optional, Sequence, Union
# Import libraries
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.n_bit import configs
from official.projects.qat.vision.n_bit import nn_layers as qat_nn_layers
from official.vision.modeling.layers import nn_layers
class NoOpActivation:
"""No-op activation which simply returns the incoming tensor.
  This activation is required to distinguish it from `keras.activations.linear`,
  which does the same thing. The main difference is that NoOpActivation should
not have any quantize operation applied to it.
"""
def __call__(self, x: tf.Tensor) -> tf.Tensor:
return x
def get_config(self) -> Dict[str, Any]:
"""Get a config of this object."""
return {}
def __eq__(self, other: Any) -> bool:
if not other or not isinstance(other, NoOpActivation):
return False
return True
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def _quantize_wrapped_layer(cls, quantize_config):
def constructor(*arg, **kwargs):
return tfmot.quantization.keras.QuantizeWrapperV2(
cls(*arg, **kwargs),
quantize_config)
return constructor
# This class is copied from modeling.layers.nn_blocks.BottleneckBlock and apply
# QAT.
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlockNBitQuantized(tf.keras.layers.Layer):
"""A quantized standard bottleneck block."""
def __init__(self,
filters: int,
strides: int,
dilation_rate: int = 1,
use_projection: bool = False,
se_ratio: Optional[float] = None,
resnetd_shortcut: bool = False,
stochastic_depth_drop_rate: Optional[float] = None,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
bias_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
bn_trainable: bool = True,
num_bits_weight: int = 8,
num_bits_activation: int = 8, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initializes a standard bottleneck block with BN after convolutions.
Args:
filters: An `int` number of filters for the first two convolutions. Note
that the third and final convolution will use 4 times as many filters.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
dilation_rate: An `int` dilation_rate of convolutions. Default to 1.
use_projection: A `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
resnetd_shortcut: A `bool`. If True, apply the resnetd style modification
to the shortcut connection.
stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
bn_trainable: A `bool` that indicates whether batch norm layers should be
trainable. Default to True.
num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation. Default to 8.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._filters = filters
self._strides = strides
self._dilation_rate = dilation_rate
self._use_projection = use_projection
self._se_ratio = se_ratio
self._resnetd_shortcut = resnetd_shortcut
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
if use_sync_bn:
self._norm = _quantize_wrapped_layer(
tf.keras.layers.experimental.SyncBatchNormalization,
configs.NoOpQuantizeConfig())
self._norm_with_quantize = _quantize_wrapped_layer(
tf.keras.layers.experimental.SyncBatchNormalization,
configs.DefaultNBitOutputQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
else:
self._norm = _quantize_wrapped_layer(
tf.keras.layers.BatchNormalization,
configs.NoOpQuantizeConfig())
self._norm_with_quantize = _quantize_wrapped_layer(
tf.keras.layers.BatchNormalization,
configs.DefaultNBitOutputQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._bn_trainable = bn_trainable
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
conv2d_quantized = _quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.DefaultNBitConvQuantizeConfig(
['kernel'], ['activation'], False,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
if self._use_projection:
if self._resnetd_shortcut:
self._shortcut0 = tf.keras.layers.AveragePooling2D(
pool_size=2, strides=self._strides, padding='same')
self._shortcut1 = conv2d_quantized(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
else:
self._shortcut = conv2d_quantized(
filters=self._filters * 4,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._norm0 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._conv1 = conv2d_quantized(
filters=self._filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._activation1 = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
self._conv2 = conv2d_quantized(
filters=self._filters,
kernel_size=3,
strides=self._strides,
dilation_rate=self._dilation_rate,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._activation2 = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
self._conv3 = conv2d_quantized(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._norm3 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable)
self._activation3 = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = qat_nn_layers.SqueezeExcitationNBitQuantized(
in_filters=self._filters * 4,
out_filters=self._filters * 4,
se_ratio=self._se_ratio,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
self._add = tfmot.quantization.keras.QuantizeWrapperV2(
tf.keras.layers.Add(),
configs.DefaultNBitQuantizeConfig(
[], [], True,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
super().build(input_shape)
def get_config(self) -> Dict[str, Any]:
"""Get a config of this layer."""
config = {
'filters': self._filters,
'strides': self._strides,
'dilation_rate': self._dilation_rate,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'resnetd_shortcut': self._resnetd_shortcut,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'bn_trainable': self._bn_trainable,
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(
self,
inputs: tf.Tensor,
training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
"""Run the BottleneckBlockQuantized logics."""
shortcut = inputs
if self._use_projection:
if self._resnetd_shortcut:
shortcut = self._shortcut0(shortcut)
shortcut = self._shortcut1(shortcut)
else:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation1(x)
x = self._conv2(x)
x = self._norm2(x)
x = self._activation2(x)
x = self._conv3(x)
x = self._norm3(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
x = self._add([x, shortcut])
return self._activation3(x)
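# Construction sketch with illustrative values (not from the original file):
# with `filters=64` the block outputs 4 * 64 = 256 channels, spatially
# downsampled by `strides`.
def _example_bottleneck_block() -> tf.Tensor:
  inputs = tf.keras.Input(shape=(64, 64, 256), batch_size=1)
  block = BottleneckBlockNBitQuantized(
      filters=64, strides=2, use_projection=True)
  return block(inputs)  # -> [1, 32, 32, 256]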
# This class is copied from modeling.backbones.mobilenet.Conv2DBNBlock and apply
# QAT.
@tf.keras.utils.register_keras_serializable(package='Vision')
class Conv2DBNBlockNBitQuantized(tf.keras.layers.Layer):
"""A quantized convolution block with batch normalization."""
def __init__(
self,
filters: int,
kernel_size: int = 3,
strides: int = 1,
use_bias: bool = False,
activation: str = 'relu6',
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_normalization: bool = True,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
num_bits_weight: int = 8,
num_bits_activation: int = 8,
**kwargs):
"""A convolution block with batch normalization.
Args:
      filters: An `int` number of filters for the convolution.
kernel_size: An `int` specifying the height and width of the 2D
convolution window.
strides: An `int` of block stride. If greater than 1, this block will
ultimately downsample the input.
use_bias: If True, use bias in the convolution layer.
activation: A `str` name of the activation function.
kernel_initializer: A `str` for kernel initializer of convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
use_normalization: If True, use batch normalization.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation. Default to 8.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._activation = activation
self._use_bias = use_bias
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_normalization = use_normalization
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
if use_sync_bn:
self._norm = _quantize_wrapped_layer(
tf.keras.layers.experimental.SyncBatchNormalization,
configs.NoOpQuantizeConfig())
else:
self._norm = _quantize_wrapped_layer(
tf.keras.layers.BatchNormalization,
configs.NoOpQuantizeConfig())
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
def get_config(self) -> Dict[str, Any]:
"""Get a config of this layer."""
config = {
'filters': self._filters,
'strides': self._strides,
'kernel_size': self._kernel_size,
'use_bias': self._use_bias,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'use_normalization': self._use_normalization,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
conv2d_quantized = _quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.DefaultNBitConvQuantizeConfig(
['kernel'], ['activation'], False,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
self._conv0 = conv2d_quantized(
filters=self._filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding='same',
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
if self._use_normalization:
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
super(Conv2DBNBlockNBitQuantized, self).build(input_shape)
def call(
self,
inputs: tf.Tensor,
training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
"""Run the Conv2DBNBlockNBitQuantized logics."""
x = self._conv0(inputs)
if self._use_normalization:
x = self._norm0(x)
return self._activation_layer(x)
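# Quick usage sketch (all values are illustrative assumptions): a quantized
# conv + BN + activation stem block as used by MobileNet-style backbones.
def _example_conv2d_bn_block() -> tf.Tensor:
  inputs = tf.keras.Input(shape=(224, 224, 3), batch_size=1)
  block = Conv2DBNBlockNBitQuantized(
      filters=32, kernel_size=3, strides=2, activation='relu6')
  return block(inputs)  # -> [1, 112, 112, 32]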
@tf.keras.utils.register_keras_serializable(package='Vision')
class InvertedBottleneckBlockNBitQuantized(tf.keras.layers.Layer):
"""A quantized inverted bottleneck block."""
def __init__(self,
in_filters,
out_filters,
expand_ratio,
strides,
kernel_size=3,
se_ratio=None,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
se_inner_activation='relu',
se_gating_activation='sigmoid',
expand_se_in_filters=False,
depthwise_activation=None,
use_sync_bn=False,
dilation_rate=1,
divisible_by=1,
regularize_depthwise=False,
use_depthwise=True,
use_residual=True,
norm_momentum=0.99,
norm_epsilon=0.001,
num_bits_weight: int = 8,
num_bits_activation: int = 8,
**kwargs):
"""Initializes an inverted bottleneck block with BN after convolutions.
Args:
in_filters: An `int` number of filters of the input tensor.
out_filters: An `int` number of filters of the output tensor.
expand_ratio: An `int` of expand_ratio for an inverted bottleneck block.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
kernel_size: An `int` kernel_size of the depthwise conv layer.
se_ratio: A `float` or None. If not None, se ratio for the squeeze and
excitation layer.
stochastic_depth_drop_rate: A `float` or None. if not None, drop rate for
the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
se_inner_activation: A `str` name of squeeze-excitation inner activation.
se_gating_activation: A `str` name of squeeze-excitation gating
activation.
expand_se_in_filters: A `bool` of whether or not to expand in_filter in
squeeze and excitation layer.
depthwise_activation: A `str` name of the activation function for
depthwise only.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      dilation_rate: An `int` that specifies the dilation rate to use for
        dilated convolution. A single `int` applies the same value to all
        spatial dimensions.
      divisible_by: An `int` that ensures all inner dimensions are divisible by
        this number.
regularize_depthwise: A `bool` of whether or not apply regularization on
depthwise.
      use_depthwise: A `bool` of whether to use a depthwise convolution; if
        False, a fused convolution is used instead.
use_residual: A `bool` of whether to include residual connection between
input and output.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
num_bits_weight: An `int` number of bits for the weight. Default to 8.
      num_bits_activation: An `int` number of bits for the activation. Default to 8.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._in_filters = in_filters
self._out_filters = out_filters
self._expand_ratio = expand_ratio
self._strides = strides
self._kernel_size = kernel_size
self._se_ratio = se_ratio
self._divisible_by = divisible_by
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._dilation_rate = dilation_rate
self._use_sync_bn = use_sync_bn
self._regularize_depthwise = regularize_depthwise
self._use_depthwise = use_depthwise
self._use_residual = use_residual
self._activation = activation
self._se_inner_activation = se_inner_activation
self._se_gating_activation = se_gating_activation
self._depthwise_activation = depthwise_activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._expand_se_in_filters = expand_se_in_filters
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
if use_sync_bn:
self._norm = _quantize_wrapped_layer(
tf.keras.layers.experimental.SyncBatchNormalization,
configs.NoOpQuantizeConfig())
self._norm_with_quantize = _quantize_wrapped_layer(
tf.keras.layers.experimental.SyncBatchNormalization,
configs.DefaultNBitOutputQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
else:
self._norm = _quantize_wrapped_layer(
tf.keras.layers.BatchNormalization,
configs.NoOpQuantizeConfig())
self._norm_with_quantize = _quantize_wrapped_layer(
tf.keras.layers.BatchNormalization,
configs.DefaultNBitOutputQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
if not depthwise_activation:
self._depthwise_activation = activation
if regularize_depthwise:
self._depthsize_regularizer = kernel_regularizer
else:
self._depthsize_regularizer = None
def build(self, input_shape: Optional[Union[Sequence[int], tf.Tensor]]):
"""Build variables and child layers to prepare for calling."""
conv2d_quantized = _quantize_wrapped_layer(
tf.keras.layers.Conv2D,
configs.DefaultNBitConvQuantizeConfig(
['kernel'], ['activation'], False,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
depthwise_conv2d_quantized = _quantize_wrapped_layer(
tf.keras.layers.DepthwiseConv2D,
configs.DefaultNBitConvQuantizeConfig(
['depthwise_kernel'], ['activation'], False,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
expand_filters = self._in_filters
if self._expand_ratio > 1:
# First 1x1 conv for channel expansion.
expand_filters = nn_layers.make_divisible(
self._in_filters * self._expand_ratio, self._divisible_by)
expand_kernel = 1 if self._use_depthwise else self._kernel_size
expand_stride = 1 if self._use_depthwise else self._strides
self._conv0 = conv2d_quantized(
filters=expand_filters,
kernel_size=expand_kernel,
strides=expand_stride,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._norm0 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._activation_layer = tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._activation, use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation))
if self._use_depthwise:
# Depthwise conv.
self._conv1 = depthwise_conv2d_quantized(
kernel_size=(self._kernel_size, self._kernel_size),
strides=self._strides,
padding='same',
depth_multiplier=1,
dilation_rate=self._dilation_rate,
use_bias=False,
depthwise_initializer=self._kernel_initializer,
          depthwise_regularizer=self._depthwise_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._norm1 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._depthwise_activation_layer = (
tfmot.quantization.keras.QuantizeWrapperV2(
tf_utils.get_activation(self._depthwise_activation,
use_keras_layer=True),
configs.DefaultNBitActivationQuantizeConfig(
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation)))
# Squeeze and excitation.
    if self._se_ratio and 0 < self._se_ratio <= 1:
logging.info('Use Squeeze and excitation.')
in_filters = self._in_filters
if self._expand_se_in_filters:
in_filters = expand_filters
self._squeeze_excitation = qat_nn_layers.SqueezeExcitationNBitQuantized(
in_filters=in_filters,
out_filters=expand_filters,
se_ratio=self._se_ratio,
divisible_by=self._divisible_by,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._se_inner_activation,
gating_activation=self._se_gating_activation,
num_bits_weight=self._num_bits_weight,
num_bits_activation=self._num_bits_activation)
else:
self._squeeze_excitation = None
# Last 1x1 conv.
self._conv2 = conv2d_quantized(
filters=self._out_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=NoOpActivation())
self._norm2 = self._norm_with_quantize(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
self._add = tf.keras.layers.Add()
super().build(input_shape)
def get_config(self) -> Dict[str, Any]:
"""Get a config of this layer."""
config = {
'in_filters': self._in_filters,
'out_filters': self._out_filters,
'expand_ratio': self._expand_ratio,
'strides': self._strides,
'kernel_size': self._kernel_size,
'se_ratio': self._se_ratio,
'divisible_by': self._divisible_by,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'se_inner_activation': self._se_inner_activation,
'se_gating_activation': self._se_gating_activation,
'expand_se_in_filters': self._expand_se_in_filters,
'depthwise_activation': self._depthwise_activation,
'dilation_rate': self._dilation_rate,
'use_sync_bn': self._use_sync_bn,
'regularize_depthwise': self._regularize_depthwise,
'use_depthwise': self._use_depthwise,
'use_residual': self._use_residual,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(
self,
inputs: tf.Tensor,
training: Optional[Union[bool, tf.Tensor]] = None) -> tf.Tensor:
"""Run the InvertedBottleneckBlockNBitQuantized logics."""
shortcut = inputs
if self._expand_ratio > 1:
x = self._conv0(inputs)
x = self._norm0(x)
x = self._activation_layer(x)
else:
x = inputs
if self._use_depthwise:
x = self._conv1(x)
x = self._norm1(x)
x = self._depthwise_activation_layer(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
x = self._conv2(x)
x = self._norm2(x)
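    # Add the residual only when the block preserves spatial size and channel
    # count; stochastic depth may randomly drop the transformed path during
    # training.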
if (self._use_residual and
self._in_filters == self._out_filters and
self._strides == 1):
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
x = self._add([x, shortcut])
return x
| 32,587 | 39.735 | 103 | py |
models | models-master/official/projects/qat/vision/n_bit/configs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default 8-bit QuantizeConfigs."""
from typing import Sequence, Callable, Tuple, Any, Dict
import tensorflow as tf
import tensorflow_model_optimization as tfmot
Quantizer = tfmot.quantization.keras.quantizers.Quantizer
Layer = tf.keras.layers.Layer
Activation = Callable[[tf.Tensor], tf.Tensor]
WeightAndQuantizer = Tuple[tf.Variable, Quantizer]
ActivationAndQuantizer = Tuple[Activation, Quantizer]
class DefaultNBitOutputQuantizeConfig(
tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig which only quantizes the output from a layer."""
def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
return []
def set_quantize_weights(self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
pass
def set_quantize_activations(self,
layer: Layer,
quantize_activations: Sequence[Activation]):
pass
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
return [
tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=self._num_bits_activation, per_axis=False,
symmetric=False, narrow_range=False) # activation/output
]
def get_config(self) -> Dict[str, Any]:
return {
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation,
}
class NoOpQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig which does not quantize any part of the layer."""
def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
return []
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
pass
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
pass
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
return []
def get_config(self) -> Dict[str, Any]:
return {
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation,
}
class DefaultNBitQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for non recurrent Keras layers."""
def __init__(self,
weight_attrs: Sequence[str],
activation_attrs: Sequence[str],
quantize_output: bool,
num_bits_weight: int = 8,
num_bits_activation: int = 8):
"""Initializes a default N-bit quantize config."""
self.weight_attrs = weight_attrs
self.activation_attrs = activation_attrs
self.quantize_output = quantize_output
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
# TODO(pulkitb): For some layers such as Conv2D, per_axis should be True.
# Add mapping for which layers support per_axis.
self.weight_quantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer(
num_bits=num_bits_weight, per_axis=False,
symmetric=True, narrow_range=True) # weight
self.activation_quantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=num_bits_activation, per_axis=False,
symmetric=False, narrow_range=False) # activation/output
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
"""See base class."""
return [(getattr(layer, weight_attr), self.weight_quantizer)
for weight_attr in self.weight_attrs]
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
"""See base class."""
return [(getattr(layer, activation_attr), self.activation_quantizer)
for activation_attr in self.activation_attrs]
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
"""See base class."""
if len(self.weight_attrs) != len(quantize_weights):
raise ValueError(
'`set_quantize_weights` called on layer {} with {} '
'weight parameters, but layer expects {} values.'.format(
layer.name, len(quantize_weights), len(self.weight_attrs)))
for weight_attr, weight in zip(self.weight_attrs, quantize_weights):
current_weight = getattr(layer, weight_attr)
if current_weight.shape != weight.shape:
        raise ValueError('Existing layer weight shape {} is incompatible with '
'provided weight shape {}'.format(
current_weight.shape, weight.shape))
setattr(layer, weight_attr, weight)
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
"""See base class."""
if len(self.activation_attrs) != len(quantize_activations):
raise ValueError(
'`set_quantize_activations` called on layer {} with {} '
'activation parameters, but layer expects {} values.'.format(
layer.name, len(quantize_activations),
len(self.activation_attrs)))
for activation_attr, activation in zip(
self.activation_attrs, quantize_activations):
setattr(layer, activation_attr, activation)
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
"""See base class."""
if self.quantize_output:
return [self.activation_quantizer]
return []
@classmethod
def from_config(cls, config: Dict[str, Any]) -> object:
"""Instantiates a `DefaultNBitQuantizeConfig` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `DefaultNBitQuantizeConfig` instance.
"""
return cls(**config)
def get_config(self) -> Dict[str, Any]:
"""Get a config for this quantize config."""
# TODO(pulkitb): Add weight and activation quantizer to config.
# Currently it's created internally, but ideally the quantizers should be
# part of the constructor and passed in from the registry.
return {
'weight_attrs': self.weight_attrs,
'activation_attrs': self.activation_attrs,
'quantize_output': self.quantize_output,
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation
}
def __eq__(self, other):
if not isinstance(other, DefaultNBitQuantizeConfig):
return False
return (self.weight_attrs == other.weight_attrs and
            self.activation_attrs == other.activation_attrs and
self.weight_quantizer == other.weight_quantizer and
self.activation_quantizer == other.activation_quantizer and
self.quantize_output == other.quantize_output)
def __ne__(self, other):
return not self.__eq__(other)
class DefaultNBitConvWeightsQuantizer(
tfmot.quantization.keras.quantizers.LastValueQuantizer):
"""Quantizer for handling weights in Conv2D/DepthwiseConv2D layers."""
def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
"""Construct LastValueQuantizer with params specific for TFLite Convs."""
super(DefaultNBitConvWeightsQuantizer, self).__init__(
num_bits=num_bits_weight, per_axis=True,
symmetric=True, narrow_range=True) # weight
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
def build(self,
tensor_shape: tf.TensorShape,
name: str,
layer: Layer):
"""Build min/max quantization variables."""
min_weight = layer.add_weight(
name + '_min',
shape=(tensor_shape[-1],),
initializer=tf.keras.initializers.Constant(-6.0),
trainable=False)
max_weight = layer.add_weight(
name + '_max',
shape=(tensor_shape[-1],),
initializer=tf.keras.initializers.Constant(6.0),
trainable=False)
return {'min_var': min_weight, 'max_var': max_weight}
class NoQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
"""Dummy quantizer for explicitly not quantize."""
def __call__(self, inputs, training, weights, **kwargs):
return tf.identity(inputs)
def get_config(self):
return {}
def build(self, tensor_shape, name, layer):
return {}
class DefaultNBitConvQuantizeConfig(DefaultNBitQuantizeConfig):
"""QuantizeConfig for Conv2D/DepthwiseConv2D layers."""
def __init__(self,
weight_attrs: Sequence[str],
activation_attrs: Sequence[str],
quantize_output: bool,
num_bits_weight: int = 8,
num_bits_activation: int = 8):
"""Initializes default N-bit quantization config for the conv layer."""
super().__init__(weight_attrs=weight_attrs,
activation_attrs=activation_attrs,
quantize_output=quantize_output,
num_bits_weight=num_bits_weight,
num_bits_activation=num_bits_activation)
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
self.weight_quantizer = DefaultNBitConvWeightsQuantizer(
num_bits_weight=num_bits_weight,
num_bits_activation=num_bits_activation)
class DefaultNBitActivationQuantizeConfig(
tfmot.quantization.keras.QuantizeConfig):
"""QuantizeConfig for keras.layers.Activation.
`keras.layers.Activation` needs a separate `QuantizeConfig` since the
decision to quantize depends on the specific activation type.
"""
def __init__(self, num_bits_weight: int = 8, num_bits_activation: int = 8):
self._num_bits_weight = num_bits_weight
self._num_bits_activation = num_bits_activation
def _assert_activation_layer(self, layer: Layer):
if not isinstance(layer, tf.keras.layers.Activation):
raise RuntimeError(
'DefaultNBitActivationQuantizeConfig can only be used with '
'`keras.layers.Activation`.')
def get_weights_and_quantizers(
self, layer: Layer) -> Sequence[WeightAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def get_activations_and_quantizers(
self, layer: Layer) -> Sequence[ActivationAndQuantizer]:
"""See base class."""
self._assert_activation_layer(layer)
return []
def set_quantize_weights(
self,
layer: Layer,
quantize_weights: Sequence[tf.Tensor]):
"""See base class."""
self._assert_activation_layer(layer)
def set_quantize_activations(
self,
layer: Layer,
quantize_activations: Sequence[Activation]):
"""See base class."""
self._assert_activation_layer(layer)
def get_output_quantizers(self, layer: Layer) -> Sequence[Quantizer]:
"""See base class."""
self._assert_activation_layer(layer)
if not hasattr(layer.activation, '__name__'):
raise ValueError('Activation {} not supported by '
'DefaultNBitActivationQuantizeConfig.'.format(
layer.activation))
    # This code is copied from the TFMOT repo, with relu6 added to support MobileNet.
if layer.activation.__name__ in ['relu', 'relu6', 'swish']:
# 'relu' should generally get fused into the previous layer.
return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
num_bits=self._num_bits_activation, per_axis=False,
symmetric=False, narrow_range=False)] # activation/output
elif layer.activation.__name__ in ['linear', 'softmax', 'sigmoid']:
return []
raise ValueError('Activation {} not supported by '
'DefaultNBitActivationQuantizeConfig.'.format(
layer.activation))
def get_config(self) -> Dict[str, Any]:
"""Get a config for this quantizer config."""
return {
'num_bits_weight': self._num_bits_weight,
'num_bits_activation': self._num_bits_activation,
}
def _types_dict():
return {
'DefaultNBitOutputQuantizeConfig':
DefaultNBitOutputQuantizeConfig,
'NoOpQuantizeConfig':
NoOpQuantizeConfig,
'DefaultNBitQuantizeConfig':
DefaultNBitQuantizeConfig,
'DefaultNBitConvWeightsQuantizer':
DefaultNBitConvWeightsQuantizer,
'DefaultNBitConvQuantizeConfig':
DefaultNBitConvQuantizeConfig,
'DefaultNBitActivationQuantizeConfig':
DefaultNBitActivationQuantizeConfig,
}
| 13,651 | 34.832021 | 91 | py |
models | models-master/official/projects/qat/vision/quantization/layer_transforms.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains custom quantization layer transforms."""
from typing import Any, Type, Mapping, List, Union, Tuple
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.modeling import tf_utils
from official.projects.qat.vision.modeling.layers import nn_blocks as quantized_nn_blocks
from official.projects.qat.vision.modeling.layers import nn_layers as quantized_nn_layers
from official.projects.qat.vision.quantization import configs
from official.projects.qat.vision.quantization import helper
keras = tf.keras
LayerNode = tfmot.quantization.keras.graph_transformations.transforms.LayerNode
LayerPattern = tfmot.quantization.keras.graph_transformations.transforms.LayerPattern
_LAYER_NAMES = [
'Vision>Conv2DBNBlock', 'Vision>InvertedBottleneckBlock',
'Vision>SegmentationHead', 'Vision>SpatialPyramidPooling', 'Vision>ASPP'
]
class CustomLayerQuantize(
tfmot.quantization.keras.graph_transformations.transforms.Transform):
"""Add QAT support for Keras Custom layer."""
def __init__(self, original_layer_pattern: str,
quantized_layer_class: Type[keras.layers.Layer]):
super(CustomLayerQuantize, self).__init__()
self._original_layer_pattern = original_layer_pattern
self._quantized_layer_class = quantized_layer_class
def pattern(self) -> LayerPattern:
"""See base class."""
return LayerPattern(self._original_layer_pattern)
def _create_layer_metadata(
self, layer_class_name: str
) -> Mapping[str, tfmot.quantization.keras.QuantizeConfig]:
if layer_class_name in _LAYER_NAMES:
layer_metadata = {'quantize_config': configs.NoOpQuantizeConfig()}
else:
layer_metadata = {
'quantize_config': configs.Default8BitOutputQuantizeConfig()
}
return layer_metadata
def _create_dummy_input_shape(
self, quantized_layer: tf.keras.layers.Layer
) -> Union[List[int], Tuple[Any, Any]]:
dummy_input_shape = [1, 128, 128, 1]
# SegmentationHead layer requires a tuple of 2 tensors.
if isinstance(quantized_layer,
quantized_nn_layers.SegmentationHeadQuantized):
dummy_input_shape = ([1, 1, 1, 1], [1, 1, 1, 1])
return dummy_input_shape
def replacement(self, match_layer: LayerNode) -> LayerNode:
"""See base class."""
bottleneck_layer = match_layer.layer
bottleneck_config = bottleneck_layer['config']
bottleneck_names_and_weights = list(match_layer.names_and_weights)
quantized_layer = self._quantized_layer_class(**bottleneck_config)
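    # Run shape inference on a dummy input so the quantized layer creates its
    # variables (weights and quantizer min/max) before weight matching below.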
dummy_input_shape = self._create_dummy_input_shape(quantized_layer)
quantized_layer.compute_output_shape(dummy_input_shape)
quantized_names_and_weights = zip(
[weight.name for weight in quantized_layer.weights],
quantized_layer.get_weights())
match_idx = 0
names_and_weights = []
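    # Reuse the original layer's weights in order; only the quantization-
    # specific variables keep their freshly initialized values.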
for name_and_weight in quantized_names_and_weights:
if not helper.is_quantization_weight_name(name=name_and_weight[0]):
name_and_weight = bottleneck_names_and_weights[match_idx]
match_idx = match_idx + 1
names_and_weights.append(name_and_weight)
if match_idx != len(bottleneck_names_and_weights):
      raise ValueError(
          'Only {}/{} of the bottleneck weights were transformed.'.format(
              match_idx, len(bottleneck_names_and_weights)))
quantized_layer_config = tf_utils.serialize_layer(
quantized_layer, use_legacy_format=True
)
quantized_layer_config['name'] = quantized_layer_config['config']['name']
layer_metadata = self._create_layer_metadata(bottleneck_layer['class_name'])
return LayerNode(
quantized_layer_config,
metadata=layer_metadata,
names_and_weights=names_and_weights)
CUSTOM_TRANSFORMS = [
CustomLayerQuantize('Vision>BottleneckBlock',
quantized_nn_blocks.BottleneckBlockQuantized),
CustomLayerQuantize('Vision>InvertedBottleneckBlock',
quantized_nn_blocks.InvertedBottleneckBlockQuantized),
CustomLayerQuantize('Vision>Conv2DBNBlock',
quantized_nn_blocks.Conv2DBNBlockQuantized),
CustomLayerQuantize('Vision>SegmentationHead',
quantized_nn_layers.SegmentationHeadQuantized),
CustomLayerQuantize('Vision>SpatialPyramidPooling',
quantized_nn_layers.SpatialPyramidPoolingQuantized),
CustomLayerQuantize('Vision>ASPP', quantized_nn_layers.ASPPQuantized)
]
| 5,057 | 41.504202 | 89 | py |
models | models-master/official/projects/qat/vision/quantization/helper_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for helper."""
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.quantization import helper
class HelperTest(tf.test.TestCase):
def create_simple_model(self):
return tf.keras.models.Sequential([
tf.keras.layers.Dense(8, input_shape=(16,)),
])
def test_copy_original_weights_for_simple_model_with_custom_weights(self):
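    # Seed the source model with all-ones weights and the QAT model with
    # zeros; after copying, every non-quantization weight in the QAT model
    # should be all ones again.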
one_model = self.create_simple_model()
one_weights = [np.ones_like(weight) for weight in one_model.get_weights()]
one_model.set_weights(one_weights)
qat_model = tfmot.quantization.keras.quantize_model(
self.create_simple_model())
zero_weights = [np.zeros_like(weight) for weight in qat_model.get_weights()]
qat_model.set_weights(zero_weights)
helper.copy_original_weights(one_model, qat_model)
qat_model_weights = qat_model.get_weights()
count = 0
for idx, weight in enumerate(qat_model.weights):
if not helper.is_quantization_weight_name(weight.name):
self.assertAllEqual(
qat_model_weights[idx], np.ones_like(qat_model_weights[idx]))
count += 1
self.assertLen(one_model.weights, count)
self.assertGreater(len(qat_model.weights), len(one_model.weights))
if __name__ == '__main__':
tf.test.main()
| 1,928 | 34.072727 | 80 | py |
models | models-master/official/projects/qat/vision/quantization/schemes.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantization schemes."""
# Import libraries
import tensorflow_model_optimization as tfmot
from official.projects.qat.vision.quantization import layer_transforms
default_8bit_transforms = tfmot.quantization.keras.default_8bit.default_8bit_transforms
class QuantizeLayoutTransform(
tfmot.quantization.keras.QuantizeLayoutTransform):
"""Default model transformations."""
def apply(self, model, layer_quantize_map):
"""Implement default 8-bit transforms.
Currently this means the following.
      1. Pull activations into layers and fuse them. (TODO)
      2. Modify range in incoming layers for Concat. (TODO)
      3. Fuse Conv2D/DepthwiseConv2D + BN into a single layer.
Args:
model: Keras model to be quantized.
layer_quantize_map: Map with keys as layer names, and values as dicts
containing custom `QuantizeConfig`s which may have been passed with
layers.
Returns:
(Transformed Keras model to better match TensorFlow Lite backend, updated
layer quantize map.)
"""
transforms = [
default_8bit_transforms.InputLayerQuantize(),
default_8bit_transforms.SeparableConv1DQuantize(),
default_8bit_transforms.SeparableConvQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormReLUQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormActivationQuantize(),
default_8bit_transforms.Conv2DBatchNormReLUQuantize(),
default_8bit_transforms.Conv2DBatchNormActivationQuantize(),
default_8bit_transforms.Conv2DReshapeBatchNormQuantize(),
default_8bit_transforms.Conv2DBatchNormQuantize(),
default_8bit_transforms.ConcatTransform6Inputs(),
default_8bit_transforms.ConcatTransform5Inputs(),
default_8bit_transforms.ConcatTransform4Inputs(),
default_8bit_transforms.ConcatTransform3Inputs(),
default_8bit_transforms.ConcatTransform(),
default_8bit_transforms.LayerReLUQuantize(),
default_8bit_transforms.LayerReluActivationQuantize()
]
transforms += layer_transforms.CUSTOM_TRANSFORMS
return tfmot.quantization.keras.graph_transformations.model_transformer.ModelTransformer(
model, transforms,
set(layer_quantize_map.keys()), layer_quantize_map).transform()
class Default8BitQuantizeScheme(
tfmot.quantization.keras.default_8bit.Default8BitQuantizeScheme):
def get_layout_transformer(self):
return QuantizeLayoutTransform()
| 3,089 | 39.12987 | 93 | py |