repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
models | models-master/official/projects/yolo/modeling/heads/yolo_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo heads."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.modeling.heads import yolo_head as heads
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
def test_network_creation(self):
"""Test creation of YOLO family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
classes = 100
bps = 3
head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
endpoints = head(inputs)
# print(endpoints)
for key in endpoints.keys():
expected_input_shape = input_shape[key]
expected_input_shape[-1] = (classes + 5) * bps
self.assertAllEqual(endpoints[key].shape.as_list(), expected_input_shape)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
classes = 100
bps = 3
head = heads.YoloHead(3, 5, classes=classes, boxes_per_level=bps)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = head(inputs)
configs = head.get_config()
head_from_config = heads.YoloHead.from_config(configs)
self.assertAllEqual(head.get_config(), head_from_config.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,323 | 30.405405 | 79 | py |
models | models-master/official/projects/yolo/modeling/heads/yolov7_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 heads."""
import tensorflow as tf
from official.projects.yolo.ops import initializer_ops
class YoloV7DetectionHead(tf.keras.layers.Layer):
"""YOLOv7 Detection Head."""
def __init__(
self,
num_classes=80,
min_level=3,
max_level=5,
num_anchors=3,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
use_separable_conv=False,
**kwargs,
):
"""Initializes YOLOv7 head.
Args:
num_classes: integer.
min_level: minimum feature level.
max_level: maximum feature level.
num_anchors: integer for number of anchors at each location.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_initializer: bias initializer for convolutional layers.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
use_separable_conv: `bool` whether to use separable convs.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._num_classes = num_classes
self._min_level = min_level
self._max_level = max_level
self._num_anchors = num_anchors
self._kernel_initializer = initializer_ops.pytorch_kernel_initializer(
kernel_initializer
)
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_separable_conv = use_separable_conv
def _bias_init(self, scale, in_channels, isize=640, no_per_conf=8):
def bias(shape, dtype):
init = tf.keras.initializers.VarianceScaling(
scale=1 / 3, mode='fan_in', distribution='uniform')
base = init([in_channels, *shape], dtype=dtype)[0]
base = tf.reshape(base, [self._num_anchors, -1])
box, conf, classes = tf.split(base, [4, 1, -1], axis=-1)
conf += tf.math.log(no_per_conf / ((isize / scale)**2))
classes += tf.math.log(0.6 / (self._num_classes - 0.99))
base = tf.concat([box, conf, classes], axis=-1)
base = tf.reshape(base, [-1])
return base
return bias
def build(self, input_shape):
self._convs = []
self._implicit_adds = []
self._implicit_muls = []
conv_op = (
tf.keras.layers.SeparableConv2D
if self._use_separable_conv
else tf.keras.layers.Conv2D
)
for level in range(self._min_level, self._max_level + 1):
# Note that we assume height == width.
h = input_shape[str(level)][2]
scale = 2 ** int(level)
in_channels = input_shape[str(level)][-1]
# Outputs are num_classes + 5 (box coordinates + objectness score)
self._convs.append(
conv_op(
(self._num_classes + 5) * self._num_anchors,
kernel_size=1,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_init(scale, in_channels, h * scale),
)
)
self._implicit_adds.append(
self.add_weight(
name=f'implicit_adds_l{level}',
shape=[1, 1, 1, in_channels],
initializer=tf.keras.initializers.random_normal(
mean=0.0, stddev=0.02
),
trainable=True,
)
)
self._implicit_muls.append(
self.add_weight(
name=f'implicit_muls_l{level}',
shape=[1, 1, 1, (self._num_classes + 5) * self._num_anchors],
initializer=tf.keras.initializers.random_normal(
mean=1.0, stddev=0.02
),
trainable=True,
)
)
super().build(input_shape)
def call(self, inputs, training=False):
outputs = {}
for i, level in enumerate(range(self._min_level, self._max_level + 1)):
x = inputs[str(level)]
x = self._implicit_adds[i] + x
x = self._convs[i](x)
x = self._implicit_muls[i] * x
_, h, w, _ = x.get_shape().as_list()
x = tf.reshape(x, [-1, h, w, self._num_anchors, self._num_classes + 5])
outputs[str(level)] = x
return outputs
def get_config(self):
config = dict(
num_classes=self._num_classes,
min_level=self._min_level,
max_level=self._max_level,
num_anchors=self._num_anchors,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_initializer,
bias_regularizer=self._bias_regularizer,
use_separable_conv=self._use_separable_conv,
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 5,454 | 33.525316 | 78 | py |
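A minimal usage sketch for the `YoloV7DetectionHead` in the row above (added here for illustration only, not part of the dataset row): the head expects a dict of per-level features keyed by the string level, and returns one tensor per level reshaped to `[batch, h, w, num_anchors, num_classes + 5]`. The feature shapes below are assumptions for a 640x640 input.

```python
import tensorflow as tf

from official.projects.yolo.modeling.heads import yolov7_head

# Hypothetical per-level features for a 640x640 input, keyed by string level.
features = {
    '3': tf.ones([1, 80, 80, 256]),
    '4': tf.ones([1, 40, 40, 512]),
    '5': tf.ones([1, 20, 20, 1024]),
}

head = yolov7_head.YoloV7DetectionHead(
    num_classes=80, min_level=3, max_level=5, num_anchors=3)
outputs = head(features)

for level, out in outputs.items():
  # Each output is [batch, h, w, num_anchors, num_classes + 5].
  print(level, out.shape)
```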
models | models-master/official/projects/yolo/modeling/heads/yolo_head.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo heads."""
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class YoloHead(tf.keras.layers.Layer):
"""YOLO Prediction Head."""
def __init__(self,
min_level,
max_level,
classes=80,
boxes_per_level=3,
output_extras=0,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation=None,
smart_bias=False,
use_separable_conv=False,
**kwargs):
"""Yolo Prediction Head initialization function.
Args:
min_level: `int`, the minimum backbone output level.
max_level: `int`, the maximum backbone output level.
classes: `int`, number of classes per category.
boxes_per_level: `int`, number of boxes to predict per level.
output_extras: `int`, number of additional output channels that the head
should predict for non-object detection and non-image classification
tasks.
norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
activation: `str`, the activation function to use, typically leaky or mish.
smart_bias: `bool`, whether to use smart bias.
use_separable_conv: `bool` whether to use separable convs.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._min_level = min_level
self._max_level = max_level
self._key_list = [
str(key) for key in range(self._min_level, self._max_level + 1)
]
self._classes = classes
self._boxes_per_level = boxes_per_level
self._output_extras = output_extras
self._output_conv = (classes + output_extras + 5) * boxes_per_level
self._smart_bias = smart_bias
self._use_separable_conv = use_separable_conv
self._base_config = dict(
activation=activation,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
self._conv_config = dict(
filters=self._output_conv,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
use_bn=False,
use_separable_conv=self._use_separable_conv,
**self._base_config)
def bias_init(self, scale, inshape, isize=640, no_per_conf=8):
def bias(shape, dtype):
init = tf.keras.initializers.Zeros()
base = init(shape, dtype=dtype)
if self._smart_bias:
base = tf.reshape(base, [self._boxes_per_level, -1])
box, conf, classes = tf.split(base, [4, 1, -1], axis=-1)
conf += tf.math.log(no_per_conf / ((isize / scale)**2))
classes += tf.math.log(0.6 / (self._classes - 0.99))
base = tf.concat([box, conf, classes], axis=-1)
base = tf.reshape(base, [-1])
return base
return bias
def build(self, input_shape):
self._head = dict()
for key in self._key_list:
scale = 2**int(key)
self._head[key] = nn_blocks.ConvBN(
bias_initializer=self.bias_init(scale, input_shape[key][-1]),
**self._conv_config)
def call(self, inputs):
outputs = dict()
for key in self._key_list:
outputs[key] = self._head[key](inputs[key])
return outputs
@property
def output_depth(self):
return (self._classes + self._output_extras + 5) * self._boxes_per_level
@property
def num_boxes(self):
if self._min_level is None or self._max_level is None:
raise Exception(
'Model has to be built before number of boxes can be determined.')
return (self._max_level - self._min_level + 1) * self._boxes_per_level
@property
def num_heads(self):
return self._max_level - self._min_level + 1
def get_config(self):
config = dict(
min_level=self._min_level,
max_level=self._max_level,
classes=self._classes,
boxes_per_level=self._boxes_per_level,
output_extras=self._output_extras,
**self._base_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 5,234 | 33.440789 | 80 | py |
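The test file in the first row already exercises a full forward pass of `YoloHead`; the short sketch below (an illustration only, not part of the dataset) checks the derived properties instead, which follow directly from the constructor arguments.

```python
from official.projects.yolo.modeling.heads import yolo_head

head = yolo_head.YoloHead(min_level=3, max_level=5, classes=80, boxes_per_level=3)

# output_depth = (classes + output_extras + 5) * boxes_per_level = 85 * 3
print(head.output_depth)  # 255
print(head.num_heads)     # 3 feature levels (3, 4, 5)
print(head.num_boxes)     # 9 anchors in total across the levels
```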
models | models-master/official/projects/yolo/modeling/heads/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/tasks/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes used to train Yolo."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import config_definitions
from official.core import input_reader
from official.core import task_factory
from official.modeling import performance
from official.projects.yolo import optimization
from official.projects.yolo.configs import yolov7 as exp_cfg
from official.projects.yolo.dataloaders import tf_example_decoder
from official.projects.yolo.dataloaders import yolo_input
from official.projects.yolo.losses import yolov7_loss
from official.projects.yolo.modeling import factory
from official.projects.yolo.ops import kmeans_anchors
from official.projects.yolo.ops import mosaic
from official.projects.yolo.ops import preprocessing_ops
from official.projects.yolo.tasks import task_utils
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.ops import box_ops
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
@task_factory.register_task_cls(exp_cfg.YoloV7Task)
class YoloV7Task(base_task.Task):
"""A single-replica view of training procedure.
YOLO task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def __init__(self, params, logging_dir: Optional[str] = None):
super().__init__(params, logging_dir)
min_level = self.task_config.model.min_level
max_level = self.task_config.model.max_level
anchors_dict = self.task_config.model.anchor_boxes.get(
min_level, max_level)[0]
anchors, strides = [], []
for level in range(min_level, max_level + 1):
anchors.append(anchors_dict[str(level)])
strides.append(2 ** level)
loss_config = self.task_config.model.loss
if loss_config.use_ota:
loss_fn = yolov7_loss.YoloV7LossOTA
else:
loss_fn = yolov7_loss.YoloV7Loss
self._loss_fn = loss_fn(
anchors=anchors,
strides=strides,
input_size=self.task_config.model.input_size[:2],
alpha=loss_config.alpha,
gamma=loss_config.gamma,
box_weight=loss_config.box_weight,
obj_weight=loss_config.obj_weight,
cls_weight=loss_config.cls_weight,
label_smoothing=loss_config.label_smoothing,
anchor_threshold=loss_config.anchor_threshold,
iou_mix_ratio=loss_config.iou_mix_ratio,
num_classes=self.task_config.model.num_classes,
auto_balance=loss_config.auto_balance,
)
self._coco_91_to_80 = False
self._metrics = []
# globally set the random seed
preprocessing_ops.set_random_seeds(seed=params.seed)
if self.task_config.model.anchor_boxes.generate_anchors:
self.generate_anchors()
return
def generate_anchors(self):
"""Generate Anchor boxes for an arbitrary object detection dataset."""
input_size = self.task_config.model.input_size
anchor_cfg = self.task_config.model.anchor_boxes
backbone = self.task_config.model.backbone.get()
dataset = self.task_config.train_data
decoder = self._get_data_decoder(dataset)
num_anchors = backbone.max_level - backbone.min_level + 1
num_anchors *= anchor_cfg.anchors_per_scale
gbs = dataset.global_batch_size
dataset.global_batch_size = 1
box_reader = kmeans_anchors.BoxGenInputReader(
dataset,
dataset_fn=dataset_fn.pick_dataset_fn(
self.task_config.train_data.file_type),
decoder_fn=decoder.decode)
boxes = box_reader.read(
k=num_anchors,
anchors_per_scale=anchor_cfg.anchors_per_scale,
image_resolution=input_size,
scaling_mode=anchor_cfg.scaling_mode,
box_generation_mode=anchor_cfg.box_generation_mode,
num_samples=anchor_cfg.num_samples)
dataset.global_batch_size = gbs
with open('anchors.txt', 'w') as f:
f.write(f'input resolution: {input_size} \n boxes: \n {boxes}')
logging.info('INFO: boxes will be saved to anchors.txt, make sure to save '
             'them and update the boxes field in your yaml config file.')
anchor_cfg.set_boxes(boxes)
return boxes
def build_model(self):
"""Build an instance of Yolo."""
model_base_cfg = self.task_config.model
l2_weight_decay = self.task_config.weight_decay / 2.0
input_size = model_base_cfg.input_size.copy()
input_specs = tf.keras.layers.InputSpec(shape=[None] + input_size)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay) if l2_weight_decay else None)
model = factory.build_yolov7(input_specs, model_base_cfg, l2_regularizer)
model.build(input_specs.shape)
model.summary(print_fn=logging.info)
# save for later usage within the task.
self._model = model
return model
def _get_data_decoder(self, params):
"""Get a decoder object to decode the dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
self._coco_91_to_80 = decoder_cfg.coco91_to_80
decoder = tf_example_decoder.TfExampleDecoder(
coco91_to_80=decoder_cfg.coco91_to_80,
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
return decoder
def build_inputs(self, params, input_context=None):
"""Build input dataset."""
model = self.task_config.model
# get anchor boxes dict based on models min and max level
backbone = model.backbone.get()
anchor_dict, level_limits = model.anchor_boxes.get(backbone.min_level,
backbone.max_level)
params.seed = self.task_config.seed
# set shared parameters between mosaic and yolo_input
base_config = dict(
letter_box=params.parser.letter_box,
aug_rand_translate=params.parser.aug_rand_translate,
aug_rand_angle=params.parser.aug_rand_angle,
aug_rand_perspective=params.parser.aug_rand_perspective,
area_thresh=params.parser.area_thresh,
random_flip=params.parser.random_flip,
seed=params.seed,
)
# get the decoder
decoder = self._get_data_decoder(params)
# init Mosaic
sample_fn = mosaic.Mosaic(
output_size=model.input_size,
mosaic_frequency=params.parser.mosaic.mosaic_frequency,
mosaic9_frequency=params.parser.mosaic.mosaic9_frequency,
mixup_frequency=params.parser.mosaic.mixup_frequency,
jitter=params.parser.mosaic.jitter,
mosaic_center=params.parser.mosaic.mosaic_center,
mosaic9_center=params.parser.mosaic.mosaic9_center,
mosaic_crop_mode=params.parser.mosaic.mosaic_crop_mode,
aug_scale_min=params.parser.mosaic.aug_scale_min,
aug_scale_max=params.parser.mosaic.aug_scale_max,
**base_config)
# init Parser
parser = yolo_input.Parser(
output_size=model.input_size,
anchors=anchor_dict,
use_tie_breaker=params.parser.use_tie_breaker,
jitter=params.parser.jitter,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_rand_hue=params.parser.aug_rand_hue,
aug_rand_saturation=params.parser.aug_rand_saturation,
aug_rand_brightness=params.parser.aug_rand_brightness,
max_num_instances=params.parser.max_num_instances,
scale_xy=model.detection_generator.scale_xy.get(),
expanded_strides=model.detection_generator.path_scales.get(),
darknet=False,
best_match_only=params.parser.best_match_only,
anchor_t=params.parser.anchor_thresh,
random_pad=params.parser.random_pad,
level_limits=level_limits,
dtype=params.dtype,
**base_config,
)
# init the dataset reader
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
sample_fn=sample_fn.mosaic_fn(is_training=params.is_training),
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_metrics(self, training=True):
"""Build detection metrics."""
metrics = []
metrics = [
task_utils.ListMetrics(
['box_loss', 'obj_loss', 'cls_loss', 'iou'], 'separate_losses'
),
task_utils.ListMetrics(
['num_matchings', 'num_gts', 'num_duplicates'], 'stats'
),
]
self._metrics = metrics
if not training:
annotation_file = self.task_config.annotation_file
if self._coco_91_to_80:
annotation_file = None
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=annotation_file,
include_mask=False,
need_rescale_bboxes=False,
per_category_metrics=self._task_config.per_category_metrics,
max_num_eval_detections=self.task_config.max_num_eval_detections)
return metrics
def build_losses(self, outputs, labels, aux_losses=None):
"""Build YOLOv7 losses."""
return self._loss_fn(labels, outputs)
def train_step(self, inputs, model, optimizer, metrics=None):
"""Train Step.
Forward step and backwards propagate the model.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
with tf.GradientTape(persistent=False) as tape:
# Compute a prediction
y_pred = model(image, training=True)
# Cast to float32 for gradient computation
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
# Get the total loss
loss = self.build_losses(y_pred['raw_output'], label)
scaled_loss = loss
# Scale the loss for numerical stability
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
# Compute the gradient
train_vars = model.trainable_variables
gradients = tape.gradient(scaled_loss, train_vars)
# Get unscaled loss if we are using the loss scale optimizer on fp16
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
gradients = optimizer.get_unscaled_gradients(gradients)
# Apply gradients to the model
optimizer.apply_gradients(zip(gradients, train_vars))
logs = {self.loss: loss}
# Compute all metrics
if metrics:
metrics[0].update_state(self._loss_fn.report_separate_losses())
logs.update({metrics[0].name: metrics[0].result()})
metrics[1].update_state(self._loss_fn.report_stats())
logs.update({metrics[1].name: metrics[1].result()})
return logs
def _reorg_boxes(self, boxes, info, num_detections):
"""Scale and Clean boxes prior to Evaluation."""
mask = tf.sequence_mask(num_detections, maxlen=tf.shape(boxes)[1])
mask = tf.cast(tf.expand_dims(mask, axis=-1), boxes.dtype)
# Denormalize the boxes by the shape of the image
inshape = tf.expand_dims(info[:, 1, :], axis=1)
ogshape = tf.expand_dims(info[:, 0, :], axis=1)
scale = tf.expand_dims(info[:, 2, :], axis=1)
offset = tf.expand_dims(info[:, 3, :], axis=1)
boxes = box_ops.denormalize_boxes(boxes, inshape)
boxes = box_ops.clip_boxes(boxes, inshape)
boxes += tf.tile(offset, [1, 1, 2])
boxes /= tf.tile(scale, [1, 1, 2])
boxes = box_ops.clip_boxes(boxes, ogshape)
# Mask the boxes for usage
boxes *= mask
boxes += (mask - 1)
return boxes
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
# Step the model once
y_pred = model(image, training=False)
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
loss_val = self.build_losses(y_pred['raw_output'], label)
logs = {self.loss: loss_val}
# Reorganize and rescale the boxes
info = label['groundtruths']['image_info']
boxes = self._reorg_boxes(y_pred['bbox'], info, y_pred['num_detections'])
# Build the input for the COCO evaluation metric
coco_model_outputs = {
'detection_boxes': boxes,
'detection_scores': y_pred['confidence'],
'detection_classes': y_pred['classes'],
'num_detections': y_pred['num_detections'],
'source_id': label['groundtruths']['source_id'],
'image_info': label['groundtruths']['image_info']
}
# Compute all metrics
if metrics:
logs.update(
{self.coco_metric.name: (label['groundtruths'], coco_model_outputs)})
if metrics:
metrics[0].update_state(self._loss_fn.report_separate_losses())
logs.update({metrics[0].name: metrics[0].result()})
metrics[1].update_state(self._loss_fn.report_stats())
logs.update({metrics[1].name: metrics[1].result()})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
"""Get Metric Results."""
if not state:
self.coco_metric.reset_states()
state = self.coco_metric
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
"""Reduce logs and remove unneeded items. Update with COCO results."""
res = self.coco_metric.result()
return res
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
logging.info('Training from Scratch.')
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
opt_factory = optimization.YoloOptimizerFactory(optimizer_config)
# pylint: disable=protected-access
ema = opt_factory._use_ema
opt_factory._use_ema = False
opt_type = opt_factory._optimizer_type
if opt_type == 'sgd_torch':
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
optimizer.set_bias_lr(
opt_factory.get_bias_lr_schedule(self._task_config.smart_bias_lr))
optimizer.search_and_set_variable_groups(self._model.trainable_variables)
else:
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
opt_factory._use_ema = ema
if ema:
logging.info('EMA is enabled.')
optimizer = opt_factory.add_ema(optimizer)
# pylint: enable=protected-access
if runtime_config and runtime_config.loss_scale:
use_float16 = runtime_config.mixed_precision_dtype == 'float16'
optimizer = performance.configure_optimizer(
optimizer,
use_float16=use_float16,
loss_scale=runtime_config.loss_scale)
return optimizer
| 17,679 | 35.833333 | 80 | py |
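The mixed-precision branch of `train_step` above scales the loss before differentiation and unscales the gradients before applying them. The standalone sketch below mirrors that pattern on a toy variable; it illustrates the TF loss-scaling API only and is not the task's actual training loop.

```python
import tensorflow as tf

# Wrap any optimizer in a LossScaleOptimizer, as done for float16 training.
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
    tf.keras.optimizers.SGD(learning_rate=0.01))

w = tf.Variable(2.0)
with tf.GradientTape() as tape:
  loss = tf.square(w)
  # Scale the loss for numerical stability under float16.
  scaled_loss = optimizer.get_scaled_loss(loss)

grads = tape.gradient(scaled_loss, [w])
# Undo the scaling before the update, matching train_step.
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, [w]))
```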
models | models-master/official/projects/yolo/tasks/yolo.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes used to train Yolo."""
import collections
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import config_definitions
from official.core import input_reader
from official.core import task_factory
from official.modeling import performance
from official.projects.yolo import optimization
from official.projects.yolo.configs import yolo as exp_cfg
from official.projects.yolo.dataloaders import tf_example_decoder
from official.projects.yolo.dataloaders import yolo_input
from official.projects.yolo.modeling import factory
from official.projects.yolo.ops import kmeans_anchors
from official.projects.yolo.ops import mosaic
from official.projects.yolo.ops import preprocessing_ops
from official.projects.yolo.tasks import task_utils
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.ops import box_ops
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
@task_factory.register_task_cls(exp_cfg.YoloTask)
class YoloTask(base_task.Task):
"""A single-replica view of training procedure.
YOLO task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def __init__(self, params, logging_dir: Optional[str] = None):
super().__init__(params, logging_dir)
self.coco_metric = None
self._loss_fn = None
self._model = None
self._coco_91_to_80 = False
self._metrics = []
# globally set the random seed
preprocessing_ops.set_random_seeds(seed=params.seed)
if self.task_config.model.anchor_boxes.generate_anchors:
self.generate_anchors()
return
def generate_anchors(self):
"""Generate Anchor boxes for an arbitrary object detection dataset."""
input_size = self.task_config.model.input_size
anchor_cfg = self.task_config.model.anchor_boxes
backbone = self.task_config.model.backbone.get()
dataset = self.task_config.train_data
decoder = self._get_data_decoder(dataset)
num_anchors = backbone.max_level - backbone.min_level + 1
num_anchors *= anchor_cfg.anchors_per_scale
gbs = dataset.global_batch_size
dataset.global_batch_size = 1
box_reader = kmeans_anchors.BoxGenInputReader(
dataset,
dataset_fn=dataset_fn.pick_dataset_fn(
self.task_config.train_data.file_type),
decoder_fn=decoder.decode)
boxes = box_reader.read(
k=num_anchors,
anchors_per_scale=anchor_cfg.anchors_per_scale,
image_resolution=input_size,
scaling_mode=anchor_cfg.scaling_mode,
box_generation_mode=anchor_cfg.box_generation_mode,
num_samples=anchor_cfg.num_samples)
dataset.global_batch_size = gbs
with open('anchors.txt', 'w') as f:
f.write(f'input resolution: {input_size} \n boxes: \n {boxes}')
logging.info('INFO: boxes will be saved to anchors.txt, make sure to save '
             'them and update the boxes field in your yaml config file.')
anchor_cfg.set_boxes(boxes)
return boxes
def build_model(self):
"""Build an instance of Yolo."""
model_base_cfg = self.task_config.model
l2_weight_decay = self.task_config.weight_decay / 2.0
input_size = model_base_cfg.input_size.copy()
input_specs = tf.keras.layers.InputSpec(shape=[None] + input_size)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay) if l2_weight_decay else None)
model, losses = factory.build_yolo(
input_specs, model_base_cfg, l2_regularizer)
model.build(input_specs.shape)
model.summary(print_fn=logging.info)
# save for later usage within the task.
self._loss_fn = losses
self._model = model
return model
def _get_data_decoder(self, params):
"""Get a decoder object to decode the dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
self._coco_91_to_80 = decoder_cfg.coco91_to_80
decoder = tf_example_decoder.TfExampleDecoder(
coco91_to_80=decoder_cfg.coco91_to_80,
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
return decoder
def build_inputs(self, params, input_context=None):
"""Build input dataset."""
model = self.task_config.model
# get anchor boxes dict based on models min and max level
backbone = model.backbone.get()
anchor_dict, level_limits = model.anchor_boxes.get(backbone.min_level,
backbone.max_level)
params.seed = self.task_config.seed
# set shared parameters between mosaic and yolo_input
base_config = dict(
letter_box=params.parser.letter_box,
aug_rand_translate=params.parser.aug_rand_translate,
aug_rand_angle=params.parser.aug_rand_angle,
aug_rand_perspective=params.parser.aug_rand_perspective,
area_thresh=params.parser.area_thresh,
random_flip=params.parser.random_flip,
seed=params.seed,
)
# get the decoder
decoder = self._get_data_decoder(params)
# init Mosaic
sample_fn = mosaic.Mosaic(
output_size=model.input_size,
mosaic_frequency=params.parser.mosaic.mosaic_frequency,
mixup_frequency=params.parser.mosaic.mixup_frequency,
jitter=params.parser.mosaic.jitter,
mosaic_center=params.parser.mosaic.mosaic_center,
mosaic_crop_mode=params.parser.mosaic.mosaic_crop_mode,
aug_scale_min=params.parser.mosaic.aug_scale_min,
aug_scale_max=params.parser.mosaic.aug_scale_max,
**base_config)
# init Parser
parser = yolo_input.Parser(
output_size=model.input_size,
anchors=anchor_dict,
use_tie_breaker=params.parser.use_tie_breaker,
jitter=params.parser.jitter,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_rand_hue=params.parser.aug_rand_hue,
aug_rand_saturation=params.parser.aug_rand_saturation,
aug_rand_brightness=params.parser.aug_rand_brightness,
max_num_instances=params.parser.max_num_instances,
scale_xy=model.detection_generator.scale_xy.get(),
expanded_strides=model.detection_generator.path_scales.get(),
darknet=model.darknet_based_model,
best_match_only=params.parser.best_match_only,
anchor_t=params.parser.anchor_thresh,
random_pad=params.parser.random_pad,
level_limits=level_limits,
dtype=params.dtype,
**base_config)
# init the dataset reader
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
sample_fn=sample_fn.mosaic_fn(is_training=params.is_training),
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_metrics(self, training=True):
"""Build detection metrics."""
metrics = []
backbone = self.task_config.model.backbone.get()
metric_names = collections.defaultdict(list)
for key in range(backbone.min_level, backbone.max_level + 1):
key = str(key)
metric_names[key].append('loss')
metric_names[key].append('avg_iou')
metric_names[key].append('avg_obj')
metric_names['net'].append('box')
metric_names['net'].append('class')
metric_names['net'].append('conf')
for _, key in enumerate(metric_names.keys()):
metrics.append(task_utils.ListMetrics(metric_names[key], name=key))
self._metrics = metrics
if not training:
annotation_file = self.task_config.annotation_file
if self._coco_91_to_80:
annotation_file = None
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=annotation_file,
include_mask=False,
need_rescale_bboxes=False,
per_category_metrics=self._task_config.per_category_metrics,
max_num_eval_detections=self.task_config.max_num_eval_detections)
return metrics
def build_losses(self, outputs, labels, aux_losses=None):
"""Build YOLO losses."""
return self._loss_fn(labels, outputs)
def train_step(self, inputs, model, optimizer, metrics=None):
"""Train Step.
Forward step and backwards propagate the model.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
with tf.GradientTape(persistent=False) as tape:
# Compute a prediction
y_pred = model(image, training=True)
# Cast to float32 for gradient computation
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
# Get the total loss
(scaled_loss, metric_loss,
loss_metrics) = self.build_losses(y_pred['raw_output'], label)
# Scale the loss for numerical stability
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
# Compute the gradient
train_vars = model.trainable_variables
gradients = tape.gradient(scaled_loss, train_vars)
# Get unscaled loss if we are using the loss scale optimizer on fp16
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
gradients = optimizer.get_unscaled_gradients(gradients)
# Apply gradients to the model
optimizer.apply_gradients(zip(gradients, train_vars))
logs = {self.loss: metric_loss}
# Compute all metrics
if metrics:
for m in metrics:
m.update_state(loss_metrics[m.name])
logs.update({m.name: m.result()})
return logs
def _reorg_boxes(self, boxes, info, num_detections):
"""Scale and Clean boxes prior to Evaluation."""
mask = tf.sequence_mask(num_detections, maxlen=tf.shape(boxes)[1])
mask = tf.cast(tf.expand_dims(mask, axis=-1), boxes.dtype)
# Denormalize the boxes by the shape of the image
inshape = tf.expand_dims(info[:, 1, :], axis=1)
ogshape = tf.expand_dims(info[:, 0, :], axis=1)
scale = tf.expand_dims(info[:, 2, :], axis=1)
offset = tf.expand_dims(info[:, 3, :], axis=1)
boxes = box_ops.denormalize_boxes(boxes, inshape)
boxes = box_ops.clip_boxes(boxes, inshape)
boxes += tf.tile(offset, [1, 1, 2])
boxes /= tf.tile(scale, [1, 1, 2])
boxes = box_ops.clip_boxes(boxes, ogshape)
# Mask the boxes for usage
boxes *= mask
boxes += (mask - 1)
return boxes
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
image, label = inputs
# Step the model once
y_pred = model(image, training=False)
y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)
(_, metric_loss, loss_metrics) = self.build_losses(y_pred['raw_output'],
label)
logs = {self.loss: metric_loss}
# Reorganize and rescale the boxes
info = label['groundtruths']['image_info']
boxes = self._reorg_boxes(y_pred['bbox'], info, y_pred['num_detections'])
# Build the input for the COCO evaluation metric
coco_model_outputs = {
'detection_boxes': boxes,
'detection_scores': y_pred['confidence'],
'detection_classes': y_pred['classes'],
'num_detections': y_pred['num_detections'],
'source_id': label['groundtruths']['source_id'],
'image_info': label['groundtruths']['image_info']
}
# Compute all metrics
if metrics:
logs.update(
{self.coco_metric.name: (label['groundtruths'], coco_model_outputs)})
for m in metrics:
m.update_state(loss_metrics[m.name])
logs.update({m.name: m.result()})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
"""Get Metric Results."""
if not state:
self.coco_metric.reset_states()
state = self.coco_metric
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
"""Reduce logs and remove unneeded items. Update with COCO results."""
res = self.coco_metric.result()
return res
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
logging.info('Training from Scratch.')
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def create_optimizer(self,
optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
Returns:
A tf.optimizers.Optimizer object.
"""
opt_factory = optimization.YoloOptimizerFactory(optimizer_config)
# pylint: disable=protected-access
ema = opt_factory._use_ema
opt_factory._use_ema = False
opt_type = opt_factory._optimizer_type
if opt_type == 'sgd_torch':
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
optimizer.set_bias_lr(
opt_factory.get_bias_lr_schedule(self._task_config.smart_bias_lr))
optimizer.search_and_set_variable_groups(self._model.trainable_variables)
else:
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
opt_factory._use_ema = ema
if ema:
logging.info('EMA is enabled.')
optimizer = opt_factory.add_ema(optimizer)
# pylint: enable=protected-access
if runtime_config and runtime_config.loss_scale:
use_float16 = runtime_config.mixed_precision_dtype == 'float16'
optimizer = performance.configure_optimizer(
optimizer,
use_float16=use_float16,
loss_scale=runtime_config.loss_scale)
return optimizer
| 16,647 | 35.669604 | 80 | py |
models | models-master/official/projects/yolo/tasks/task_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for yolo task."""
import tensorflow as tf
class ListMetrics:
"""Private class used to cleanly place the matric values for each level."""
def __init__(self, metric_names, name="ListMetrics"):
self.name = name
self._metric_names = metric_names
self._metrics = self.build_metric()
return
def build_metric(self):
metric_names = self._metric_names
metrics = []
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
return metrics
def update_state(self, loss_metrics):
metrics = self._metrics
for m in metrics:
m.update_state(loss_metrics[m.name])
return
def result(self):
logs = dict()
metrics = self._metrics
for m in metrics:
logs.update({m.name: m.result()})
return logs
def reset_states(self):
metrics = self._metrics
for m in metrics:
m.reset_states()
return
| 1,526 | 27.811321 | 77 | py |
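A small standalone sketch of how the tasks above use `ListMetrics` (assuming the module is importable): the loss functions report a dict of named scalars and the wrapper keeps a running `Mean` per name.

```python
from official.projects.yolo.tasks import task_utils

metrics = task_utils.ListMetrics(
    ['box_loss', 'obj_loss', 'cls_loss'], 'separate_losses')

# The YOLO losses report a dict keyed by the same names.
metrics.update_state({'box_loss': 0.5, 'obj_loss': 0.3, 'cls_loss': 0.2})
print(metrics.result())   # running means, keyed by metric name
metrics.reset_states()    # clear the accumulators between epochs
```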
models | models-master/official/projects/yolo/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
from official.common import dataset_fn
from official.core import task_factory
from official.projects.yolo.configs import darknet_classification as exp_cfg
from official.projects.yolo.dataloaders import classification_input
from official.vision.dataloaders import classification_input as classification_input_base
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import tfds_factory
from official.vision.tasks import image_classification
@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)
class ImageClassificationTask(image_classification.ImageClassificationTask):
"""A task for image classification."""
def build_inputs(self, params, input_context=None):
"""Builds classification input."""
num_classes = self.task_config.model.num_classes
input_size = self.task_config.model.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
is_multilabel = self.task_config.train_data.is_multilabel
if params.tfds_name:
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
else:
decoder = classification_input_base.Decoder(
image_field_key=image_field_key,
label_field_key=label_field_key,
is_multilabel=is_multilabel)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_type=params.aug_type,
is_multilabel=is_multilabel,
dtype=params.dtype)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
| 2,693 | 39.818182 | 89 | py |
models | models-master/official/projects/yolo/ops/kmeans_anchors.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""K-means for generation of anchor boxes for YOLO."""
import logging
import numpy as np
import tensorflow as tf
from official.core import input_reader
from official.projects.yolo.ops import box_ops
def _iou(x, centroids_x, iou_type="iou"):
"""Compute the WH IOU between the ground truths and the centroids."""
# set the center of the boxes to zeros
x = tf.concat([tf.zeros_like(x), x], axis=-1)
centroids = tf.concat([tf.zeros_like(centroids_x), centroids_x], axis=-1)
# compute IOU
if iou_type == "iou":
iou, _ = box_ops.compute_giou(x, centroids)
else:
_, iou = box_ops.compute_giou(x, centroids)
return iou
class AnchorKMeans:
"""Box Anchor K-means."""
@property
def boxes(self):
return self._boxes.numpy()
def get_box_from_dataset(self, dataset, num_samples=-1):
"""Load all the boxes in the dataset into memory."""
box_list = []
for i, sample in enumerate(dataset):
if num_samples > 0 and i > num_samples:
break
width = sample["width"]
height = sample["height"]
boxes = sample["groundtruth_boxes"]
# convert the box format from yxyx to xywh to allow
# kmeans by width height IOU
scale = tf.cast([width, height], boxes.dtype)
# scale the boxes then remove excessively small boxes that are
# less than 1 pixel in width or height
boxes = box_ops.yxyx_to_xcycwh(boxes)[..., 2:] * scale
boxes = boxes[tf.reduce_max(boxes, axis=-1) >= 1] / scale
box_list.append(boxes)
# loading is slow, so log the current iteration as a progress bar
tf.print("loading sample: ", i, end="\r")
box_list = tf.concat(box_list, axis=0)
inds = tf.argsort(tf.reduce_prod(box_list, axis=-1), axis=0)
box_list = tf.gather(box_list, inds, axis=0)
self._boxes = box_list
def get_init_centroids(self, boxes, k):
"""Initialize centroids by splitting the sorted boxes into k groups."""
box_num = tf.shape(boxes)[0]
# fixed_means
split = box_num // k
bn2 = split * k
boxes = boxes[:bn2, :]
cluster_groups = tf.split(boxes, k, axis=0)
clusters = []
for c in cluster_groups:
clusters.append(tf.reduce_mean(c, axis=0))
clusters = tf.convert_to_tensor(clusters).numpy()
return clusters
def iou(self, boxes, clusters):
"""Computes iou."""
# broadcast the clusters to the same shape as the boxes
n = tf.shape(boxes)[0]
k = tf.shape(clusters)[0]
boxes = tf.repeat(boxes, k, axis=0)
boxes = tf.reshape(boxes, (n, k, -1))
boxes = tf.cast(boxes, tf.float32)
clusters = tf.tile(clusters, [n, 1])
clusters = tf.reshape(clusters, (n, k, -1))
clusters = tf.cast(clusters, tf.float32)
# compute the IOU
return _iou(boxes, clusters)
def maximization(self, boxes, clusters, assignments):
"""K-means maximization term."""
for i in range(clusters.shape[0]):
hold = tf.math.reduce_mean(boxes[assignments == i], axis=0)
clusters = tf.tensor_scatter_nd_update(clusters, [[i]], [hold])
return clusters
def _kmeans(self, boxes, clusters, k, max_iters=1000):
"""Run Kmeans on arbitrary boxes and clusters with k centers."""
assignments = tf.zeros((boxes.shape[0]), dtype=tf.int64) - 1
dists = tf.zeros((boxes.shape[0], k))
num_iters = 1
# do one iteration outside of the optimization loop
dists = 1 - self.iou(boxes, clusters)
curr = tf.math.argmin(dists, axis=-1)
clusters = self.maximization(boxes, clusters, curr)
# iterate the boxes until the clusters no longer change
while not tf.math.reduce_all(curr == assignments) and num_iters < max_iters:
# get the distance
assignments = curr
dists = 1 - self.iou(boxes, clusters)
curr = tf.math.argmin(dists, axis=-1)
clusters = self.maximization(boxes, clusters, curr)
tf.print("k-Means box generation iteration: ", num_iters, end="\r")
num_iters += 1
tf.print("k-Means box generation iteration: ", num_iters, end="\n")
assignments = curr
# sort the clusters by area then get the final assignments
clusters = tf.convert_to_tensor(
np.array(sorted(clusters.numpy(), key=lambda x: x[0] * x[1])))
dists = 1 - self.iou(boxes, clusters)
assignments = tf.math.argmin(dists, axis=-1)
return clusters, assignments
def run_kmeans(self, k, boxes, clusters=None):
"""Kmeans Wrapping function."""
if clusters is None:
clusters = self.get_init_centroids(boxes, k)
clusters, assignments = self._kmeans(boxes, clusters, k)
return clusters.numpy(), assignments.numpy()
def _avg_iou(self, boxes, clusters, assignments):
"""Compute the IOU between the centroid and the boxes in the centroid."""
ious = []
num_boxes = []
clusters1 = tf.split(clusters, clusters.shape[0], axis=0)
for i, c in enumerate(clusters1):
hold = boxes[assignments == i]
iou = tf.reduce_mean(self.iou(hold, c)).numpy()
ious.append(iou)
num_boxes.append(hold.shape[0])
clusters = np.floor(np.array(sorted(clusters, key=lambda x: x[0] * x[1])))
print("boxes: ", clusters.tolist())
print("iou over cluster : ", ious)
print("boxes per cluster: ", num_boxes)
print("dataset avgiou: ", np.mean(iou))
return ious
def avg_iou_total(self, boxes, clusters):
clusters = tf.convert_to_tensor(clusters)
dists = 1 - self.iou(boxes, clusters)
assignments = tf.math.argmin(dists, axis=-1)
ious = self._avg_iou(boxes, clusters, assignments)
return clusters, assignments, ious
def get_boxes(self, boxes_, clusters, assignments=None):
"""given a the clusters, the boxes in each cluster."""
if assignments is None:
dists = 1 - self.iou(boxes_, np.array(clusters))
assignments = tf.math.argmin(dists, axis=-1)
boxes = []
clusters = tf.split(clusters, clusters.shape[0], axis=0)
for i, _ in enumerate(clusters):
hold = boxes_[assignments == i]
if hasattr(hold, "numpy"):
hold = hold.numpy()
boxes.append(hold)
return boxes
def __call__(self,
dataset,
k,
anchors_per_scale=None,
scaling_mode="sqrt_log",
box_generation_mode="across_level",
image_resolution=(512, 512, 3),
num_samples=-1):
"""Run k-means on th eboxes for a given input resolution.
Args:
dataset: `tf.data.Dataset` for the decoded object detection dataset. The
boxes must have the key 'groundtruth_boxes'.
k: `int` for the number for centroids to generate.
anchors_per_scale: `int` for how many anchor boxes to use per level.
scaling_mode: `str` for the type of box scaling to use when generating
anchor boxes. Must be in the set {sqrt, default}.
box_generation_mode: `str` for the type of kmeans to use when generating
anchor boxes. Must be in the set {across_level, per_level}.
image_resolution: `List[int]` for the resolution of the boxes to run
k-means for.
num_samples: `int` for number of samples to process in the dataset.
Returns:
boxes: `List[List[int]]` of shape [k, 2] for the anchor boxes to use for
box predictions.
"""
self.get_box_from_dataset(dataset, num_samples=num_samples)
if scaling_mode == "sqrt":
boxes_ls = tf.math.sqrt(self._boxes.numpy())
else:
boxes_ls = self._boxes.numpy()
if isinstance(image_resolution, int):
image_resolution = [image_resolution, image_resolution]
else:
image_resolution = image_resolution[:2]
image_resolution = image_resolution[::-1]
if box_generation_mode == "even_split":
clusters = self.get_init_centroids(boxes_ls, k)
dists = 1 - self.iou(boxes_ls, np.array(clusters))
assignments = tf.math.argmin(dists, axis=-1)
elif box_generation_mode == "across_level":
clusters = self.get_init_centroids(boxes_ls, k)
clusters, assignments = self.run_kmeans(k, boxes_ls, clusters)
else:
# generate a box region for each FPN level
clusters = self.get_init_centroids(boxes_ls, k//anchors_per_scale)
# square off the clusters
clusters += np.roll(clusters, 1, axis=-1)
clusters /= 2
# for each contained box set, compute K means
boxes_sets = self.get_boxes(boxes_ls, clusters)
clusters = []
for boxes in boxes_sets:
cluster_set, assignments = self.run_kmeans(anchors_per_scale, boxes)
clusters.extend(cluster_set)
clusters = np.array(clusters)
dists = 1 - self.iou(boxes_ls, np.array(clusters))
assignments = tf.math.argmin(dists, axis=-1)
if scaling_mode == "sqrt":
clusters = tf.square(clusters)
self._boxes *= tf.convert_to_tensor(image_resolution, self._boxes.dtype)
clusters = self.maximization(self._boxes, clusters, assignments)
if hasattr(clusters, "numpy"):
clusters = clusters.numpy()
_, _, _ = self.avg_iou_total(self._boxes, clusters)
clusters = np.floor(np.array(sorted(clusters, key=lambda x: x[0] * x[1])))
return clusters.tolist()
class BoxGenInputReader(input_reader.InputReader):
"""Input reader that returns a tf.data.Dataset instance."""
def read(self,
k,
anchors_per_scale,
scaling_mode="sqrt",
box_generation_mode="across_level",
image_resolution=(512, 512, 3),
num_samples=-1):
"""Run k-means on th eboxes for a given input resolution.
Args:
k: `int` for the number for centroids to generate.
anchors_per_scale: `int` for how many anchor boxes to use per level.
scaling_mode: `str` for the type of box scaling to use when generating
anchor boxes. Must be in the set {sqrt, none}. By default we use sqrt
to get an even distribution of anchor boxes across FPN levels.
box_generation_mode: `str` for the type of kmeans to use when generating
anchor boxes. Must be in the set {across_level, per_level}.
image_resolution: `List[int]` for the resolution of the boxes to run
k-means for.
num_samples: `Optional[int]` for the number of samples to use for kmeans,
typically about 5000 samples are all that are needed, but for the best
results use -1 to run the entire dataset.
Returns:
boxes: `List[List[int]]` of shape [k, 2] for the anchor boxes to use for
        box predictions.
"""
self._is_training = False
dataset = super().read()
dataset = dataset.unbatch()
kmeans_gen = AnchorKMeans()
boxes = kmeans_gen(
dataset,
k,
anchors_per_scale=anchors_per_scale,
image_resolution=image_resolution,
scaling_mode=scaling_mode,
box_generation_mode=box_generation_mode,
num_samples=num_samples)
del kmeans_gen # free the memory
del dataset
logging.info("clusting complete -> default boxes used ::")
logging.info(boxes)
return boxes
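# Example (sketch): running the anchor search directly with `AnchorKMeans` on
# an already decoded dataset. The dataset is assumed to yield dictionaries
# containing a 'groundtruth_boxes' key; the argument values below are
# illustrative, not prescriptive.
def _example_anchor_search(decoded_dataset):
  kmeans_gen = AnchorKMeans()
  anchors = kmeans_gen(
      decoded_dataset,
      k=9,
      anchors_per_scale=3,
      scaling_mode="sqrt",
      box_generation_mode="across_level",
      image_resolution=(512, 512, 3),
      num_samples=5000)
  # `anchors` is a [9, 2] list of [width, height] pairs sorted by area.
  return anchors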
| 11,616 | 35.531447 | 80 | py |
models | models-master/official/projects/yolo/ops/mosaic.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mosaic op."""
import random
import tensorflow as tf
from official.projects.yolo.ops import preprocessing_ops
from official.vision.ops import augment
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
class Mosaic:
"""Stitch together sets of 4 (2x2) or 9 (3x3) images to generate samples with more boxes."""
def __init__(
self,
output_size,
mosaic_frequency=1.0,
mosaic9_frequency=0.0,
mixup_frequency=0.0,
letter_box=True,
jitter=0.0,
mosaic_crop_mode='scale',
mosaic_center=0.25,
mosaic9_center=0.33,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_angle=0.0,
aug_rand_perspective=0.0,
aug_rand_translate=0.0,
random_pad=False,
random_flip=False,
area_thresh=0.1,
pad_value=preprocessing_ops.PAD_VALUE,
seed=None,
):
"""Initializes parameters for mosaic.
Args:
output_size: `Tensor` or `List` for [height, width] of output image.
mosaic_frequency: `float` indicating how often to apply mosaic.
mosaic9_frequency: `float` indicating how often to apply a 3x3 mosaic
instead of 2x2.
mixup_frequency: `float` indicating how often to apply mixup.
      letter_box: `boolean` indicating whether, at the start of the data
        pipeline and regardless of the preprocessing ops that are used, the
        aspect ratio of the images should be preserved.
jitter: `float` for the maximum change in aspect ratio expected in each
preprocessing step.
mosaic_crop_mode: `str` the type of mosaic to apply. The options are
{crop, scale, None}, crop will construct a mosaic by slicing images
        together, scale will create a mosaic by concatenating and shifting the
image, and None will default to scale and apply no post processing to
the created mosaic.
mosaic_center: `float` indicating how much to randomly deviate from the
center of the image when creating a mosaic.
mosaic9_center: `float` indicating how much to randomly deviate from the
center of the image when creating a mosaic9.
aug_scale_min: `float` indicating the minimum scaling value for image
scale jitter.
aug_scale_max: `float` indicating the maximum scaling value for image
scale jitter.
      aug_rand_angle: `float` indicating the maximum angle value for rotation.
        The angle will be changed between 0 and this value.
aug_rand_perspective: `float` ranging from 0.000 to 0.001 indicating how
        much to perspective warp the image.
aug_rand_translate: `float` ranging from 0 to 1 indicating the maximum
amount to randomly translate an image.
      random_pad: `bool` indicating whether to use padding to apply random
        translation; true for darknet yolo, false for scaled yolo.
random_flip: `bool` whether or not to random flip the image.
area_thresh: `float` for the minimum area of a box to allow to pass
through for optimization.
pad_value: `int` padding value.
seed: `int` the seed for random number generation.
"""
self._output_size = output_size
self._area_thresh = area_thresh
self._mosaic_frequency = mosaic_frequency
self._mosaic9_frequency = mosaic9_frequency
self._mixup_frequency = mixup_frequency
self._letter_box = letter_box
self._random_crop = jitter
self._mosaic_crop_mode = mosaic_crop_mode
self._mosaic_center = mosaic_center
self._mosaic9_center = mosaic9_center
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
self._random_pad = random_pad
self._aug_rand_translate = aug_rand_translate
self._aug_rand_angle = aug_rand_angle
self._aug_rand_perspective = aug_rand_perspective
self._random_flip = random_flip
self._pad_value = pad_value
self._deterministic = seed is not None
self._seed = seed if seed is not None else random.randint(0, 2**30)
def _generate_cut(self, num_tiles, mosaic_center):
"""Generate a random center to use for slicing and patching the images."""
if self._mosaic_crop_mode == 'crop':
min_offset = mosaic_center
cut_x = preprocessing_ops.random_uniform_strong(
self._output_size[1] * min_offset,
self._output_size[1] * (1 - min_offset),
seed=self._seed)
cut_y = preprocessing_ops.random_uniform_strong(
self._output_size[0] * min_offset,
self._output_size[0] * (1 - min_offset),
seed=self._seed)
cut = [cut_y, cut_x]
ishape = tf.convert_to_tensor(
[self._output_size[0], self._output_size[1], 3])
else:
cut = None
ishape = tf.convert_to_tensor([
self._output_size[0] * num_tiles,
self._output_size[1] * num_tiles,
3,
])
return cut, ishape
def scale_boxes(self, patch, ishape, boxes, x_offset, y_offset):
"""Scale and translate the boxes for each image prior to patching."""
x_offset = tf.cast(x_offset, boxes.dtype)
y_offset = tf.cast(y_offset, boxes.dtype)
pshape = tf.cast(tf.shape(patch), boxes.dtype)
ishape = tf.cast(ishape, boxes.dtype)
y_offset = ishape[0] * y_offset
x_offset = ishape[1] * x_offset
boxes = box_ops.denormalize_boxes(boxes, pshape[:2])
boxes = boxes + tf.cast(
[y_offset, x_offset, y_offset, x_offset], boxes.dtype
)
boxes = box_ops.normalize_boxes(boxes, ishape[:2])
return boxes
def _select_ind(self, inds, *args):
items = []
for item in args:
items.append(tf.gather(item, inds))
return items
def _augment_image(
self,
image,
boxes,
classes,
is_crowd,
area,
xs=0.0,
ys=0.0,
cut=None,
letter_box=False,
):
"""Process a single image prior to the application of patching."""
if self._random_flip:
# Randomly flip the image horizontally.
image, boxes, _ = preprocess_ops.random_horizontal_flip(
image, boxes, seed=self._seed)
# Augment the image without resizing
image, infos, crop_points = preprocessing_ops.resize_and_jitter_image(
image,
[self._output_size[0], self._output_size[1]],
random_pad=False,
letter_box=letter_box,
jitter=self._random_crop,
shiftx=xs,
shifty=ys,
cut=cut,
seed=self._seed,
)
# Clip and clean boxes.
boxes, inds = preprocessing_ops.transform_and_clip_boxes(
boxes,
infos,
area_thresh=self._area_thresh,
shuffle_boxes=False,
filter_and_clip_boxes=True,
seed=self._seed)
classes, is_crowd, area = self._select_ind(inds, classes, is_crowd, area) # pylint:disable=unbalanced-tuple-unpacking
return image, boxes, classes, is_crowd, area, crop_points
def _mosaic_crop_image(
self, image, boxes, classes, is_crowd, area, mosaic_center):
"""Process a patched image in preperation for final output."""
if self._mosaic_crop_mode != 'crop':
shape = tf.cast(preprocessing_ops.get_image_shape(image), tf.float32)
center = shape * mosaic_center
# shift the center of the image by applying a translation to the whole
# image
ch = tf.math.round(
preprocessing_ops.random_uniform_strong(
-center[0], center[0], seed=self._seed))
cw = tf.math.round(
preprocessing_ops.random_uniform_strong(
-center[1], center[1], seed=self._seed))
# clip the boxes to fit within the image
image = augment.translate(
image, [cw, ch], fill_value=self._pad_value, fill_mode='constant'
)
boxes = box_ops.denormalize_boxes(boxes, shape[:2])
boxes = boxes + tf.cast([ch, cw, ch, cw], boxes.dtype)
boxes = box_ops.clip_boxes(boxes, shape[:2])
inds = box_ops.get_non_empty_box_indices(boxes)
boxes = box_ops.normalize_boxes(boxes, shape[:2])
boxes, classes, is_crowd, area = self._select_ind(inds, boxes, classes, # pylint:disable=unbalanced-tuple-unpacking
is_crowd, area)
# warp and scale the fully stitched sample
image, _, affine = preprocessing_ops.affine_warp_image(
image, [self._output_size[0], self._output_size[1]],
scale_min=self._aug_scale_min,
scale_max=self._aug_scale_max,
translate=self._aug_rand_translate,
degrees=self._aug_rand_angle,
perspective=self._aug_rand_perspective,
random_pad=self._random_pad,
seed=self._seed)
height, width = self._output_size[0], self._output_size[1]
image = tf.image.resize(image, (height, width))
# clip and clean boxes
boxes, inds = preprocessing_ops.transform_and_clip_boxes(
boxes,
None,
affine=affine,
area_thresh=self._area_thresh,
seed=self._seed)
classes, is_crowd, area = self._select_ind(inds, classes, is_crowd, area) # pylint:disable=unbalanced-tuple-unpacking
return image, boxes, classes, is_crowd, area, area
# mosaic full frequency doubles model speed
def _process_image(self, sample, shiftx, shifty, cut, letter_box):
"""Process and augment an image."""
(image, boxes, classes, is_crowd, area, crop_points) = self._augment_image(
sample['image'],
sample['groundtruth_boxes'],
sample['groundtruth_classes'],
sample['groundtruth_is_crowd'],
sample['groundtruth_area'],
shiftx,
shifty,
cut,
letter_box,
)
# Make a copy so this method is functional.
sample = sample.copy()
sample['image'] = image
sample['groundtruth_boxes'] = boxes
sample['groundtruth_classes'] = classes
sample['groundtruth_is_crowd'] = is_crowd
sample['groundtruth_area'] = area
sample['shiftx'] = shiftx
sample['shifty'] = shifty
sample['crop_points'] = crop_points
return sample
def _update_patched_sample(
self, sample, image, boxes, classes, is_crowds, areas, mosaic_center
):
"""Returns a shallow copy of sample with updated values."""
boxes = tf.concat(boxes, axis=0)
classes = tf.concat(classes, axis=0)
is_crowds = tf.concat(is_crowds, axis=0)
areas = tf.concat(areas, axis=0)
if self._mosaic_crop_mode is not None:
image, boxes, classes, is_crowds, areas, _ = self._mosaic_crop_image(
image, boxes, classes, is_crowds, areas, mosaic_center
)
height, width = preprocessing_ops.get_image_shape(image)
# Shallow copy of dict is needed to keep this method functional and
# AutoGraph happy.
sample = sample.copy()
sample['image'] = tf.cast(image, tf.uint8)
sample['groundtruth_boxes'] = boxes
sample['groundtruth_area'] = areas
sample['groundtruth_classes'] = tf.cast(
classes, sample['groundtruth_classes'].dtype
)
sample['groundtruth_is_crowd'] = tf.cast(is_crowds, tf.bool)
sample['width'] = tf.cast(width, sample['width'].dtype)
sample['height'] = tf.cast(height, sample['height'].dtype)
sample['num_detections'] = tf.shape(sample['groundtruth_boxes'])[1]
sample['is_mosaic'] = tf.cast(1.0, tf.bool)
del sample['shiftx']
del sample['shifty']
del sample['crop_points']
return sample
def _patch(self, patches, ishape, num_rows, num_cols, mosaic_center):
"""Combines patches into a num_patches x num_patches mosaic and translates the bounding boxes."""
rows = []
for row_idx in range(num_rows):
row_patches = [
patches[row_idx * num_cols + col_idx]['image']
for col_idx in range(num_cols)
]
rows.append(tf.concat(row_patches, axis=-2))
image = tf.concat(rows, axis=-3)
boxes = []
classes = []
is_crowds = []
areas = []
# Shift boxes to their new coordinates in the mosaic.
for row_idx in range(num_rows):
for col_idx in range(num_cols):
patch = patches[row_idx * num_cols + col_idx]
transformed_boxes = self.scale_boxes(
patch['image'],
ishape,
patch['groundtruth_boxes'],
col_idx / num_cols,
row_idx / num_rows,
)
boxes.append(transformed_boxes)
classes.append(patch['groundtruth_classes'])
is_crowds.append(patch['groundtruth_is_crowd'])
areas.append(patch['groundtruth_area'])
return self._update_patched_sample(
patches[0], image, boxes, classes, is_crowds, areas, mosaic_center
)
def _mosaic(self, *patch_samples):
"""Builds a 2x2 or 3x3 mosaic."""
if self._mosaic_frequency >= 1.0:
mosaic_prob = 1.0
else:
mosaic_prob = preprocessing_ops.random_uniform_strong(
0.0, 1.0, dtype=tf.float32, seed=self._seed
)
sample = patch_samples[0].copy()
if mosaic_prob >= (1 - self._mosaic_frequency):
mosaic9_prob = preprocessing_ops.random_uniform_strong(
0.0, 1.0, dtype=tf.float32, seed=self._seed + 1
)
if self._mosaic9_frequency > 0 and mosaic9_prob >= (
1 - self._mosaic9_frequency
):
return self._mosaic9(*patch_samples)
else:
return self._mosaic4(*patch_samples)
else:
return self._add_param(sample)
def _mosaic4(self, *samples):
"""Stitches together 4 images to build a 2x2 mosaic."""
cut, ishape = self._generate_cut(2, self._mosaic_center)
samples = [
self._process_image(
samples[0], 1.0, 1.0, cut, letter_box=self._letter_box
),
self._process_image(
samples[1], 0.0, 1.0, cut, letter_box=self._letter_box
),
self._process_image(
samples[2], 1.0, 0.0, cut, letter_box=self._letter_box
),
self._process_image(
samples[3], 0.0, 0.0, cut, letter_box=self._letter_box
),
]
stitched = self._patch(samples, ishape, 2, 2, self._mosaic_center)
return stitched
def _mosaic9(self, *samples):
"""Stitches together 9 images to build a 3x3 mosaic."""
cut, ishape = self._generate_cut(3, self._mosaic9_center)
# Only corner images can be letterboxed to prevent gaps in the image.
samples = [
self._process_image(
samples[0], 1.0, 1.0, cut, letter_box=self._letter_box
),
self._process_image(samples[1], 0.0, 0.0, cut, letter_box=False),
self._process_image(
samples[2], 0.0, 1.0, cut, letter_box=self._letter_box
),
self._process_image(samples[3], 0.0, 0.0, cut, letter_box=False),
self._process_image(samples[4], 0.0, 0.0, cut, letter_box=False),
self._process_image(samples[5], 0.0, 0.0, cut, letter_box=False),
self._process_image(
samples[6], 1.0, 0.0, cut, letter_box=self._letter_box
),
self._process_image(samples[7], 0.0, 0.0, cut, letter_box=False),
self._process_image(
samples[8], 0.0, 0.0, cut, letter_box=self._letter_box
),
]
stitched = self._patch(samples, ishape, 3, 3, self._mosaic9_center)
return stitched
def _beta(self, alpha, beta):
"""Generates a random number using the beta distribution."""
a = tf.random.gamma([], alpha)
b = tf.random.gamma([], beta)
return b / (a + b)
def _mixup(self, one, two):
"""Blend together 2 images for the mixup data augmentation."""
if self._mixup_frequency >= 1.0:
domo = 1.0
else:
domo = preprocessing_ops.random_uniform_strong(
0.0, 1.0, dtype=tf.float32, seed=self._seed)
noop = one.copy()
if domo >= (1 - self._mixup_frequency):
sample = one
otype = one['image'].dtype
r = self._beta(8.0, 8.0)
sample['image'] = (
r * tf.cast(one['image'], tf.float32) +
(1 - r) * tf.cast(two['image'], tf.float32))
sample['image'] = tf.cast(sample['image'], otype)
sample['groundtruth_boxes'] = tf.concat(
[one['groundtruth_boxes'], two['groundtruth_boxes']], axis=0)
sample['groundtruth_classes'] = tf.concat(
[one['groundtruth_classes'], two['groundtruth_classes']], axis=0)
sample['groundtruth_is_crowd'] = tf.concat(
[one['groundtruth_is_crowd'], two['groundtruth_is_crowd']], axis=0)
sample['groundtruth_area'] = tf.concat(
[one['groundtruth_area'], two['groundtruth_area']], axis=0)
return sample
else:
return self._add_param(noop)
def _add_param(self, sample):
"""Add parameters to handle skipped images."""
if 'is_mosaic' not in sample:
sample['is_mosaic'] = tf.cast(0.0, tf.bool)
sample['num_detections'] = tf.shape(sample['groundtruth_boxes'])[0]
return sample
def _apply(self, dataset):
"""Apply mosaic to an input dataset."""
determ = self._deterministic
dataset = dataset.prefetch(tf.data.AUTOTUNE)
patch_datasets = []
num_patches = 9 if self._mosaic9_frequency > 0.0 else 4
for i in range(num_patches):
patch_datasets.append(
dataset.shuffle(
100, seed=self._seed + i, reshuffle_each_iteration=True
)
)
dataset = tf.data.Dataset.zip(tuple(patch_datasets))
dataset = dataset.map(
self._mosaic, num_parallel_calls=tf.data.AUTOTUNE, deterministic=determ)
if self._mixup_frequency > 0:
one = dataset.shuffle(
100, seed=self._seed + num_patches, reshuffle_each_iteration=True
)
two = dataset.shuffle(
100,
seed=self._seed + num_patches + 1,
reshuffle_each_iteration=True,
)
dataset = tf.data.Dataset.zip((one, two))
dataset = dataset.map(
self._mixup,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=determ)
return dataset
def _skip(self, dataset):
"""Skip samples in a dataset."""
determ = self._deterministic
return dataset.map(
self._add_param,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=determ)
def mosaic_fn(self, is_training=True):
"""Determine which function to apply based on whether model is training."""
if is_training and self._mosaic_frequency > 0.0:
return self._apply
else:
return self._skip
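# Example (sketch): one way to wire the mosaic op into a tf.data pipeline of
# decoded samples. The dataset is assumed to yield dictionaries with the keys
# consumed above ('image', 'groundtruth_boxes', 'groundtruth_classes',
# 'groundtruth_is_crowd', 'groundtruth_area', 'width', 'height'); the
# parameter values are illustrative only.
def _example_mosaic_pipeline(decoded_dataset, is_training=True):
  mosaic = Mosaic(
      output_size=[640, 640],
      mosaic_frequency=1.0,
      mosaic9_frequency=0.2,
      mixup_frequency=0.1,
      mosaic_crop_mode='scale',
      aug_scale_min=0.5,
      aug_scale_max=1.5,
      random_flip=True)
  # `mosaic_fn` returns `_apply` while training and `_skip` otherwise; both
  # take and return a `tf.data.Dataset`, so `Dataset.apply` can be used.
  return decoded_dataset.apply(mosaic.mosaic_fn(is_training=is_training))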
| 18,885 | 35.529981 | 122 | py |
models | models-master/official/projects/yolo/ops/loss_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo loss utility functions."""
import numpy as np
import tensorflow as tf
from official.projects.yolo.ops import box_ops
from official.projects.yolo.ops import math_ops
@tf.custom_gradient
def sigmoid_bce(y, x_prime, label_smoothing):
"""Applies the Sigmoid Cross Entropy Loss.
Implements the same derivative as that found in the Darknet C library.
The derivative of this method is not the same as the standard binary cross
entropy with logits function.
The BCE with logits function equation is as follows:
x = 1 / (1 + exp(-x_prime))
bce = -ylog(x) - (1 - y)log(1 - x)
The standard BCE with logits function derivative is as follows:
dloss = -y/x + (1-y)/(1-x)
dsigmoid = x * (1 - x)
dx = dloss * dsigmoid
This derivative can be reduced simply to:
dx = (-y + x)
This simplification is used by the darknet library in order to improve
training stability. The gradient is almost the same
as tf.keras.losses.binary_crossentropy but varies slightly and
yields different performance.
Args:
y: `Tensor` holding ground truth data.
x_prime: `Tensor` holding the predictions prior to application of the
sigmoid operation.
label_smoothing: float value between 0.0 and 1.0 indicating the amount of
smoothing to apply to the data.
Returns:
    bce: Tensor of the computed loss values.
delta: callable function indicating the custom gradient for this operation.
"""
eps = 1e-9
x = tf.math.sigmoid(x_prime)
y = tf.stop_gradient(y * (1 - label_smoothing) + 0.5 * label_smoothing)
bce = -y * tf.math.log(x + eps) - (1 - y) * tf.math.log(1 - x + eps)
def delta(dpass):
x = tf.math.sigmoid(x_prime)
dx = (-y + x) * dpass
dy = tf.zeros_like(y)
return dy, dx, 0.0
return bce, delta
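# Example (sketch): evaluating the loss and its simplified darknet-style
# gradient with a gradient tape. The input values are illustrative.
def _example_sigmoid_bce():
  y = tf.constant([0.0, 1.0, 1.0])
  x_prime = tf.constant([-1.0, 0.5, 2.0])
  with tf.GradientTape() as tape:
    tape.watch(x_prime)
    loss = tf.reduce_sum(sigmoid_bce(y, x_prime, 0.0))
  # Per the custom gradient above, d(loss)/d(x_prime) = sigmoid(x_prime) - y.
  return loss, tape.gradient(loss, x_prime)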
def apply_mask(mask, x, value=0):
"""This function is used for gradient masking.
The YOLO loss function makes extensive use of dynamically shaped tensors.
To allow this use case on the TPU while preserving the gradient correctly
  for back propagation, we use this masking function to apply a tf.where
  operation that hard sets masked locations to a value and gradient of zero.
Args:
mask: A `Tensor` with the same shape as x used to select values of
importance.
x: A `Tensor` with the same shape as mask that will be getting masked.
value: `float` constant additive value.
Returns:
x: A masked `Tensor` with the same shape as x.
"""
mask = tf.cast(mask, tf.bool)
masked = tf.where(mask, x, tf.zeros_like(x) + value)
return masked
def build_grid(indexes, truths, preds, ind_mask, update=False, grid=None):
"""This function is used to broadcast elements into the output shape.
  This function broadcasts a list of truths into the correct index
in the output shape. This is used for the ground truth map construction in
the scaled loss and the classification map in the darknet loss.
Args:
indexes: A `Tensor` for the indexes
truths: A `Tensor` for the ground truth.
preds: A `Tensor` for the predictions.
ind_mask: A `Tensor` for the index masks.
update: A `bool` for updating the grid.
grid: A `Tensor` for the grid.
Returns:
grid: A `Tensor` representing the augmented grid.
"""
  # this function is used to broadcast all the indexes into the correct
  # ground truth mask, used for the iou detection map in the scaled loss and
  # the classification mask in the darknet loss
num_flatten = tf.shape(preds)[-1]
# is there a way to verify that we are not on the CPU?
ind_mask = tf.cast(ind_mask, indexes.dtype)
# find all the batch indexes using the cumulated sum of a ones tensor
  # cumsum(ones) - 1 yields the zero-indexed batches
bhep = tf.reduce_max(tf.ones_like(indexes), axis=-1, keepdims=True)
bhep = tf.math.cumsum(bhep, axis=0) - 1
  # concatenate the batch indexes to the indexes
indexes = tf.concat([bhep, indexes], axis=-1)
indexes = apply_mask(tf.cast(ind_mask, indexes.dtype), indexes)
indexes = (indexes + (ind_mask - 1))
# mask truths
truths = apply_mask(tf.cast(ind_mask, truths.dtype), truths)
truths = (truths + (tf.cast(ind_mask, truths.dtype) - 1))
# reshape the indexes into the correct shape for the loss,
# just flatten all indexes but the last
indexes = tf.reshape(indexes, [-1, 4])
  # also flatten the ground truth value on all axes but the last
truths = tf.reshape(truths, [-1, num_flatten])
  # build a zero grid in the same shape as the predictions
if grid is None:
grid = tf.zeros_like(preds)
# remove invalid values from the truths that may have
# come up from computation, invalid = nan and inf
truths = math_ops.rm_nan_inf(truths)
# scatter update the zero grid
if update:
grid = tf.tensor_scatter_nd_update(grid, indexes, truths)
else:
grid = tf.tensor_scatter_nd_max(grid, indexes, truths)
# stop gradient and return to avoid TPU errors and save compute
# resources
return grid
class GridGenerator:
"""Grid generator that generates anchor grids for box decoding."""
def __init__(self, anchors, scale_anchors=None):
"""Initialize Grid Generator.
Args:
anchors: A `List[List[int]]` for the anchor boxes that are used in the
model at all levels.
scale_anchors: An `int` for how much to scale this level to get the
original input shape.
"""
self.dtype = tf.keras.backend.floatx()
self._scale_anchors = scale_anchors
self._anchors = tf.convert_to_tensor(anchors)
return
def _build_grid_points(self, lheight, lwidth, anchors, dtype):
"""Generate a grid of fixed grid edges for box center decoding."""
with tf.name_scope('center_grid'):
y = tf.range(0, lheight)
x = tf.range(0, lwidth)
x_left = tf.tile(
tf.transpose(tf.expand_dims(x, axis=-1), perm=[1, 0]), [lheight, 1])
y_left = tf.tile(tf.expand_dims(y, axis=-1), [1, lwidth])
x_y = tf.stack([x_left, y_left], axis=-1)
x_y = tf.cast(x_y, dtype=dtype)
num = tf.shape(anchors)[0]
x_y = tf.expand_dims(
tf.tile(tf.expand_dims(x_y, axis=-2), [1, 1, num, 1]), axis=0)
return x_y
def _build_anchor_grid(self, height, width, anchors, dtype):
"""Get the transformed anchor boxes for each dimention."""
with tf.name_scope('anchor_grid'):
num = tf.shape(anchors)[0]
anchors = tf.cast(anchors, dtype=dtype)
anchors = tf.reshape(anchors, [1, 1, 1, num, 2])
anchors = tf.tile(anchors, [1, tf.cast(height, tf.int32),
tf.cast(width, tf.int32), 1, 1])
return anchors
def _extend_batch(self, grid, batch_size):
return tf.tile(grid, [batch_size, 1, 1, 1, 1])
def __call__(self, height, width, batch_size, dtype=None):
if dtype is None:
self.dtype = tf.keras.backend.floatx()
else:
self.dtype = dtype
grid_points = self._build_grid_points(height, width, self._anchors,
self.dtype)
anchor_grid = self._build_anchor_grid(
height, width,
tf.cast(self._anchors, self.dtype) /
tf.cast(self._scale_anchors, self.dtype), self.dtype)
grid_points = self._extend_batch(grid_points, batch_size)
anchor_grid = self._extend_batch(anchor_grid, batch_size)
return grid_points, anchor_grid
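# Example (sketch): building the decoding grids for a 13x13 prediction level
# with three anchors and a stride of 32. The anchor sizes are illustrative.
def _example_grid_generator():
  generator = GridGenerator(
      anchors=[[116, 90], [156, 198], [373, 326]], scale_anchors=32)
  # grid_points: [2, 13, 13, 3, 2] pixel offsets; anchor_grid: [2, 13, 13, 3, 2]
  # anchor sizes divided by the stride.
  grid_points, anchor_grid = generator(13, 13, batch_size=2)
  return grid_points, anchor_grid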
TILE_SIZE = 50
class PairWiseSearch:
"""Apply a pairwise search between the ground truth and the labels.
The goal is to indicate the locations where the predictions overlap with
ground truth for dynamic ground truth associations.
"""
def __init__(self,
iou_type='iou',
any_match=True,
min_conf=0.0,
track_boxes=False,
track_classes=False):
"""Initialization of Pair Wise Search.
Args:
      iou_type: A `str` for the iou type to use.
      any_match: A `bool` for any match (no class match).
      min_conf: A `float` for the minimum confidence threshold.
      track_boxes: A `bool` for dynamic box assignment.
      track_classes: A `bool` for dynamic class assignment.
"""
self.iou_type = iou_type
self._any = any_match
self._min_conf = min_conf
self._track_boxes = track_boxes
self._track_classes = track_classes
return
def box_iou(self, true_box, pred_box):
# based on the type of loss, compute the iou loss for a box
    # compute_<name> indicates the type of iou to use
if self.iou_type == 'giou':
_, iou = box_ops.compute_giou(true_box, pred_box)
elif self.iou_type == 'ciou':
_, iou = box_ops.compute_ciou(true_box, pred_box)
else:
iou = box_ops.compute_iou(true_box, pred_box)
return iou
def _search_body(self, pred_box, pred_class, boxes, classes, running_boxes,
running_classes, max_iou, idx):
"""Main search fn."""
# capture the batch size to be used, and gather a slice of
# boxes from the ground truth. currently TILE_SIZE = 50, to
# save memory
batch_size = tf.shape(boxes)[0]
box_slice = tf.slice(boxes, [0, idx * TILE_SIZE, 0],
[batch_size, TILE_SIZE, 4])
    # match the dimensions of the slice to the model predictions
# shape: [batch_size, 1, 1, num, TILE_SIZE, 4]
box_slice = tf.expand_dims(box_slice, axis=1)
box_slice = tf.expand_dims(box_slice, axis=1)
box_slice = tf.expand_dims(box_slice, axis=1)
box_grid = tf.expand_dims(pred_box, axis=-2)
# capture the classes
class_slice = tf.slice(classes, [0, idx * TILE_SIZE],
[batch_size, TILE_SIZE])
class_slice = tf.expand_dims(class_slice, axis=1)
class_slice = tf.expand_dims(class_slice, axis=1)
class_slice = tf.expand_dims(class_slice, axis=1)
iou = self.box_iou(box_slice, box_grid)
if self._min_conf > 0.0:
if not self._any:
class_grid = tf.expand_dims(pred_class, axis=-2)
class_mask = tf.one_hot(
tf.cast(class_slice, tf.int32),
depth=tf.shape(pred_class)[-1],
dtype=pred_class.dtype)
class_mask = tf.reduce_any(tf.equal(class_mask, class_grid), axis=-1)
else:
class_mask = tf.reduce_max(pred_class, axis=-1, keepdims=True)
class_mask = tf.cast(class_mask, iou.dtype)
iou *= class_mask
max_iou_ = tf.concat([max_iou, iou], axis=-1)
max_iou = tf.reduce_max(max_iou_, axis=-1, keepdims=True)
ind = tf.expand_dims(tf.argmax(max_iou_, axis=-1), axis=-1)
if self._track_boxes:
running_boxes = tf.expand_dims(running_boxes, axis=-2)
box_slice = tf.zeros_like(running_boxes) + box_slice
box_slice = tf.concat([running_boxes, box_slice], axis=-2)
running_boxes = tf.gather_nd(box_slice, ind, batch_dims=4)
if self._track_classes:
running_classes = tf.expand_dims(running_classes, axis=-1)
class_slice = tf.zeros_like(running_classes) + class_slice
class_slice = tf.concat([running_classes, class_slice], axis=-1)
running_classes = tf.gather_nd(class_slice, ind, batch_dims=4)
return (pred_box, pred_class, boxes, classes, running_boxes,
running_classes, max_iou, idx + 1)
def __call__(self,
pred_boxes,
pred_classes,
boxes,
classes,
clip_thresh=0.0):
num_boxes = tf.shape(boxes)[-2]
num_tiles = (num_boxes // TILE_SIZE) - 1
if self._min_conf > 0.0:
pred_classes = tf.cast(pred_classes > self._min_conf, pred_classes.dtype)
def _loop_cond(unused_pred_box, unused_pred_class, boxes, unused_classes,
unused_running_boxes, unused_running_classes, unused_max_iou,
idx):
      # check that the slice still has boxes that are not all zeros
batch_size = tf.shape(boxes)[0]
box_slice = tf.slice(boxes, [0, idx * TILE_SIZE, 0],
[batch_size, TILE_SIZE, 4])
return tf.logical_and(idx < num_tiles,
tf.math.greater(tf.reduce_sum(box_slice), 0))
running_boxes = tf.zeros_like(pred_boxes)
running_classes = tf.zeros_like(tf.reduce_sum(running_boxes, axis=-1))
max_iou = tf.zeros_like(tf.reduce_sum(running_boxes, axis=-1))
max_iou = tf.expand_dims(max_iou, axis=-1)
(pred_boxes, pred_classes, boxes, classes, running_boxes, running_classes,
max_iou, _) = tf.while_loop(_loop_cond, self._search_body, [
pred_boxes, pred_classes, boxes, classes, running_boxes,
running_classes, max_iou,
tf.constant(0)
])
mask = tf.cast(max_iou > clip_thresh, running_boxes.dtype)
running_boxes *= mask
running_classes *= tf.squeeze(mask, axis=-1)
max_iou *= mask
max_iou = tf.squeeze(max_iou, axis=-1)
mask = tf.squeeze(mask, axis=-1)
return (tf.stop_gradient(running_boxes), tf.stop_gradient(running_classes),
tf.stop_gradient(max_iou), tf.stop_gradient(mask))
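# Example (sketch): matching each predicted location against the ground truth
# boxes of its image. Predictions follow the [batch, height, width, anchors,
# ...] convention used above and the ground truth is padded to a fixed number
# of boxes per image; the random tensors stand in for real data.
def _example_pairwise_search():
  search = PairWiseSearch(iou_type='iou', any_match=True)
  pred_boxes = tf.random.uniform([2, 13, 13, 3, 4])
  pred_classes = tf.random.uniform([2, 13, 13, 3, 80])
  gt_boxes = tf.random.uniform([2, 100, 4])
  gt_classes = tf.zeros([2, 100])
  boxes, classes, max_iou, mask = search(
      pred_boxes, pred_classes, gt_boxes, gt_classes, clip_thresh=0.0)
  # boxes/classes stay zero unless track_boxes/track_classes are set;
  # max_iou and mask have shape [2, 13, 13, 3].
  return boxes, classes, max_iou, mask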
def average_iou(iou):
"""Computes the average intersection over union without counting locations.
where the iou is zero.
Args:
iou: A `Tensor` representing the iou values.
Returns:
tf.stop_gradient(avg_iou): A `Tensor` representing average
intersection over union.
"""
iou_sum = tf.reduce_sum(iou, axis=tf.range(1, tf.shape(tf.shape(iou))[0]))
counts = tf.cast(
tf.math.count_nonzero(iou, axis=tf.range(1,
tf.shape(tf.shape(iou))[0])),
iou.dtype)
avg_iou = tf.reduce_mean(math_ops.divide_no_nan(iou_sum, counts))
return tf.stop_gradient(avg_iou)
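# Example (sketch): zero entries mark unmatched locations and are ignored, so
# the value below is (0.5 + 0.7) / 2 rather than the mean over all three.
def _example_average_iou():
  iou = tf.constant([[0.5, 0.0, 0.7]])
  return average_iou(iou)  # ~0.6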
def _scale_boxes(encoded_boxes, width, height, anchor_grid, grid_points,
scale_xy):
"""Decodes models boxes applying and exponential to width and height maps."""
# split the boxes
pred_xy = encoded_boxes[..., 0:2]
pred_wh = encoded_boxes[..., 2:4]
  # build a scaling tensor to get the offset of the box relative to the image
scaler = tf.convert_to_tensor([height, width, height, width])
scale_xy = tf.cast(scale_xy, encoded_boxes.dtype)
# apply the sigmoid
pred_xy = tf.math.sigmoid(pred_xy)
# scale the centers and find the offset of each box relative to
# their center pixel
pred_xy = pred_xy * scale_xy - 0.5 * (scale_xy - 1)
# scale the offsets and add them to the grid points or a tensor that is
  # the relative location of each pixel
box_xy = grid_points + pred_xy
  # scale the width and height of the predictions and correlate them
# to anchor boxes
box_wh = tf.math.exp(pred_wh) * anchor_grid
# build the final predicted box
scaled_box = tf.concat([box_xy, box_wh], axis=-1)
pred_box = scaled_box / scaler
# shift scaled boxes
scaled_box = tf.concat([pred_xy, box_wh], axis=-1)
return (scaler, scaled_box, pred_box)
@tf.custom_gradient
def _darknet_boxes(encoded_boxes, width, height, anchor_grid, grid_points,
max_delta, scale_xy):
"""Wrapper for _scale_boxes to implement a custom gradient."""
(scaler, scaled_box, pred_box) = _scale_boxes(encoded_boxes, width, height,
anchor_grid, grid_points,
scale_xy)
def delta(unused_dy_scaler, dy_scaled, dy):
dy_xy, dy_wh = tf.split(dy, 2, axis=-1)
dy_xy_, dy_wh_ = tf.split(dy_scaled, 2, axis=-1)
# add all the gradients that may have been applied to the
# boxes and those that have been applied to the width and height
dy_wh += dy_wh_
dy_xy += dy_xy_
# propagate the exponential applied to the width and height in
# order to ensure the gradient propagated is of the correct
# magnitude
pred_wh = encoded_boxes[..., 2:4]
dy_wh *= tf.math.exp(pred_wh)
dbox = tf.concat([dy_xy, dy_wh], axis=-1)
# apply the gradient clipping to xy and wh
dbox = math_ops.rm_nan_inf(dbox)
delta = tf.cast(max_delta, dbox.dtype)
dbox = tf.clip_by_value(dbox, -delta, delta)
return dbox, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
return (scaler, scaled_box, pred_box), delta
def _new_coord_scale_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, scale_xy):
"""Decodes models boxes by squaring and scaling the width and height maps."""
# split the boxes
pred_xy = encoded_boxes[..., 0:2]
pred_wh = encoded_boxes[..., 2:4]
  # build a scaling tensor to get the offset of the box relative to the image
scaler = tf.convert_to_tensor([height, width, height, width])
scale_xy = tf.cast(scale_xy, pred_xy.dtype)
# apply the sigmoid
pred_xy = tf.math.sigmoid(pred_xy)
pred_wh = tf.math.sigmoid(pred_wh)
# scale the xy offset predictions according to the config
pred_xy = pred_xy * scale_xy - 0.5 * (scale_xy - 1)
# find the true offset from the grid points and the scaler
# where the grid points are the relative offset of each pixel with
# in the image
box_xy = grid_points + pred_xy
  # decode the width and height of the boxes and correlate them
# to the anchor boxes
box_wh = (2 * pred_wh)**2 * anchor_grid
# build the final boxes
scaled_box = tf.concat([box_xy, box_wh], axis=-1)
pred_box = scaled_box / scaler
# shift scaled boxes
scaled_box = tf.concat([pred_xy, box_wh], axis=-1)
return (scaler, scaled_box, pred_box)
@tf.custom_gradient
def _darknet_new_coord_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, max_delta, scale_xy):
"""Wrapper for _new_coord_scale_boxes to implement a custom gradient."""
(scaler, scaled_box,
pred_box) = _new_coord_scale_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, scale_xy)
def delta(unused_dy_scaler, dy_scaled, dy):
dy_xy, dy_wh = tf.split(dy, 2, axis=-1)
dy_xy_, dy_wh_ = tf.split(dy_scaled, 2, axis=-1)
# add all the gradients that may have been applied to the
# boxes and those that have been applied to the width and height
dy_wh += dy_wh_
dy_xy += dy_xy_
dbox = tf.concat([dy_xy, dy_wh], axis=-1)
# apply the gradient clipping to xy and wh
dbox = math_ops.rm_nan_inf(dbox)
delta = tf.cast(max_delta, dbox.dtype)
dbox = tf.clip_by_value(dbox, -delta, delta)
return dbox, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
return (scaler, scaled_box, pred_box), delta
def _anchor_free_scale_boxes(encoded_boxes,
width,
height,
stride,
grid_points,
darknet=False):
"""Decode models boxes using FPN stride under anchor free conditions."""
del darknet
# split the boxes
pred_xy = encoded_boxes[..., 0:2]
pred_wh = encoded_boxes[..., 2:4]
  # build a scaling tensor to get the offset of the box relative to the image
scaler = tf.convert_to_tensor([height, width, height, width])
# scale the offsets and add them to the grid points or a tensor that is
  # the relative location of each pixel
box_xy = (grid_points + pred_xy)
  # scale the width and height of the predictions and correlate them
# to anchor boxes
box_wh = tf.math.exp(pred_wh)
# build the final predicted box
scaled_box = tf.concat([box_xy, box_wh], axis=-1)
  # properly scale the box gradients
scaled_box = scaled_box * tf.cast(stride, scaled_box.dtype)
pred_box = scaled_box / tf.cast(scaler * stride, scaled_box.dtype)
return (scaler, scaled_box, pred_box)
def get_predicted_box(width,
height,
encoded_boxes,
anchor_grid,
grid_points,
scale_xy,
stride,
darknet=False,
box_type='original',
max_delta=np.inf):
"""Decodes the predicted boxes from the model format to a usable format.
This function decodes the model outputs into the [x, y, w, h] format for
use in the loss function as well as for use within the detection generator.
Args:
width: A `float` scalar indicating the width of the prediction layer.
    height: A `float` scalar indicating the height of the prediction layer.
encoded_boxes: A `Tensor` of shape [..., height, width, 4] holding encoded
boxes.
anchor_grid: A `Tensor` of shape [..., 1, 1, 2] holding the anchor boxes
organized for box decoding, box width and height.
grid_points: A `Tensor` of shape [..., height, width, 2] holding the anchor
boxes for decoding the box centers.
    scale_xy: A `float` scalar used to indicate the range for each center
outside of its given [..., i, j, 4] index, where i and j are indexing
pixels along the width and height of the predicted output map.
    stride: An `int` defining the amount of down stride relative to the input
image.
darknet: A `bool` used to select between custom gradient and default
autograd.
box_type: An `str` indicating the type of box encoding that is being used.
    max_delta: A `float` scalar used for gradient clipping in back propagation.
Returns:
scaler: A `Tensor` of shape [4] returned to allow the scaling of the ground
truth boxes to be of the same magnitude as the decoded predicted boxes.
scaled_box: A `Tensor` of shape [..., height, width, 4] with the predicted
boxes.
pred_box: A `Tensor` of shape [..., height, width, 4] with the predicted
boxes divided by the scaler parameter used to put all boxes in the [0, 1]
range.
"""
if box_type == 'anchor_free':
(scaler, scaled_box, pred_box) = _anchor_free_scale_boxes(
encoded_boxes, width, height, stride, grid_points, darknet=darknet)
elif darknet:
# pylint:disable=unbalanced-tuple-unpacking
    # if we are using the darknet loss we should not propagate the
# decoding of the box
if box_type == 'scaled':
(scaler, scaled_box,
pred_box) = _darknet_new_coord_boxes(encoded_boxes, width, height,
anchor_grid, grid_points, max_delta,
scale_xy)
else:
(scaler, scaled_box,
pred_box) = _darknet_boxes(encoded_boxes, width, height, anchor_grid,
grid_points, max_delta, scale_xy)
else:
# if we are using the scaled loss we should propagate the decoding of
# the boxes
if box_type == 'scaled':
(scaler, scaled_box,
pred_box) = _new_coord_scale_boxes(encoded_boxes, width, height,
anchor_grid, grid_points, scale_xy)
else:
(scaler, scaled_box, pred_box) = _scale_boxes(encoded_boxes, width,
height, anchor_grid,
grid_points, scale_xy)
return (scaler, scaled_box, pred_box)
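# Example (sketch): decoding the raw box channels of one 13x13 level with the
# grids from `GridGenerator`. The anchors, stride, and scale_xy values are
# illustrative; `encoded_boxes` stands in for the raw output of the head.
def _example_decode_level():
  generator = GridGenerator(
      anchors=[[116, 90], [156, 198], [373, 326]], scale_anchors=32)
  grid_points, anchor_grid = generator(13, 13, batch_size=2)
  encoded_boxes = tf.random.uniform([2, 13, 13, 3, 4])
  _, _, pred_box = get_predicted_box(
      width=13.0,
      height=13.0,
      encoded_boxes=encoded_boxes,
      anchor_grid=anchor_grid,
      grid_points=grid_points,
      scale_xy=1.05,
      stride=32,
      darknet=False,
      box_type='original')
  # pred_box holds boxes divided by the scaler, i.e. in the [0, 1] range.
  return pred_box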
| 23,482 | 36.097946 | 80 | py |
models | models-master/official/projects/yolo/ops/math_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of private math operations used to safely implement the YOLO loss."""
import tensorflow as tf
def rm_nan_inf(x, val=0.0):
"""Remove nan and infinity.
Args:
x: any `Tensor` of any type.
val: value to replace nan and infinity with.
Returns:
a `Tensor` with nan and infinity removed.
"""
cond = tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x))
val = tf.cast(val, dtype=x.dtype)
x = tf.where(cond, val, x)
return x
def rm_nan(x, val=0.0):
"""Remove nan and infinity.
Args:
x: any `Tensor` of any type.
val: value to replace nan.
Returns:
a `Tensor` with nan removed.
"""
cond = tf.math.is_nan(x)
val = tf.cast(val, dtype=x.dtype)
x = tf.where(cond, val, x)
return x
def divide_no_nan(a, b):
"""Nan safe divide operation built to allow model compilation in tflite.
Args:
a: any `Tensor` of any type.
b: any `Tensor` of any type with the same shape as tensor a.
Returns:
a `Tensor` representing a divided by b, with all nan values removed.
"""
return a / (b + 1e-9)
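# Example (sketch): the helpers above let loss terms tolerate empty
# denominators and invalid intermediate values without emitting NaNs.
def _example_safe_ops():
  mean_iou = divide_no_nan(tf.constant([0.0, 1.5]), tf.constant([0.0, 3.0]))
  cleaned = rm_nan_inf(tf.constant([1.0, float('nan'), float('inf')]))
  return mean_iou, cleaned  # ~[0.0, 0.5] and [1.0, 0.0, 0.0]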
| 1,676 | 26.048387 | 78 | py |
models | models-master/official/projects/yolo/ops/preprocessing_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing ops for yolo."""
import random
import numpy as np
import tensorflow as tf
from official.vision.ops import augment
from official.vision.ops import box_ops as bbox_ops
PAD_VALUE = 114
GLOBAL_SEED_SET = False
def set_random_seeds(seed=0):
"""Sets all accessible global seeds to properly apply randomization.
This is not the same as passing the seed as a variable to each call
  to tf.random. For more, see the documentation for tf.random on the tensorflow
website https://www.tensorflow.org/api_docs/python/tf/random/set_seed. Note
that passing the seed to each random number generator will not give you the
expected behavior if you use more than one generator in a single function.
Args:
seed: `Optional[int]` representing the seed you want to use.
"""
if seed is not None:
global GLOBAL_SEED_SET
random.seed(seed)
GLOBAL_SEED_SET = True
tf.random.set_seed(seed)
np.random.seed(seed)
def random_uniform_strong(minval,
maxval,
dtype=tf.float32,
seed=None,
shape=None):
"""A unified function for consistent random number generation.
Equivalent to tf.random.uniform, except that minval and maxval are flipped if
  minval is greater than maxval. This is a seed-safe random number generator.
Args:
minval: An `int` for a lower or upper endpoint of the interval from which to
choose the random number.
maxval: An `int` for the other endpoint.
dtype: The output type of the tensor.
seed: An `int` used to set the seed.
shape: List or 1D tf.Tensor, output shape of the random generator.
Returns:
A random tensor of type `dtype` that falls between `minval` and `maxval`
excluding the larger one.
"""
if GLOBAL_SEED_SET:
seed = None
if minval > maxval:
minval, maxval = maxval, minval
return tf.random.uniform(
shape=shape or [], minval=minval, maxval=maxval, seed=seed, dtype=dtype)
def random_scale(val, dtype=tf.float32, seed=None):
"""Generates a random number for scaling a parameter by multiplication.
Generates a random number for the scale. Half of the time, the value is
between [1.0, val) with uniformly distributed probability. In the other half,
the value is the reciprocal of this value. The function is identical to the
one in the original implementation:
https://github.com/AlexeyAB/darknet/blob/a3714d0a/src/utils.c#L708-L713
Args:
val: A float representing the maximum scaling allowed.
dtype: The output type of the tensor.
seed: An `int` used to set the seed.
Returns:
The random scale.
"""
scale = random_uniform_strong(1.0, val, dtype=dtype, seed=seed)
do_ret = random_uniform_strong(minval=0, maxval=2, dtype=tf.int32, seed=seed)
if do_ret == 1:
return scale
return 1.0 / scale
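# Example (sketch): half of the draws land in [1.0, 1.5) and the other half
# are the reciprocal of such a draw, matching the darknet scaling behavior.
def _example_random_scale():
  return random_scale(1.5, seed=7)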
def pad_max_instances(value, instances, pad_value=0, pad_axis=0):
"""Pad or clip the tensor value to a fixed length along a given axis.
Pads a dimension of the tensor to have a maximum number of instances filling
additional entries with the `pad_value`. Allows for selection of the padding
axis.
Args:
value: An input tensor.
instances: An `int` representing the maximum number of instances.
pad_value: An `int` representing the value used for padding until the
maximum number of instances is obtained.
pad_axis: An `int` representing the axis index to pad.
Returns:
The output tensor whose dimensions match the input tensor except with the
size along the `pad_axis` replaced by `instances`.
"""
# get the real shape of value
shape = tf.shape(value)
# compute the padding axis
if pad_axis < 0:
pad_axis = tf.rank(value) + pad_axis
  # determine how much of the tensor value to keep
dim1 = shape[pad_axis]
take = tf.math.reduce_min([instances, dim1])
value, _ = tf.split(value, [take, -1], axis=pad_axis)
# pad the clipped tensor to the right shape
pad = tf.convert_to_tensor([tf.math.reduce_max([instances - dim1, 0])])
nshape = tf.concat([shape[:pad_axis], pad, shape[(pad_axis + 1):]], axis=0)
pad_tensor = tf.fill(nshape, tf.cast(pad_value, dtype=value.dtype))
value = tf.concat([value, pad_tensor], axis=pad_axis)
if isinstance(instances, int):
vshape = value.get_shape().as_list()
vshape[pad_axis] = instances
value.set_shape(vshape)
return value
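# Example (sketch): padding a variable number of ground truth boxes to a fixed
# budget so batches have a static shape; extra rows are filled with zeros and
# extra class entries with -1. Sizes are illustrative.
def _example_pad_boxes():
  boxes = tf.random.uniform([7, 4])
  classes = tf.random.uniform([7], maxval=80, dtype=tf.int32)
  boxes = pad_max_instances(boxes, 100, pad_value=0, pad_axis=0)  # [100, 4]
  classes = pad_max_instances(classes, 100, pad_value=-1, pad_axis=0)  # [100]
  return boxes, classes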
def get_image_shape(image):
"""Consistently gets the width and height of the image.
  Gets the shape of the image regardless of whether the image is in the
(batch_size, x, y, c) format or the (x, y, c) format.
Args:
image: A tensor who has either 3 or 4 dimensions.
Returns:
A tuple (height, width), where height is the height of the image
and width is the width of the image.
"""
shape = tf.shape(image)
if shape.get_shape().as_list()[0] == 4:
width = shape[2]
height = shape[1]
else:
width = shape[1]
height = shape[0]
return height, width
def _augment_hsv_darknet(image, rh, rs, rv, seed=None):
"""Randomize the hue, saturation, and brightness via the darknet method."""
if rh > 0.0:
deltah = random_uniform_strong(-rh, rh, seed=seed)
image = tf.image.adjust_hue(image, deltah)
if rs > 0.0:
deltas = random_scale(rs, seed=seed)
image = tf.image.adjust_saturation(image, deltas)
if rv > 0.0:
deltav = random_scale(rv, seed=seed)
image *= tf.cast(deltav, image.dtype)
# clip the values of the image between 0.0 and 1.0
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def _augment_hsv_torch(image, rh, rs, rv, seed=None):
"""Randomize the hue, saturation, and brightness via the pytorch method."""
dtype = image.dtype
image = tf.cast(image, tf.float32)
image = tf.image.rgb_to_hsv(image)
gen_range = tf.cast([rh, rs, rv], image.dtype)
scale = tf.cast([180, 255, 255], image.dtype)
r = random_uniform_strong(
-1, 1, shape=[3], dtype=image.dtype, seed=seed) * gen_range + 1
image = tf.math.floor(tf.cast(image, scale.dtype) * scale)
image = tf.math.floor(tf.cast(image, r.dtype) * r)
h, s, v = tf.split(image, 3, axis=-1)
h = h % 180
s = tf.clip_by_value(s, 0, 255)
v = tf.clip_by_value(v, 0, 255)
image = tf.concat([h, s, v], axis=-1)
image = tf.cast(image, scale.dtype) / scale
image = tf.image.hsv_to_rgb(image)
return tf.cast(image, dtype)
def image_rand_hsv(image, rh, rs, rv, seed=None, darknet=False):
"""Randomly alters the hue, saturation, and brightness of an image.
Args:
image: `Tensor` of shape [None, None, 3] that needs to be altered.
rh: `float32` used to indicate the maximum delta that can be multiplied to
the hue.
rs: `float32` used to indicate the maximum delta that can be multiplied to
the saturation.
rv: `float32` used to indicate the maximum delta that can be multiplied to
the brightness.
seed: `Optional[int]` for the seed to use in the random number generation.
darknet: `bool` indicating whether the model was originally built in the
Darknet or PyTorch library.
Returns:
The HSV altered image in the same datatype as the input image.
"""
if darknet:
image = _augment_hsv_darknet(image, rh, rs, rv, seed=seed)
else:
image = _augment_hsv_torch(image, rh, rs, rv, seed=seed)
return image
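# Example (sketch): jittering the color of a normalized float image with the
# darknet and pytorch style HSV augmentations. Delta magnitudes are
# illustrative defaults, not prescribed values.
def _example_rand_hsv():
  image = tf.random.uniform([416, 416, 3])  # float image in [0, 1)
  darknet_aug = image_rand_hsv(image, 0.1, 1.5, 1.5, darknet=True)
  scaled_aug = image_rand_hsv(image, 0.015, 0.7, 0.4, darknet=False)
  return darknet_aug, scaled_aug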
def mosaic_cut(image, original_width, original_height, width, height, center,
ptop, pleft, pbottom, pright, shiftx, shifty):
"""Generates a random center location to use for the mosaic operation.
Given a center location, cuts the input image into a slice that will be
concatenated with other slices with the same center in order to construct
a final mosaicked image.
Args:
image: `Tensor` of shape [None, None, 3] that needs to be altered.
original_width: `float` value indicating the original width of the image.
original_height: `float` value indicating the original height of the image.
width: `float` value indicating the final width of the image.
height: `float` value indicating the final height of the image.
center: `float` value indicating the desired center of the final patched
image.
ptop: `float` value indicating the top of the image without padding.
pleft: `float` value indicating the left of the image without padding.
pbottom: `float` value indicating the bottom of the image without padding.
pright: `float` value indicating the right of the image without padding.
shiftx: `float` 0.0 or 1.0 value indicating if the image is on the left or
right.
shifty: `float` 0.0 or 1.0 value indicating if the image is at the top or
bottom.
Returns:
image: The cropped image in the same datatype as the input image.
crop_info: `float` tensor that is applied to the boxes in order to select
the boxes still contained within the image.
"""
def cast(values, dtype):
return [tf.cast(value, dtype) for value in values]
with tf.name_scope('mosaic_cut'):
center = tf.cast(center, width.dtype)
zero = tf.cast(0.0, width.dtype)
cut_x, cut_y = center[1], center[0]
# Select the crop of the image to use
left_shift = tf.minimum(
tf.minimum(cut_x, tf.maximum(zero, -pleft * width / original_width)),
width - cut_x)
top_shift = tf.minimum(
tf.minimum(cut_y, tf.maximum(zero, -ptop * height / original_height)),
height - cut_y)
right_shift = tf.minimum(
tf.minimum(width - cut_x,
tf.maximum(zero, -pright * width / original_width)), cut_x)
bot_shift = tf.minimum(
tf.minimum(height - cut_y,
tf.maximum(zero, -pbottom * height / original_height)),
cut_y)
(left_shift, top_shift, right_shift, bot_shift,
zero) = cast([left_shift, top_shift, right_shift, bot_shift, zero],
tf.float32)
# Build a crop offset and a crop size tensor to use for slicing.
crop_offset = [zero, zero, zero]
crop_size = [zero - 1, zero - 1, zero - 1]
if shiftx == 0.0 and shifty == 0.0:
crop_offset = [top_shift, left_shift, zero]
crop_size = [cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 0.0:
crop_offset = [top_shift, cut_x - right_shift, zero]
crop_size = [cut_y, width - cut_x, zero - 1]
elif shiftx == 0.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, left_shift, zero]
crop_size = [height - cut_y, cut_x, zero - 1]
elif shiftx == 1.0 and shifty == 1.0:
crop_offset = [cut_y - bot_shift, cut_x - right_shift, zero]
crop_size = [height - cut_y, width - cut_x, zero - 1]
# Contain and crop the image.
ishape = tf.cast(tf.shape(image)[:2], crop_size[0].dtype)
crop_size[0] = tf.minimum(crop_size[0], ishape[0])
crop_size[1] = tf.minimum(crop_size[1], ishape[1])
crop_offset = tf.cast(crop_offset, tf.int32)
crop_size = tf.cast(crop_size, tf.int32)
image = tf.slice(image, crop_offset, crop_size)
crop_info = tf.stack([
tf.cast(ishape, tf.float32),
tf.cast(tf.shape(image)[:2], dtype=tf.float32),
tf.ones_like(ishape, dtype=tf.float32),
tf.cast(crop_offset[:2], tf.float32)
])
return image, crop_info
def resize_and_jitter_image(image,
desired_size,
jitter=0.0,
letter_box=None,
random_pad=True,
crop_only=False,
shiftx=0.5,
shifty=0.5,
cut=None,
method=tf.image.ResizeMethod.BILINEAR,
seed=None):
"""Resize, Pad, and distort a given input image.
Args:
image: a `Tensor` of shape [height, width, 3] representing an image.
desired_size: a `Tensor` or `int` list/tuple of two elements representing
[height, width] of the desired actual output image size.
    jitter: a `float` representing the maximum jittering that can be applied to
the image.
letter_box: a `bool` representing if letterboxing should be applied.
random_pad: a `bool` representing if random padding should be applied.
crop_only: a `bool` representing if only cropping will be applied.
    shiftx: a `float` indicating if the image is on the left or right.
    shifty: a `float` value indicating if the image is at the top or bottom.
cut: a `float` value indicating the desired center of the final patched
image.
method: function to resize input image to scaled image.
seed: seed for random scale jittering.
Returns:
image_: a `Tensor` of shape [height, width, 3] where [height, width]
equals to `desired_size`.
infos: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [desired_height, desired_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale] is
the scaling factor, which is the ratio of
scaled dimension / original dimension.
cast([original_width, original_height, width, height, ptop, pleft, pbottom,
pright], tf.float32): a `Tensor` containing the information of the image
      and the applied preprocessing.
"""
def intersection(a, b):
"""Finds the intersection between 2 crops."""
minx = tf.maximum(a[0], b[0])
miny = tf.maximum(a[1], b[1])
maxx = tf.minimum(a[2], b[2])
maxy = tf.minimum(a[3], b[3])
return tf.convert_to_tensor([minx, miny, maxx, maxy])
def cast(values, dtype):
return [tf.cast(value, dtype) for value in values]
if jitter > 0.5 or jitter < 0:
raise ValueError('maximum change in aspect ratio must be between 0 and 0.5')
with tf.name_scope('resize_and_jitter_image'):
# Cast all parameters to a usable float data type.
jitter = tf.cast(jitter, tf.float32)
original_dtype, original_dims = image.dtype, tf.shape(image)[:2]
    # original width, original height, desired width, desired height
original_width, original_height, width, height = cast(
[original_dims[1], original_dims[0], desired_size[1], desired_size[0]],
tf.float32)
# Compute the random delta width and height etc. and randomize the
# location of the corner points.
jitter_width = original_width * jitter
jitter_height = original_height * jitter
pleft = random_uniform_strong(
-jitter_width, jitter_width, jitter_width.dtype, seed=seed)
pright = random_uniform_strong(
-jitter_width, jitter_width, jitter_width.dtype, seed=seed)
ptop = random_uniform_strong(
-jitter_height, jitter_height, jitter_height.dtype, seed=seed)
pbottom = random_uniform_strong(
-jitter_height, jitter_height, jitter_height.dtype, seed=seed)
# Letter box the image.
if letter_box:
(image_aspect_ratio,
input_aspect_ratio) = original_width / original_height, width / height
distorted_aspect = image_aspect_ratio / input_aspect_ratio
delta_h, delta_w = 0.0, 0.0
pullin_h, pullin_w = 0.0, 0.0
if distorted_aspect > 1:
delta_h = ((original_width / input_aspect_ratio) - original_height) / 2
else:
delta_w = ((original_height * input_aspect_ratio) - original_width) / 2
ptop = ptop - delta_h - pullin_h
pbottom = pbottom - delta_h - pullin_h
pright = pright - delta_w - pullin_w
pleft = pleft - delta_w - pullin_w
    # Compute the width and height to crop or pad to, and clip all crops to
    # be contained within the image.
swidth = original_width - pleft - pright
sheight = original_height - ptop - pbottom
src_crop = intersection([ptop, pleft, sheight + ptop, swidth + pleft],
[0, 0, original_height, original_width])
# Random padding used for mosaic.
h_ = src_crop[2] - src_crop[0]
w_ = src_crop[3] - src_crop[1]
if random_pad:
rmh = tf.maximum(0.0, -ptop)
rmw = tf.maximum(0.0, -pleft)
else:
rmw = (swidth - w_) * shiftx
rmh = (sheight - h_) * shifty
# Cast cropping params to usable dtype.
src_crop = tf.cast(src_crop, tf.int32)
    # Compute padding parameters.
dst_shape = [rmh, rmw, rmh + h_, rmw + w_]
ptop, pleft, pbottom, pright = dst_shape
pad = dst_shape * tf.cast([1, 1, -1, -1], ptop.dtype)
pad += tf.cast([0, 0, sheight, swidth], ptop.dtype)
pad = tf.cast(pad, tf.int32)
infos = []
# Crop the image to desired size.
cropped_image = tf.slice(
image, [src_crop[0], src_crop[1], 0],
[src_crop[2] - src_crop[0], src_crop[3] - src_crop[1], -1])
crop_info = tf.stack([
tf.cast(original_dims, tf.float32),
tf.cast(tf.shape(cropped_image)[:2], dtype=tf.float32),
tf.ones_like(original_dims, dtype=tf.float32),
tf.cast(src_crop[:2], tf.float32)
])
infos.append(crop_info)
if crop_only:
if not letter_box:
h_, w_ = cast(get_image_shape(cropped_image), width.dtype)
width = tf.cast(tf.round((w_ * width) / swidth), tf.int32)
height = tf.cast(tf.round((h_ * height) / sheight), tf.int32)
cropped_image = tf.image.resize(
cropped_image, [height, width], method=method)
cropped_image = tf.cast(cropped_image, original_dtype)
return cropped_image, infos, cast([
original_width, original_height, width, height, ptop, pleft, pbottom,
pright
], tf.int32)
# Pad the image to desired size.
image_ = tf.pad(
cropped_image, [[pad[0], pad[2]], [pad[1], pad[3]], [0, 0]],
constant_values=PAD_VALUE)
# Pad and scale info
isize = tf.cast(tf.shape(image_)[:2], dtype=tf.float32)
osize = tf.cast((desired_size[0], desired_size[1]), dtype=tf.float32)
pad_info = tf.stack([
tf.cast(tf.shape(cropped_image)[:2], tf.float32),
osize,
osize/isize,
(-tf.cast(pad[:2], tf.float32)*osize/isize)
])
infos.append(pad_info)
temp = tf.shape(image_)[:2]
cond = temp > tf.cast(desired_size, temp.dtype)
if tf.reduce_any(cond):
size = tf.cast(desired_size, temp.dtype)
size = tf.where(cond, size, temp)
image_ = tf.image.resize(
image_, (size[0], size[1]), method=tf.image.ResizeMethod.AREA)
image_ = tf.cast(image_, original_dtype)
image_ = tf.image.resize(
image_, (desired_size[0], desired_size[1]),
method=tf.image.ResizeMethod.BILINEAR,
antialias=False)
image_ = tf.cast(image_, original_dtype)
if cut is not None:
image_, crop_info = mosaic_cut(image_, original_width, original_height,
width, height, cut, ptop, pleft, pbottom,
pright, shiftx, shifty)
infos.append(crop_info)
return image_, infos, cast([
original_width, original_height, width, height, ptop, pleft, pbottom,
pright
], tf.float32)
def _build_transform(image,
perspective=0.00,
degrees=0.0,
scale_min=1.0,
scale_max=1.0,
translate=0.0,
random_pad=False,
desired_size=None,
seed=None):
"""Builds a unified affine transformation to spatially augment the image."""
height, width = get_image_shape(image)
ch = height = tf.cast(height, tf.float32)
cw = width = tf.cast(width, tf.float32)
deg_to_rad = lambda x: tf.cast(x, tf.float32) * np.pi / 180.0
if desired_size is not None:
desired_size = tf.cast(desired_size, tf.float32)
ch = desired_size[0]
cw = desired_size[1]
  # Compute the center of the image in the output resolution.
center = tf.eye(3, dtype=tf.float32)
center = tf.tensor_scatter_nd_update(center, [[0, 2], [1, 2]],
[-cw / 2, -ch / 2])
center_boxes = tf.tensor_scatter_nd_update(center, [[0, 2], [1, 2]],
[cw / 2, ch / 2])
# Compute a random rotation to apply.
rotation = tf.eye(3, dtype=tf.float32)
a = deg_to_rad(random_uniform_strong(-degrees, degrees, seed=seed))
cos = tf.math.cos(a)
sin = tf.math.sin(a)
rotation = tf.tensor_scatter_nd_update(rotation,
[[0, 0], [0, 1], [1, 0], [1, 1]],
[cos, -sin, sin, cos])
rotation_boxes = tf.tensor_scatter_nd_update(rotation,
[[0, 0], [0, 1], [1, 0], [1, 1]],
[cos, sin, -sin, cos])
  # Compute a random perspective change to apply.
prespective_warp = tf.eye(3)
px = random_uniform_strong(-perspective, perspective, seed=seed)
py = random_uniform_strong(-perspective, perspective, seed=seed)
prespective_warp = tf.tensor_scatter_nd_update(prespective_warp,
[[2, 0], [2, 1]], [px, py])
prespective_warp_boxes = tf.tensor_scatter_nd_update(prespective_warp,
[[2, 0], [2, 1]],
[-px, -py])
# Compute a random scaling to apply.
scale = tf.eye(3, dtype=tf.float32)
s = random_uniform_strong(scale_min, scale_max, seed=seed)
scale = tf.tensor_scatter_nd_update(scale, [[0, 0], [1, 1]], [1 / s, 1 / s])
scale_boxes = tf.tensor_scatter_nd_update(scale, [[0, 0], [1, 1]], [s, s])
# Compute a random Translation to apply.
translation = tf.eye(3)
if (random_pad and height * s < ch and width * s < cw):
    # The scaled image fits entirely within the output resolution, so it is
    # arbitrarily translated to locations within the output window.
center = center_boxes = tf.eye(3, dtype=tf.float32)
tx = random_uniform_strong(-1, 0, seed=seed) * (cw / s - width)
ty = random_uniform_strong(-1, 0, seed=seed) * (ch / s - height)
else:
# The image can be translated outside of the output resolution window
# but the image is translated relative to the output resolution not the
# input image resolution.
tx = random_uniform_strong(0.5 - translate, 0.5 + translate, seed=seed)
ty = random_uniform_strong(0.5 - translate, 0.5 + translate, seed=seed)
    # Center and scale the image such that the window of translation is
    # contained within the output resolution.
dx, dy = (width - cw / s) / width, (height - ch / s) / height
sx, sy = 1 - dx, 1 - dy
bx, by = dx / 2, dy / 2
tx, ty = bx + (sx * tx), by + (sy * ty)
# Scale the translation to width and height of the image.
tx *= width
ty *= height
translation = tf.tensor_scatter_nd_update(translation, [[0, 2], [1, 2]],
[tx, ty])
translation_boxes = tf.tensor_scatter_nd_update(translation, [[0, 2], [1, 2]],
[-tx, -ty])
  # Use repeated matrix multiplications to combine all the image
  # transformations into a single unified augmentation operation. M is applied
  # to the image and Mb is applied to the boxes. The order of matrix
  # multiplication is important: first translate, then scale, then rotate,
  # then center, then finally alter the perspective.
affine = (translation @ scale @ rotation @ center @ prespective_warp)
affine_boxes = (
prespective_warp_boxes @ center_boxes @ rotation_boxes @ scale_boxes
@ translation_boxes)
return affine, affine_boxes, s
def affine_warp_image(image,
desired_size,
perspective=0.00,
degrees=0.0,
scale_min=1.0,
scale_max=1.0,
translate=0.0,
random_pad=False,
seed=None):
"""Applies random spatial augmentation to the image.
Args:
image: A `Tensor` for the image.
desired_size: A `tuple` for desired output image size.
    perspective: A `float` for the maximum that can be applied to random
      perspective change.
    degrees: A `float` for the maximum degrees that can be applied to random
      rotation.
    scale_min: A `float` for the minimum scaling factor that can be applied to
      random scaling.
    scale_max: A `float` for the maximum scaling factor that can be applied to
      random scaling.
    translate: A `float` for the maximum translation that can be applied to
      random translation.
random_pad: A `bool` for using random padding.
seed: An `Optional[int]` for the seed to use in random number generation.
Returns:
image: A `Tensor` representing the augmented image.
affine_matrix: A `Tensor` representing the augmenting matrix for the image.
affine_info: A `List` containing the size of the original image, the desired
output_size of the image and the augmenting matrix for the boxes.
"""
# Build an image transformation matrix.
image_size = tf.cast(get_image_shape(image), tf.float32)
affine_matrix, affine_boxes, _ = _build_transform(
image,
perspective=perspective,
degrees=degrees,
scale_min=scale_min,
scale_max=scale_max,
translate=translate,
random_pad=random_pad,
desired_size=desired_size,
seed=seed)
affine = tf.reshape(affine_matrix, [-1])
affine = tf.cast(affine[:-1], tf.float32)
# Apply the transformation to image.
image = augment.transform(
image,
affine,
fill_value=PAD_VALUE,
output_shape=desired_size,
interpolation='bilinear',
fill_mode='constant',
)
desired_size = tf.cast(desired_size, tf.float32)
affine_info = [image_size, desired_size, affine_boxes]
return image, affine_matrix, affine_info
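# Illustrative usage sketch added for clarity; it is not part of the original
# module. The image content and augmentation ranges below are arbitrary
# example values; the call warps a 480x640 image into a 416x416 output with a
# mild random rotation, scale and translation.
def _example_affine_warp_image():
  image = tf.zeros([480, 640, 3], dtype=tf.float32)
  return affine_warp_image(
      image,
      desired_size=[416, 416],
      degrees=10.0,
      scale_min=0.5,
      scale_max=1.5,
      translate=0.1)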
def affine_warp_boxes(affine, boxes, output_size, box_history):
  """Applies rotation, perspective change, translation and scaling to boxes.
Args:
affine: A `Tensor` for the augmenting matrix for the boxes.
boxes: A `Tensor` for the boxes.
output_size: A `list` of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
box_history: A `Tensor` for the boxes history, which are the boxes that
undergo the same augmentations as `boxes`, but no clipping was applied. We
      can keep track of how much the boxes have changed by keeping track of
      this tensor.
Returns:
clipped_boxes: A `Tensor` representing the augmented boxes.
box_history: A `Tensor` representing the augmented box_history.
"""
def _get_corners(box):
"""Get the corner of each box as a tuple of (x, y) coordinates."""
ymi, xmi, yma, xma = tf.split(box, 4, axis=-1)
tl = tf.concat([xmi, ymi], axis=-1)
bl = tf.concat([xmi, yma], axis=-1)
tr = tf.concat([xma, ymi], axis=-1)
br = tf.concat([xma, yma], axis=-1)
return tf.concat([tl, bl, tr, br], axis=-1)
def _corners_to_boxes(corner):
"""Convert (x, y) corners back into boxes [ymin, xmin, ymax, xmax]."""
corner = tf.reshape(corner, [-1, 4, 2])
y = corner[..., 1]
x = corner[..., 0]
y_min = tf.reduce_min(y, axis=-1)
x_min = tf.reduce_min(x, axis=-1)
y_max = tf.reduce_max(y, axis=-1)
x_max = tf.reduce_max(x, axis=-1)
return tf.stack([y_min, x_min, y_max, x_max], axis=-1)
  def _aug_boxes(affine_matrix, box):
    """Applies an affine transformation matrix M to augment the boxes."""
corners = _get_corners(box)
corners = tf.reshape(corners, [-1, 4, 2])
z = tf.expand_dims(tf.ones_like(corners[..., 1]), axis=-1)
corners = tf.concat([corners, z], axis=-1)
corners = tf.transpose(
tf.matmul(affine_matrix, corners, transpose_b=True), perm=(0, 2, 1))
corners, p = tf.split(corners, [2, 1], axis=-1)
corners /= p
corners = tf.reshape(corners, [-1, 8])
box = _corners_to_boxes(corners)
return box
boxes = _aug_boxes(affine, boxes)
box_history = _aug_boxes(affine, box_history)
clipped_boxes = bbox_ops.clip_boxes(boxes, output_size)
return clipped_boxes, box_history
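# Illustrative usage sketch added for clarity; it is not part of the original
# module. With an identity affine matrix the boxes come back unchanged (up to
# clipping); boxes are given in pixel coordinates relative to the output size.
def _example_affine_warp_boxes():
  affine = tf.eye(3, dtype=tf.float32)
  boxes = tf.constant([[10.0, 20.0, 110.0, 220.0]])
  return affine_warp_boxes(affine, boxes, [256, 256], box_history=boxes)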
def boxes_candidates(clipped_boxes,
box_history,
wh_thr=2,
ar_thr=20,
area_thr=0.1):
"""Filters the boxes that don't satisfy the width/height and area constraints.
Args:
clipped_boxes: A `Tensor` for the boxes.
box_history: A `Tensor` for the boxes history, which are the boxes that
undergo the same augmentations as `boxes`, but no clipping was applied. We
      can keep track of how much the boxes have changed by keeping track of
      this tensor.
wh_thr: An `int` for the width/height threshold.
ar_thr: An `int` for the aspect ratio threshold.
area_thr: An `int` for the area threshold.
Returns:
indices[:, 0]: A `Tensor` representing valid boxes after filtering.
"""
if area_thr == 0.0:
wh_thr = 0
ar_thr = np.inf
area_thr = tf.math.abs(area_thr)
  # Get the scaled and shifted heights and widths of the original
  # unclipped boxes.
og_height = tf.maximum(box_history[:, 2] - box_history[:, 0], 0.0)
og_width = tf.maximum(box_history[:, 3] - box_history[:, 1], 0.0)
  # Get the scaled and shifted heights and widths of the clipped boxes.
clipped_height = tf.maximum(clipped_boxes[:, 2] - clipped_boxes[:, 0], 0.0)
clipped_width = tf.maximum(clipped_boxes[:, 3] - clipped_boxes[:, 1], 0.0)
# Determine the aspect ratio of the clipped boxes.
ar = tf.maximum(clipped_width / (clipped_height + 1e-16),
clipped_height / (clipped_width + 1e-16))
  # Ensure the clipped width and height are larger than a preset threshold.
conda = clipped_width >= wh_thr
condb = clipped_height >= wh_thr
# Ensure the area of the clipped box is larger than the area threshold.
area = (clipped_height * clipped_width) / (og_width * og_height + 1e-16)
condc = area > area_thr
# Ensure the aspect ratio is not too extreme.
condd = ar < ar_thr
cond = tf.expand_dims(
tf.logical_and(
tf.logical_and(conda, condb), tf.logical_and(condc, condd)),
axis=-1)
  # Gather the indices of the boxes that pass all of the tests.
indices = tf.where(cond)
return indices[:, 0]
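# Illustrative usage sketch added for clarity; it is not part of the original
# module. The first box below is untouched by clipping, while the second keeps
# only a tiny fraction of its original (unclipped) area, so with the default
# area threshold of 0.1 only index 0 is returned.
def _example_boxes_candidates():
  clipped = tf.constant([[0.0, 0.0, 100.0, 100.0], [0.0, 0.0, 5.0, 5.0]])
  history = tf.constant([[0.0, 0.0, 100.0, 100.0], [0.0, 0.0, 100.0, 100.0]])
  return boxes_candidates(clipped, history)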
def resize_and_crop_boxes(boxes, image_scale, output_size, offset, box_history):
"""Resizes and crops the boxes.
Args:
boxes: A `Tensor` for the boxes.
image_scale: A `Tensor` for the scaling factor of the image.
output_size: A `list` of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
offset: A `Tensor` for how much translation was applied to the image.
box_history: A `Tensor` for the boxes history, which are the boxes that
undergo the same augmentations as `boxes`, but no clipping was applied. We
      can keep track of how much the boxes have changed by keeping track of
      this tensor.
Returns:
clipped_boxes: A `Tensor` representing the augmented boxes.
box_history: A `Tensor` representing the augmented box_history.
"""
# Shift and scale the input boxes.
boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
  # Apply the same shift and scale to the box history.
box_history *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
box_history -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
# Clip the shifted and scaled boxes.
clipped_boxes = bbox_ops.clip_boxes(boxes, output_size)
return clipped_boxes, box_history
def transform_and_clip_boxes(boxes,
infos,
affine=None,
shuffle_boxes=False,
area_thresh=0.1,
seed=None,
filter_and_clip_boxes=True):
"""Clips and cleans the boxes.
Args:
boxes: A `Tensor` for the boxes.
infos: A `list` that contains the image infos.
affine: A `list` that contains parameters for resize and crop.
shuffle_boxes: A `bool` for shuffling the boxes.
area_thresh: An `int` for the area threshold.
seed: seed for random number generation.
filter_and_clip_boxes: A `bool` for filtering and clipping the boxes to
[0, 1].
Returns:
boxes: A `Tensor` representing the augmented boxes.
ind: A `Tensor` valid box indices.
"""
# Clip and clean boxes.
def get_valid_boxes(boxes):
"""Get indices for non-empty boxes."""
    # Compute the height and width of each box.
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
base = tf.logical_and(tf.greater(height, 0), tf.greater(width, 0))
return base
  # Initialize the history to track the operations applied to the boxes.
box_history = boxes
# Make sure all boxes are valid to start, clip to [0, 1] and get only the
# valid boxes.
output_size = None
if filter_and_clip_boxes:
boxes = tf.math.maximum(tf.math.minimum(boxes, 1.0), 0.0)
cond = get_valid_boxes(boxes)
if infos is None:
infos = []
for info in infos:
# Denormalize the boxes.
boxes = bbox_ops.denormalize_boxes(boxes, info[0])
box_history = bbox_ops.denormalize_boxes(box_history, info[0])
    # Shift and scale all boxes, and keep track of the box history with no
    # box clipping; the history is used for removing boxes that have become
    # too small or have left the image area.
(boxes, box_history) = resize_and_crop_boxes(
boxes, info[2, :], info[1, :], info[3, :], box_history=box_history)
# Get all the boxes that still remain in the image and store
# in a bit vector for later use.
cond = tf.logical_and(get_valid_boxes(boxes), cond)
# Normalize the boxes to [0, 1].
output_size = info[1]
boxes = bbox_ops.normalize_boxes(boxes, output_size)
box_history = bbox_ops.normalize_boxes(box_history, output_size)
if affine is not None:
# Denormalize the boxes.
boxes = bbox_ops.denormalize_boxes(boxes, affine[0])
box_history = bbox_ops.denormalize_boxes(box_history, affine[0])
    # Clip the final boxes.
(boxes, box_history) = affine_warp_boxes(
affine[2], boxes, affine[1], box_history=box_history)
# Get all the boxes that still remain in the image and store
# in a bit vector for later use.
cond = tf.logical_and(get_valid_boxes(boxes), cond)
# Normalize the boxes to [0, 1].
output_size = affine[1]
boxes = bbox_ops.normalize_boxes(boxes, output_size)
box_history = bbox_ops.normalize_boxes(box_history, output_size)
# Remove the bad boxes.
boxes *= tf.cast(tf.expand_dims(cond, axis=-1), boxes.dtype)
# Threshold the existing boxes.
if filter_and_clip_boxes:
if output_size is not None:
boxes_ = bbox_ops.denormalize_boxes(boxes, output_size)
box_history_ = bbox_ops.denormalize_boxes(box_history, output_size)
inds = boxes_candidates(boxes_, box_history_, area_thr=area_thresh)
else:
inds = boxes_candidates(
boxes, box_history, wh_thr=0.0, area_thr=area_thresh)
# Select and gather the good boxes.
if shuffle_boxes:
inds = tf.random.shuffle(inds, seed=seed)
else:
inds = bbox_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, inds)
return boxes, inds
| 36,299 | 37.412698 | 80 | py |
models | models-master/official/projects/yolo/ops/box_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo box ops."""
import math
import tensorflow as tf
from official.projects.yolo.ops import math_ops
def yxyx_to_xcycwh(box: tf.Tensor):
"""Converts boxes from yxyx to x_center, y_center, width, height.
Args:
box: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax.
Returns:
box: a `Tensor` whose shape is the same as `box` in new format.
"""
with tf.name_scope('yxyx_to_xcycwh'):
ymin, xmin, ymax, xmax = tf.split(box, 4, axis=-1)
x_center = (xmax + xmin) / 2
y_center = (ymax + ymin) / 2
width = xmax - xmin
height = ymax - ymin
box = tf.concat([x_center, y_center, width, height], axis=-1)
return box
def xcycwh_to_yxyx(box: tf.Tensor):
"""Converts boxes from x_center, y_center, width, height to yxyx format.
Args:
box: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes in x_center, y_center, width, height.
Returns:
box: a `Tensor` whose shape is the same as `box` in new format.
"""
with tf.name_scope('xcycwh_to_yxyx'):
xy, wh = tf.split(box, 2, axis=-1)
xy_min = xy - wh / 2
xy_max = xy + wh / 2
x_min, y_min = tf.split(xy_min, 2, axis=-1)
x_max, y_max = tf.split(xy_max, 2, axis=-1)
box = tf.concat([y_min, x_min, y_max, x_max], axis=-1)
return box
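# Illustrative usage sketch added for clarity; it is not part of the original
# module. It shows a round trip through the two conversion helpers above using
# arbitrary example coordinates.
def _example_box_format_round_trip():
  box_yxyx = tf.constant([[0.1, 0.2, 0.5, 0.6]])
  box_xcycwh = yxyx_to_xcycwh(box_yxyx)  # [[0.4, 0.3, 0.4, 0.4]]
  return xcycwh_to_yxyx(box_xcycwh)  # recovers the original yxyx box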
def intersect_and_union(box1, box2, yxyx=False):
"""Calculates the intersection and union between box1 and box2.
Args:
box1: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
box2: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
yxyx: a `bool` indicating whether the input box is of the format x_center
y_center, width, height or y_min, x_min, y_max, x_max.
Returns:
    intersection: a `Tensor` that represents the intersection.
    union: a `Tensor` that represents the union.
"""
if not yxyx:
box1_area = tf.reduce_prod(tf.split(box1, 2, axis=-1)[-1], axis=-1)
box2_area = tf.reduce_prod(tf.split(box2, 2, axis=-1)[-1], axis=-1)
box1 = xcycwh_to_yxyx(box1)
box2 = xcycwh_to_yxyx(box2)
b1mi, b1ma = tf.split(box1, 2, axis=-1)
b2mi, b2ma = tf.split(box2, 2, axis=-1)
intersect_mins = tf.math.maximum(b1mi, b2mi)
intersect_maxes = tf.math.minimum(b1ma, b2ma)
intersect_wh = tf.math.maximum(intersect_maxes - intersect_mins, 0.0)
intersection = tf.reduce_prod(intersect_wh, axis=-1)
if yxyx:
box1_area = tf.reduce_prod(b1ma - b1mi, axis=-1)
box2_area = tf.reduce_prod(b2ma - b2mi, axis=-1)
union = box1_area + box2_area - intersection
return intersection, union
def smallest_encompassing_box(box1, box2, yxyx=False, clip=False):
"""Calculates the smallest box that encompasses box1 and box2.
Args:
box1: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
box2: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
yxyx: a `bool` indicating whether the input box is of the format x_center
y_center, width, height or y_min, x_min, y_max, x_max.
clip: a `bool`, whether or not to clip boxes.
Returns:
box_c: a `Tensor` whose last dimension is 4 representing the coordinates of
      boxes; the return format is y_min, x_min, y_max, x_max if yxyx is set
      to True. In other words, it will match the input format.
"""
if not yxyx:
box1 = xcycwh_to_yxyx(box1)
box2 = xcycwh_to_yxyx(box2)
b1mi, b1ma = tf.split(box1, 2, axis=-1)
b2mi, b2ma = tf.split(box2, 2, axis=-1)
bcmi = tf.math.minimum(b1mi, b2mi)
bcma = tf.math.maximum(b1ma, b2ma)
box_c = tf.concat([bcmi, bcma], axis=-1)
if not yxyx:
box_c = yxyx_to_xcycwh(box_c)
if clip:
bca = tf.reduce_prod(bcma - bcmi, keepdims=True, axis=-1)
box_c = tf.where(bca <= 0.0, tf.zeros_like(box_c), box_c)
return bcmi, bcma, box_c
def compute_iou(box1, box2, yxyx=False):
"""Calculates the intersection over union between box1 and box2.
Args:
box1: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
box2: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
yxyx: a `bool` indicating whether the input box is of the format x_center
y_center, width, height or y_min, x_min, y_max, x_max.
Returns:
    iou: a `Tensor` that represents the intersection over union.
"""
with tf.name_scope('iou'):
intersection, union = intersect_and_union(box1, box2, yxyx=yxyx)
iou = math_ops.divide_no_nan(intersection, union)
return iou
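# Illustrative usage sketch added for clarity; it is not part of the original
# module. The two boxes below are given in [ymin, xmin, ymax, xmax] format;
# they overlap in a 1x1 region while their union covers an area of 7, so the
# returned IOU is roughly 0.143.
def _example_compute_iou():
  box1 = tf.constant([[0.0, 0.0, 2.0, 2.0]])
  box2 = tf.constant([[1.0, 1.0, 3.0, 3.0]])
  return compute_iou(box1, box2, yxyx=True)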
def compute_giou(box1, box2, yxyx=False):
"""Calculates the General intersection over union between box1 and box2.
Args:
box1: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
box2: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
yxyx: a `bool` indicating whether the input box is of the format x_center
y_center, width, height or y_min, x_min, y_max, x_max.
Returns:
    giou: a `Tensor` that represents the generalized intersection over union.
"""
with tf.name_scope('giou'):
if not yxyx:
yxyx1 = xcycwh_to_yxyx(box1)
yxyx2 = xcycwh_to_yxyx(box2)
else:
yxyx1, yxyx2 = box1, box2
cmi, cma, _ = smallest_encompassing_box(yxyx1, yxyx2, yxyx=True)
intersection, union = intersect_and_union(yxyx1, yxyx2, yxyx=True)
iou = math_ops.divide_no_nan(intersection, union)
bcwh = cma - cmi
c = tf.math.reduce_prod(bcwh, axis=-1)
regularization = math_ops.divide_no_nan((c - union), c)
giou = iou - regularization
return iou, giou
def compute_diou(box1, box2, beta=1.0, yxyx=False):
"""Calculates the distance intersection over union between box1 and box2.
Args:
box1: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
box2: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
beta: a `float` indicating the amount to scale the distance iou
regularization term.
yxyx: a `bool` indicating whether the input box is of the format x_center
y_center, width, height or y_min, x_min, y_max, x_max.
Returns:
    diou: a `Tensor` that represents the distance intersection over union.
"""
with tf.name_scope('diou'):
# compute center distance
if not yxyx:
xycc1, xycc2 = box1, box2
yxyx1 = xcycwh_to_yxyx(box1)
yxyx2 = xcycwh_to_yxyx(box2)
else:
yxyx1, yxyx2 = box1, box2
xycc1 = yxyx_to_xcycwh(box1)
xycc2 = yxyx_to_xcycwh(box2)
cmi, cma, _ = smallest_encompassing_box(yxyx1, yxyx2, yxyx=True)
intersection, union = intersect_and_union(yxyx1, yxyx2, yxyx=True)
iou = math_ops.divide_no_nan(intersection, union)
b1xy, _ = tf.split(xycc1, 2, axis=-1)
b2xy, _ = tf.split(xycc2, 2, axis=-1)
bcwh = cma - cmi
center_dist = tf.reduce_sum((b1xy - b2xy)**2, axis=-1)
c_diag = tf.reduce_sum(bcwh**2, axis=-1)
regularization = math_ops.divide_no_nan(center_dist, c_diag)
diou = iou - regularization**beta
return iou, diou
def compute_ciou(box1, box2, yxyx=False, darknet=False):
"""Calculates the complete intersection over union between box1 and box2.
Args:
box1: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
box2: any `Tensor` whose last dimension is 4 representing the coordinates of
boxes.
yxyx: a `bool` indicating whether the input box is of the format x_center
y_center, width, height or y_min, x_min, y_max, x_max.
darknet: a `bool` indicating whether the calling function is the YOLO
darknet loss.
Returns:
    ciou: a `Tensor` that represents the complete intersection over union.
"""
with tf.name_scope('ciou'):
if not yxyx:
xycc1, xycc2 = box1, box2
yxyx1 = xcycwh_to_yxyx(box1)
yxyx2 = xcycwh_to_yxyx(box2)
else:
yxyx1, yxyx2 = box1, box2
xycc1 = yxyx_to_xcycwh(box1)
xycc2 = yxyx_to_xcycwh(box2)
    # Build the smallest encompassing box.
cmi, cma, _ = smallest_encompassing_box(yxyx1, yxyx2, yxyx=True)
intersection, union = intersect_and_union(yxyx1, yxyx2, yxyx=True)
iou = math_ops.divide_no_nan(intersection, union)
b1xy, b1w, b1h = tf.split(xycc1, [2, 1, 1], axis=-1)
b2xy, b2w, b2h = tf.split(xycc2, [2, 1, 1], axis=-1)
bchw = cma - cmi
# Center regularization
center_dist = tf.reduce_sum((b1xy - b2xy)**2, axis=-1)
c_diag = tf.reduce_sum(bchw**2, axis=-1)
regularization = math_ops.divide_no_nan(center_dist, c_diag)
    # Compute aspect ratio consistency.
terma = math_ops.divide_no_nan(b1w, b1h) # gt
termb = math_ops.divide_no_nan(b2w, b2h) # pred
arcterm = tf.squeeze(
tf.math.pow(tf.math.atan(termb) - tf.math.atan(terma), 2), axis=-1)
v = (4 / math.pi**2) * arcterm
# Compute the aspect ratio weight, should be treated as a constant
a = tf.stop_gradient(math_ops.divide_no_nan(v, 1 - iou + v))
if darknet:
grad_scale = tf.stop_gradient(tf.square(b2w) + tf.square(b2h))
v *= tf.squeeze(grad_scale, axis=-1)
ciou = iou - regularization - (v * a)
return iou, ciou
def aggregated_comparitive_iou(boxes1, boxes2=None, iou_type=0, beta=0.6):
"""Calculates the IOU between two set of boxes.
  Similar to bbox_overlap but far more versatile.
Args:
boxes1: a `Tensor` of shape [batch size, N, 4] representing the coordinates
of boxes.
boxes2: a `Tensor` of shape [batch size, N, 4] representing the coordinates
of boxes.
iou_type: `integer` representing the iou version to use, 0 is distance iou,
1 is the general iou, 2 is the complete iou, any other number uses the
standard iou.
beta: `float` for the scaling quantity to apply to distance iou
regularization.
Returns:
    iou: a `Tensor` that represents the intersection over union of the
      expected/input type.
"""
boxes1 = tf.expand_dims(boxes1, axis=-2)
if boxes2 is not None:
boxes2 = tf.expand_dims(boxes2, axis=-3)
else:
boxes2 = tf.transpose(boxes1, perm=(0, 2, 1, 3))
if iou_type == 0 or iou_type == 'diou': # diou
_, iou = compute_diou(boxes1, boxes2, beta=beta, yxyx=True)
elif iou_type == 1 or iou_type == 'giou': # giou
_, iou = compute_giou(boxes1, boxes2, yxyx=True)
elif iou_type == 2 or iou_type == 'ciou': # ciou
_, iou = compute_ciou(boxes1, boxes2, yxyx=True)
else:
iou = compute_iou(boxes1, boxes2, yxyx=True)
return iou
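# Illustrative usage sketch added for clarity; it is not part of the original
# module. It builds a pairwise IOU matrix between two small sets of boxes, the
# same pattern used when matching ground truths against anchors. Inputs are
# shaped [batch, N, 4] in yxyx format and the result is [batch, N, M]; passing
# iou_type=3 (any value other than 0, 1 or 2) selects the plain IOU.
def _example_pairwise_iou_matrix():
  boxes1 = tf.constant([[[0.0, 0.0, 2.0, 2.0], [1.0, 1.0, 3.0, 3.0]]])
  boxes2 = tf.constant([[[0.0, 0.0, 2.0, 2.0], [2.0, 2.0, 4.0, 4.0]]])
  return aggregated_comparitive_iou(boxes1, boxes2, iou_type=3)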
| 11,193 | 33.656347 | 80 | py |
models | models-master/official/projects/yolo/ops/anchor.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo Anchor labler."""
import numpy as np
import tensorflow as tf
from official.projects.yolo.ops import box_ops
from official.projects.yolo.ops import loss_utils
from official.projects.yolo.ops import preprocessing_ops
INF = 10000000
def get_best_anchor(y_true,
anchors,
stride,
width=1,
height=1,
iou_thresh=0.25,
best_match_only=False,
                    use_tie_breaker=True):
  """Get the correct anchor that is associated with each box using IOU.
Args:
y_true: tf.Tensor[] for the list of bounding boxes in the yolo format.
anchors: list or tensor for the anchor boxes to be used in prediction found
via Kmeans.
stride: `int` stride for the anchors.
width: int for the image width.
height: int for the image height.
iou_thresh: `float` the minimum iou threshold to use for selecting boxes for
each level.
best_match_only: `bool` if the box only has one match and it is less than
the iou threshold, when set to True, this match will be dropped as no
anchors can be linked to it.
    use_tie_breaker: `bool` if there are many anchors for a given box, then
attempt to use all of them, if False, only the first matching box will be
used.
Returns:
    tf.Tensor: y_true with the anchor associated with each ground truth box.
"""
with tf.name_scope('get_best_anchor'):
width = tf.cast(width, dtype=tf.float32)
height = tf.cast(height, dtype=tf.float32)
scaler = tf.convert_to_tensor([width, height])
    # scale to the level's output width and height
true_wh = tf.cast(y_true[..., 2:4], dtype=tf.float32) * scaler
# scale down from large anchor to small anchor type
anchors = tf.cast(anchors, dtype=tf.float32) / stride
k = tf.shape(anchors)[0]
anchors = tf.concat([tf.zeros_like(anchors), anchors], axis=-1)
truth_comp = tf.concat([tf.zeros_like(true_wh), true_wh], axis=-1)
if iou_thresh >= 1.0:
anchors = tf.expand_dims(anchors, axis=-2)
truth_comp = tf.expand_dims(truth_comp, axis=-3)
aspect = truth_comp[..., 2:4] / anchors[..., 2:4]
aspect = tf.where(tf.math.is_nan(aspect), tf.zeros_like(aspect), aspect)
aspect = tf.maximum(aspect, 1 / aspect)
aspect = tf.where(tf.math.is_nan(aspect), tf.zeros_like(aspect), aspect)
aspect = tf.reduce_max(aspect, axis=-1)
values, indexes = tf.math.top_k(
tf.transpose(-aspect, perm=[1, 0]),
k=tf.cast(k, dtype=tf.int32),
sorted=True)
values = -values
ind_mask = tf.cast(values < iou_thresh, dtype=indexes.dtype)
else:
truth_comp = box_ops.xcycwh_to_yxyx(truth_comp)
anchors = box_ops.xcycwh_to_yxyx(anchors)
iou_raw = box_ops.aggregated_comparitive_iou(
truth_comp,
anchors,
iou_type=3,
)
values, indexes = tf.math.top_k(
iou_raw, k=tf.cast(k, dtype=tf.int32), sorted=True)
ind_mask = tf.cast(values >= iou_thresh, dtype=indexes.dtype)
    # Pad the indexes such that all values less than the threshold are -1:
    # add one, multiply by the mask to zero out all the bad locations, then
    # subtract one, making all the bad locations equal to -1.
if best_match_only:
iou_index = ((indexes[..., 0:] + 1) * ind_mask[..., 0:]) - 1
elif use_tie_breaker:
iou_index = tf.concat([
tf.expand_dims(indexes[..., 0], axis=-1),
((indexes[..., 1:] + 1) * ind_mask[..., 1:]) - 1
],
axis=-1)
else:
iou_index = tf.concat([
tf.expand_dims(indexes[..., 0], axis=-1),
tf.zeros_like(indexes[..., 1:]) - 1
],
axis=-1)
return tf.cast(iou_index, dtype=tf.float32), tf.cast(values, dtype=tf.float32)
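# Illustrative usage sketch added for clarity; it is not part of the original
# module. Two ground truth boxes in normalized (x_center, y_center, width,
# height) format are matched against three pixel-space anchors for a stride-8
# level whose feature grid is 52x52. The first returned tensor holds, per box,
# the anchor indexes sorted by IOU; -1 marks anchors below the threshold, while
# the best match in the first column is always kept.
def _example_get_best_anchor():
  y_true = tf.constant([[0.5, 0.5, 0.1, 0.2], [0.3, 0.3, 0.4, 0.4]])
  anchors = [[12.0, 16.0], [19.0, 36.0], [40.0, 28.0]]
  return get_best_anchor(
      y_true, anchors, stride=8, width=52, height=52, iou_thresh=0.25)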
class YoloAnchorLabeler:
"""Anchor labeler for the Yolo Models."""
def __init__(self,
anchors=None,
anchor_free_level_limits=None,
level_strides=None,
center_radius=None,
max_num_instances=200,
match_threshold=0.25,
best_matches_only=False,
use_tie_breaker=True,
darknet=False,
dtype='float32'):
"""Initialization for anchor labler.
Args:
anchors: `Dict[List[Union[int, float]]]` values for each anchor box.
anchor_free_level_limits: `List` the box sizes that will be allowed at
each FPN level as is done in the FCOS and YOLOX paper for anchor free
box assignment.
level_strides: `Dict[int]` for how much the model scales down the images
        at each level.
center_radius: `Dict[float]` for radius around each box center to search
for extra centers in each level.
max_num_instances: `int` for the number of boxes to compute loss on.
match_threshold: `float` indicating the threshold over which an anchor
        will be considered for prediction; at zero, all the anchors will be
        used and at 1.0 only the best will be used. For anchor thresholds
        larger than 1.0 we stop using the IOU for anchor comparison and
        resort directly to comparing the width and height; this is used for
        the scaled models.
best_matches_only: `boolean` indicating how boxes are selected for
optimization.
use_tie_breaker: `boolean` indicating whether to use the anchor threshold
value.
darknet: `boolean` indicating which data pipeline to use. Setting to True
        swaps the pipeline to output images relative to YOLOv4 and older.
dtype: `str` indicating the output datatype of the datapipeline selecting
from {"float32", "float16", "bfloat16"}.
"""
self.anchors = anchors
self.masks = self._get_mask()
self.anchor_free_level_limits = self._get_level_limits(
anchor_free_level_limits)
if darknet and self.anchor_free_level_limits is None:
center_radius = None
self.keys = self.anchors.keys()
if self.anchor_free_level_limits is not None:
maxim = 2000
match_threshold = -0.01
self.num_instances = {key: maxim for key in self.keys}
elif not darknet:
self.num_instances = {
key: (6 - i) * max_num_instances for i, key in enumerate(self.keys)
}
else:
self.num_instances = {key: max_num_instances for key in self.keys}
self.center_radius = center_radius
self.level_strides = level_strides
self.match_threshold = match_threshold
self.best_matches_only = best_matches_only
self.use_tie_breaker = use_tie_breaker
self.dtype = dtype
  def _get_mask(self):
    """Gets indexes of each anchor per level for box search across levels."""
masks = {}
start = 0
minimum = int(min(self.anchors.keys()))
maximum = int(max(self.anchors.keys()))
for i in range(minimum, maximum + 1):
per_scale = len(self.anchors[str(i)])
masks[str(i)] = list(range(start, per_scale + start))
start += per_scale
return masks
  def _get_level_limits(self, level_limits):
    """Gets receptive field range per level for anchor-free box placement."""
if level_limits is not None:
level_limits_dict = {}
level_limits = [0.0] + level_limits + [np.inf]
for i, key in enumerate(self.anchors.keys()):
level_limits_dict[key] = level_limits[i:i + 2]
else:
level_limits_dict = None
return level_limits_dict
def _tie_breaking_search(self, anchors, mask, boxes, classes):
"""After search, link each anchor ind to the correct map in ground truth."""
mask = tf.cast(tf.reshape(mask, [1, 1, 1, -1]), anchors.dtype)
anchors = tf.expand_dims(anchors, axis=-1)
viable = tf.where(tf.squeeze(anchors == mask, axis=0))
gather_id, _, anchor_id = tf.split(viable, 3, axis=-1)
boxes = tf.gather_nd(boxes, gather_id)
classes = tf.gather_nd(classes, gather_id)
classes = tf.expand_dims(classes, axis=-1)
classes = tf.cast(classes, boxes.dtype)
anchor_id = tf.cast(anchor_id, boxes.dtype)
return boxes, classes, anchor_id
def _get_anchor_id(self,
key,
boxes,
classes,
width,
height,
stride,
iou_index=None):
"""Find the object anchor assignments in an anchor based paradigm."""
# find the best anchor
anchors = self.anchors[key]
num_anchors = len(anchors)
if self.best_matches_only:
# get the best anchor for each box
iou_index, _ = get_best_anchor(
boxes,
anchors,
stride,
width=width,
height=height,
best_match_only=True,
iou_thresh=self.match_threshold)
mask = range(num_anchors)
else:
      # search is done across FPN levels, get the mask of anchor indexes
      # correlated to this level.
mask = self.masks[key]
# search for the correct box to use
(boxes, classes,
anchors) = self._tie_breaking_search(iou_index, mask, boxes, classes)
return boxes, classes, anchors, num_anchors
def _get_centers(self, boxes, classes, anchors, width, height, scale_xy):
"""Find the object center assignments in an anchor based paradigm."""
offset = tf.cast(0.5 * (scale_xy - 1), boxes.dtype)
grid_xy, _ = tf.split(boxes, 2, axis=-1)
wh_scale = tf.cast(tf.convert_to_tensor([width, height]), boxes.dtype)
grid_xy = grid_xy * wh_scale
centers = tf.math.floor(grid_xy)
if offset != 0.0:
clamp = lambda x, ma: tf.maximum( # pylint:disable=g-long-lambda
tf.minimum(x, tf.cast(ma, x.dtype)), tf.zeros_like(x))
grid_xy_index = grid_xy - centers
positive_shift = ((grid_xy_index < offset) & (grid_xy > 1.))
negative_shift = ((grid_xy_index > (1 - offset)) & (grid_xy <
(wh_scale - 1.)))
zero, _ = tf.split(tf.ones_like(positive_shift), 2, axis=-1)
shift_mask = tf.concat([zero, positive_shift, negative_shift], axis=-1)
offset = tf.cast([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]],
offset.dtype) * offset
num_shifts = tf.shape(shift_mask)
num_shifts = num_shifts[-1]
boxes = tf.tile(tf.expand_dims(boxes, axis=-2), [1, num_shifts, 1])
classes = tf.tile(tf.expand_dims(classes, axis=-2), [1, num_shifts, 1])
anchors = tf.tile(tf.expand_dims(anchors, axis=-2), [1, num_shifts, 1])
shift_mask = tf.cast(shift_mask, boxes.dtype)
shift_ind = shift_mask * tf.range(0, num_shifts, dtype=boxes.dtype)
shift_ind = shift_ind - (1 - shift_mask)
shift_ind = tf.expand_dims(shift_ind, axis=-1)
boxes_and_centers = tf.concat([boxes, classes, anchors, shift_ind],
axis=-1)
boxes_and_centers = tf.reshape(boxes_and_centers, [-1, 7])
_, center_ids = tf.split(boxes_and_centers, [6, 1], axis=-1)
select = tf.where(center_ids >= 0)
select, _ = tf.split(select, 2, axis=-1)
boxes_and_centers = tf.gather_nd(boxes_and_centers, select)
center_ids = tf.gather_nd(center_ids, select)
center_ids = tf.cast(center_ids, tf.int32)
shifts = tf.gather_nd(offset, center_ids)
boxes, classes, anchors, _ = tf.split(
boxes_and_centers, [4, 1, 1, 1], axis=-1)
grid_xy, _ = tf.split(boxes, 2, axis=-1)
centers = tf.math.floor(grid_xy * wh_scale - shifts)
centers = clamp(centers, wh_scale - 1)
x, y = tf.split(centers, 2, axis=-1)
centers = tf.cast(tf.concat([y, x, anchors], axis=-1), tf.int32)
return boxes, classes, centers
def _get_anchor_free(self, key, boxes, classes, height, width, stride,
                       center_radius):
    """Finds the box assignments in an anchor-free paradigm."""
level_limits = self.anchor_free_level_limits[key]
gen = loss_utils.GridGenerator(anchors=[[1, 1]], scale_anchors=stride)
grid_points = gen(width, height, 1, boxes.dtype)[0]
grid_points = tf.squeeze(grid_points, axis=0)
box_list = boxes
class_list = classes
grid_points = (grid_points + 0.5) * stride
x_centers, y_centers = grid_points[..., 0], grid_points[..., 1]
boxes *= (tf.convert_to_tensor([width, height, width, height]) * stride)
tlbr_boxes = box_ops.xcycwh_to_yxyx(boxes)
boxes = tf.reshape(boxes, [1, 1, -1, 4])
tlbr_boxes = tf.reshape(tlbr_boxes, [1, 1, -1, 4])
if self.use_tie_breaker:
area = tf.reduce_prod(boxes[..., 2:], axis=-1)
    # check if the box is in the receptive field of this FPN level
b_t = y_centers - tlbr_boxes[..., 0]
b_l = x_centers - tlbr_boxes[..., 1]
b_b = tlbr_boxes[..., 2] - y_centers
b_r = tlbr_boxes[..., 3] - x_centers
box_delta = tf.stack([b_t, b_l, b_b, b_r], axis=-1)
if level_limits is not None:
max_reg_targets_per_im = tf.reduce_max(box_delta, axis=-1)
gt_min = max_reg_targets_per_im >= level_limits[0]
gt_max = max_reg_targets_per_im <= level_limits[1]
is_in_boxes = tf.logical_and(gt_min, gt_max)
else:
is_in_boxes = tf.reduce_min(box_delta, axis=-1) > 0.0
is_in_boxes_all = tf.reduce_any(is_in_boxes, axis=(0, 1), keepdims=True)
    # check if the center is in the receptive field of this FPN level
c_t = y_centers - (boxes[..., 1] - center_radius * stride)
c_l = x_centers - (boxes[..., 0] - center_radius * stride)
c_b = (boxes[..., 1] + center_radius * stride) - y_centers
c_r = (boxes[..., 0] + center_radius * stride) - x_centers
centers_delta = tf.stack([c_t, c_l, c_b, c_r], axis=-1)
is_in_centers = tf.reduce_min(centers_delta, axis=-1) > 0.0
is_in_centers_all = tf.reduce_any(is_in_centers, axis=(0, 1), keepdims=True)
    # collate all masks to get the final locations
is_in_index = tf.logical_or(is_in_boxes_all, is_in_centers_all)
is_in_boxes_and_center = tf.logical_and(is_in_boxes, is_in_centers)
is_in_boxes_and_center = tf.logical_and(is_in_index, is_in_boxes_and_center)
if self.use_tie_breaker:
boxes_all = tf.cast(is_in_boxes_and_center, area.dtype)
boxes_all = ((boxes_all * area) + ((1 - boxes_all) * INF))
boxes_min = tf.reduce_min(boxes_all, axis=-1, keepdims=True)
boxes_min = tf.where(boxes_min == INF, -1.0, boxes_min)
is_in_boxes_and_center = boxes_all == boxes_min
# construct the index update grid
reps = tf.reduce_sum(tf.cast(is_in_boxes_and_center, tf.int16), axis=-1)
indexes = tf.cast(tf.where(is_in_boxes_and_center), tf.int32)
y, x, t = tf.split(indexes, 3, axis=-1)
boxes = tf.gather_nd(box_list, t)
classes = tf.cast(tf.gather_nd(class_list, t), boxes.dtype)
reps = tf.gather_nd(reps, tf.concat([y, x], axis=-1))
reps = tf.cast(tf.expand_dims(reps, axis=-1), boxes.dtype)
classes = tf.cast(tf.expand_dims(classes, axis=-1), boxes.dtype)
conf = tf.ones_like(classes)
# return the samples and the indexes
samples = tf.concat([boxes, conf, classes], axis=-1)
indexes = tf.concat([y, x, tf.zeros_like(t)], axis=-1)
return indexes, samples
def build_label_per_path(self,
key,
boxes,
classes,
width,
height,
iou_index=None):
"""Builds the labels for one path."""
stride = self.level_strides[key]
scale_xy = self.center_radius[key] if self.center_radius is not None else 1
width = tf.cast(width // stride, boxes.dtype)
height = tf.cast(height // stride, boxes.dtype)
if self.anchor_free_level_limits is None:
(boxes, classes, anchors, num_anchors) = self._get_anchor_id(
key, boxes, classes, width, height, stride, iou_index=iou_index)
boxes, classes, centers = self._get_centers(boxes, classes, anchors,
width, height, scale_xy)
ind_mask = tf.ones_like(classes)
updates = tf.concat([boxes, ind_mask, classes], axis=-1)
else:
num_anchors = 1
(centers, updates) = self._get_anchor_free(key, boxes, classes, height,
width, stride, scale_xy)
boxes, ind_mask, classes = tf.split(updates, [4, 1, 1], axis=-1)
width = tf.cast(width, tf.int32)
height = tf.cast(height, tf.int32)
full = tf.zeros([height, width, num_anchors, 1], dtype=classes.dtype)
full = tf.tensor_scatter_nd_add(full, centers, ind_mask)
num_instances = int(self.num_instances[key])
centers = preprocessing_ops.pad_max_instances(
centers, num_instances, pad_value=0, pad_axis=0)
updates = preprocessing_ops.pad_max_instances(
updates, num_instances, pad_value=0, pad_axis=0)
updates = tf.cast(updates, self.dtype)
full = tf.cast(full, self.dtype)
return centers, updates, full
def __call__(self, boxes, classes, width, height):
"""Builds the labels for a single image, not functional in batch mode.
Args:
boxes: `Tensor` of shape [None, 4] indicating the object locations in an
image.
classes: `Tensor` of shape [None] indicating the each objects classes.
width: `int` for the images width.
height: `int` for the images height.
Returns:
centers: `Tensor` of shape [None, 3] of indexes in the final grid where
boxes are located.
updates: `Tensor` of shape [None, 8] the value to place in the final grid.
full: `Tensor` of [width/stride, height/stride, num_anchors, 1] holding
        a mask of where boxes are located for confidence losses.
"""
indexes = {}
updates = {}
true_grids = {}
iou_index = None
boxes = box_ops.yxyx_to_xcycwh(boxes)
if not self.best_matches_only and self.anchor_free_level_limits is None:
      # stitch and search boxes across FPN levels
anchorsvec = []
for stitch in self.anchors:
anchorsvec.extend(self.anchors[stitch])
stride = tf.cast([width, height], boxes.dtype)
# get the best anchor for each box
iou_index, _ = get_best_anchor(
boxes,
anchorsvec,
stride,
width=1.0,
height=1.0,
best_match_only=False,
use_tie_breaker=self.use_tie_breaker,
iou_thresh=self.match_threshold)
for key in self.keys:
indexes[key], updates[key], true_grids[key] = self.build_label_per_path(
key, boxes, classes, width, height, iou_index=iou_index)
return indexes, updates, true_grids
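# Illustrative usage sketch added for clarity; it is not part of the original
# module. It labels a single image with the anchor-based path using the
# familiar three-level YOLO anchor configuration; the box, class and image
# size values are arbitrary examples. Boxes are normalized yxyx coordinates.
def _example_yolo_anchor_labeler():
  labeler = YoloAnchorLabeler(
      anchors={
          '3': [[12, 16], [19, 36], [40, 28]],
          '4': [[36, 75], [76, 55], [72, 146]],
          '5': [[142, 110], [192, 243], [459, 401]],
      },
      level_strides={'3': 8, '4': 16, '5': 32})
  boxes = tf.constant([[0.1, 0.1, 0.4, 0.5]])
  classes = tf.constant([2.0])
  return labeler(boxes, classes, width=416, height=416)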
| 19,309 | 39.062241 | 80 | py |
models | models-master/official/projects/yolo/ops/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 610 | 37.1875 | 74 | py |
models | models-master/official/projects/yolo/ops/preprocessing_ops_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocessing_ops.py."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yolo.ops import preprocessing_ops
from official.vision.ops import box_ops as bbox_ops
class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(([1, 2], 20, 0), ([13, 2, 4], 15, 0))
def testPadMaxInstances(self, input_shape, instances, pad_axis):
expected_output_shape = input_shape
expected_output_shape[pad_axis] = instances
output = preprocessing_ops.pad_max_instances(
np.ones(input_shape), instances, pad_axis=pad_axis)
self.assertAllEqual(expected_output_shape, tf.shape(output).numpy())
@parameterized.parameters((100, 200))
def testGetImageShape(self, image_height, image_width):
image = tf.convert_to_tensor(np.random.rand(image_height, image_width, 3))
image_shape = preprocessing_ops.get_image_shape(image)
self.assertAllEqual((image_height, image_width), image_shape)
@parameterized.parameters((400, 600, .5, .5, .0, True),
(100, 200, .5, .5, .5))
def testImageRandHSV(self,
image_height,
image_width,
rh,
rs,
rv,
is_darknet=False):
image = tf.convert_to_tensor(np.random.rand(image_height, image_width, 3))
processed_image = preprocessing_ops.image_rand_hsv(
image, rh, rs, rv, darknet=is_darknet)
processed_image_shape = tf.shape(processed_image)
self.assertAllEqual([image_height, image_width, 3],
processed_image_shape.numpy())
@parameterized.parameters((100, 200, [50, 100]))
def testResizeAndJitterImage(self, image_height, image_width, desired_size):
image = tf.convert_to_tensor(np.random.rand(image_height, image_width, 3))
processed_image, _, _ = preprocessing_ops.resize_and_jitter_image(
image, desired_size)
processed_image_shape = tf.shape(processed_image)
self.assertAllEqual([desired_size[0], desired_size[1], 3],
processed_image_shape.numpy())
@parameterized.parameters((400, 600, [200, 300]))
def testAffineWarpImage(self,
image_height,
image_width,
desired_size,
degrees=7.0,
scale_min=0.1,
scale_max=1.9):
image = tf.convert_to_tensor(np.random.rand(image_height, image_width, 3))
processed_image, _, _ = preprocessing_ops.affine_warp_image(
image,
desired_size,
degrees=degrees,
scale_min=scale_min,
scale_max=scale_max)
processed_image_shape = tf.shape(processed_image)
self.assertAllEqual([desired_size[0], desired_size[1], 3],
processed_image_shape.numpy())
# Working Test
@parameterized.parameters(([[400, 600], [200, 300],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]], 50))
def testAffineWarpBoxes(self, affine, num_boxes):
boxes = tf.convert_to_tensor(np.random.rand(num_boxes, 4))
boxes = bbox_ops.denormalize_boxes(boxes, affine[0])
processed_boxes, _ = preprocessing_ops.affine_warp_boxes(
tf.cast(affine[2], tf.double), boxes, affine[1], box_history=boxes)
processed_boxes_shape = tf.shape(processed_boxes)
self.assertAllEqual([num_boxes, 4], processed_boxes_shape.numpy())
# Working Test
@parameterized.parameters(([100, 100], [[-0.489, 51.28, 0.236, 51.686],
[65, 100, 200, 150],
[150, 80, 200, 130]]))
def testBoxCandidates(self, output_size, boxes):
boxes = tf.cast(bbox_ops.denormalize_boxes(boxes, output_size), tf.double)
clipped_ind = preprocessing_ops.boxes_candidates(
boxes, boxes, ar_thr=1e32, wh_thr=0, area_thr=tf.cast(0, tf.double))
clipped_ind_shape = tf.shape(clipped_ind)
self.assertAllEqual([3], clipped_ind_shape.numpy())
self.assertAllEqual([0, 1, 2], clipped_ind.numpy())
# Working Test
@parameterized.parameters((
50,
[0.5, 0.5],
[0, 0], # Clipping all boxes
[0.0, 0.0]))
def testResizeAndCropBoxes(self, num_boxes, image_scale, output_size, offset):
boxes = tf.convert_to_tensor(np.random.rand(num_boxes, 4))
processed_boxes, _ = preprocessing_ops.resize_and_crop_boxes(
boxes, tf.cast(image_scale, tf.double), output_size,
tf.cast(offset, tf.double), boxes)
processed_boxes_shape = tf.shape(processed_boxes)
self.assertAllEqual([num_boxes, 4], processed_boxes_shape.numpy())
self.assertAllEqual(
tf.math.reduce_sum(processed_boxes), tf.convert_to_tensor(0))
if __name__ == '__main__':
tf.test.main()
| 5,468 | 42.404762 | 80 | py |
models | models-master/official/projects/yolo/ops/kmeans_anchors_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""kmeans_test tests."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yolo.ops import kmeans_anchors
class KMeansTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((9, 3, 100))
def test_kmeans(self, k, anchors_per_scale, samples):
sample_list = []
for _ in range(samples):
boxes = tf.convert_to_tensor(np.random.uniform(0, 1, [k * 100, 4]))
sample_list.append({
"groundtruth_boxes": boxes,
"width": 10,
"height": 10
})
kmeans = kmeans_anchors.AnchorKMeans()
cl = kmeans(
sample_list, k, anchors_per_scale, image_resolution=[512, 512, 3])
cl = tf.convert_to_tensor(cl)
self.assertAllEqual(tf.shape(cl).numpy(), [k, 2])
if __name__ == "__main__":
tf.test.main()
| 1,445 | 31.133333 | 74 | py |
models | models-master/official/projects/yolo/ops/box_ops_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""box_ops tests."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yolo.ops import box_ops
class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((1), (4))
def test_box_conversions(self, num_boxes):
boxes = tf.convert_to_tensor(np.random.rand(num_boxes, 4))
expected_shape = np.array([num_boxes, 4])
xywh_box = box_ops.yxyx_to_xcycwh(boxes)
yxyx_box = box_ops.xcycwh_to_yxyx(boxes)
self.assertAllEqual(tf.shape(xywh_box).numpy(), expected_shape)
self.assertAllEqual(tf.shape(yxyx_box).numpy(), expected_shape)
@parameterized.parameters((1), (5), (7))
def test_ious(self, num_boxes):
boxes = tf.convert_to_tensor(np.random.rand(num_boxes, 4))
expected_shape = np.array([
num_boxes,
])
expected_iou = np.ones([
num_boxes,
])
iou = box_ops.compute_iou(boxes, boxes)
_, giou = box_ops.compute_giou(boxes, boxes)
_, ciou = box_ops.compute_ciou(boxes, boxes)
_, diou = box_ops.compute_diou(boxes, boxes)
self.assertAllEqual(tf.shape(iou).numpy(), expected_shape)
self.assertArrayNear(iou, expected_iou, 0.001)
self.assertArrayNear(giou, expected_iou, 0.001)
self.assertArrayNear(ciou, expected_iou, 0.001)
self.assertArrayNear(diou, expected_iou, 0.001)
if __name__ == '__main__':
tf.test.main()
| 2,005 | 35.472727 | 74 | py |
models | models-master/official/projects/yolo/ops/initializer_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo initializer ops."""
import tensorflow as tf
def pytorch_kernel_initializer(kernel_initializer):
"""Prepare kernel weights initializer to match PyTorch implementation."""
if kernel_initializer == 'VarianceScaling':
return tf.keras.initializers.VarianceScaling(
scale=1 / 3, mode='fan_in', distribution='uniform'
)
return kernel_initializer
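# Illustrative usage sketch added for clarity; it is not part of the original
# module. The helper is typically given a Keras initializer name; when that
# name is 'VarianceScaling' it is swapped for a uniform fan-in initializer
# that mirrors the PyTorch default for convolution kernels.
def _example_pytorch_kernel_initializer():
  initializer = pytorch_kernel_initializer('VarianceScaling')
  return tf.keras.layers.Conv2D(32, 3, kernel_initializer=initializer)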
| 981 | 35.37037 | 75 | py |
models | models-master/official/projects/yolo/losses/yolov7_loss_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo heads."""
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yolo.losses import yolov7_loss
from official.projects.yolo.ops import box_ops
_HEIGHT, _WIDTH = 640, 640
_BATCH_SIZE = 8
_NUM_GTS = 100
_NUM_LAYERS, _NUM_ANCHORS = 3, 3
_NUM_CLASSES = 80
def build_labels():
image_info = tf.constant(
[
[[640, 640], [640, 640], [1.0, 1.0], [0.0, 0.0]]
for _ in range(_BATCH_SIZE)
], dtype=tf.float32
)
box_y1x1 = np.random.rand(_BATCH_SIZE, _NUM_GTS, 2).astype(np.float32)
box_y2x2 = (
np.random.rand(_BATCH_SIZE, _NUM_GTS, 2).astype(np.float32)
* (1 - box_y1x1)
+ box_y1x1
)
boxes_yxyx = tf.concat([box_y1x1, box_y2x2], axis=-1)
num_detections = np.random.randint(_NUM_GTS, size=[_BATCH_SIZE])
classes = np.arange(_NUM_GTS * _BATCH_SIZE).reshape([_BATCH_SIZE, -1])
for i in range(_BATCH_SIZE):
classes[i, num_detections[i]:] = -1
classes = tf.constant(classes, dtype=tf.int32)
return {'image_info': image_info, 'classes': classes, 'bbox': boxes_yxyx}
def build_predictions():
# Scale down by 2^3 because prediction outputs start at level 3.
h, w = _HEIGHT // 8, _WIDTH // 8
predictions = {}
for i in range(_NUM_LAYERS):
shape = [_BATCH_SIZE, h // (2**i), w // (2**i), _NUM_ANCHORS]
p_y1x1 = tf.constant(np.random.rand(*shape, 2), dtype=tf.float32)
p_y2x2 = tf.constant(np.random.rand(*shape, 2), dtype=tf.float32)
# Transform the box from yxyx to xywh.
p_box = box_ops.yxyx_to_xcycwh(tf.concat([p_y1x1, p_y2x2], axis=-1))
p_obj = tf.constant(np.random.rand(*shape, 1), dtype=tf.float32)
p_cls = tf.constant(np.random.rand(*shape, _NUM_CLASSES), dtype=tf.float32)
predictions[str(i + 3)] = tf.concat([p_box, p_obj, p_cls], axis=-1)
return predictions
class YoloV7LossTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
np.random.seed(42)
self._anchors = [
[[12, 16], [19, 36], [40, 28]], # Level 3
[[36, 75], [76, 55], [72, 146]], # Level 4
[[142, 110], [192, 243], [459, 401]], # Level 5
]
self._strides = [8, 16, 32]
@parameterized.product(
gamma=(0.0, 1.5), label_smoothing=(0.0, 0.2), auto_balance=(True, False)
)
def test_loss(self, gamma, label_smoothing, auto_balance):
"""Test YOLOv7 normal loss."""
labels = build_labels()
predictions = build_predictions()
loss = yolov7_loss.YoloV7Loss(
anchors=self._anchors,
strides=self._strides,
input_size=[_HEIGHT, _WIDTH],
gamma=gamma,
label_smoothing=label_smoothing,
num_classes=_NUM_CLASSES,
auto_balance=auto_balance,
)
loss_val = loss(labels, predictions)
losses = loss.report_separate_losses()
logging.info('loss_val: %.6f', loss_val)
logging.info('box_loss: %.6f', losses['box_loss'])
logging.info('obj_loss: %.6f', losses['obj_loss'])
logging.info('cls_loss: %.6f', losses['cls_loss'])
expected_loss_val = (
losses['box_loss'] + losses['obj_loss'] + losses['cls_loss']
) * _BATCH_SIZE
self.assertNear(loss_val, expected_loss_val, err=1e-6)
@parameterized.product(
gamma=(0.0, 1.5), label_smoothing=(0.0, 0.2), auto_balance=(True, False)
)
def test_loss_ota(self, gamma, label_smoothing, auto_balance):
"""Test YOLOv7 OTA loss."""
labels = build_labels()
predictions = build_predictions()
loss = yolov7_loss.YoloV7LossOTA(
anchors=self._anchors,
strides=self._strides,
input_size=[_HEIGHT, _WIDTH],
gamma=gamma,
label_smoothing=label_smoothing,
num_classes=_NUM_CLASSES,
auto_balance=auto_balance,
)
loss_val = loss(labels, predictions)
losses = loss.report_separate_losses()
logging.info('loss_val: %.6f', loss_val)
logging.info('box_loss: %.6f', losses['box_loss'])
logging.info('obj_loss: %.6f', losses['obj_loss'])
logging.info('cls_loss: %.6f', losses['cls_loss'])
expected_loss_val = (
losses['box_loss'] + losses['obj_loss'] + losses['cls_loss']
) * _BATCH_SIZE
self.assertNear(loss_val, expected_loss_val, err=1e-6)
if __name__ == '__main__':
tf.test.main()
| 4,904 | 32.827586 | 79 | py |
models | models-master/official/projects/yolo/losses/yolo_loss_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo heads."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.losses import yolo_loss
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(True),
(False),
)
def test_loss_init(self, scaled):
"""Test creation of YOLO family models."""
def inpdict(input_shape, dtype=tf.float32):
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=dtype)
return inputs
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 255],
'4': [1, 26, 26, 255],
'5': [1, 13, 13, 255]
}
classes = 80
anchors = {
'3': [[12.0, 19.0], [31.0, 46.0], [96.0, 54.0]],
'4': [[46.0, 114.0], [133.0, 127.0], [79.0, 225.0]],
'5': [[301.0, 150.0], [172.0, 286.0], [348.0, 340.0]]
}
keys = ['3', '4', '5']
path_strides = {key: 2**int(key) for key in keys}
loss = yolo_loss.YoloLoss(
keys,
classes,
anchors,
path_strides=path_strides,
truth_thresholds={key: 1.0 for key in keys},
ignore_thresholds={key: 0.7 for key in keys},
loss_types={key: 'ciou' for key in keys},
iou_normalizers={key: 0.05 for key in keys},
cls_normalizers={key: 0.5 for key in keys},
object_normalizers={key: 1.0 for key in keys},
objectness_smooths={key: 1.0 for key in keys},
box_types={key: 'scaled' for key in keys},
scale_xys={key: 2.0 for key in keys},
max_deltas={key: 30.0 for key in keys},
label_smoothing=0.0,
use_scaled_loss=scaled,
update_on_repeat=True)
count = inpdict({
'3': [1, 52, 52, 3, 1],
'4': [1, 26, 26, 3, 1],
'5': [1, 13, 13, 3, 1]
})
ind = inpdict({
'3': [1, 300, 3],
'4': [1, 300, 3],
'5': [1, 300, 3]
}, tf.int32)
truths = inpdict({'3': [1, 300, 6], '4': [1, 300, 6], '5': [1, 300, 6]})
boxes = tf.ones([1, 300, 4], dtype=tf.float32)
classes = tf.ones([1, 300], dtype=tf.float32)
gt = {
'true_conf': count,
'inds': ind,
'upds': truths,
'bbox': boxes,
'classes': classes
}
_, _, _ = loss(gt, inpdict(input_shape))
if __name__ == '__main__':
tf.test.main()
| 2,998 | 29.292929 | 76 | py |
models | models-master/official/projects/yolo/losses/yolov7_loss.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 loss function."""
import tensorflow as tf
from official.projects.yolo.ops import box_ops
from official.vision.losses import focal_loss
_LAYER_BALANCE = {
'3': [4.0, 1.0, 0.4],
'5': [4.0, 1.0, 0.25, 0.06, 0.02],
}
def smooth_bce_targets(eps=0.1):
"""Computes positive, negative label smoothing BCE targets.
https://arxiv.org/pdf/1902.04103.pdf equation 3.
Args:
eps: a float number from [0, 1] representing label smoothing factor.
Returns:
Positive and negative targets after label smoothing.
"""
return 1.0 - 0.5 * eps, 0.5 * eps
def merge_labels(labels):
"""Converts the ground-truth labels into loss targets."""
boxes = box_ops.yxyx_to_xcycwh(labels['bbox'])
classes = tf.cast(labels['classes'], boxes.dtype)
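  # The merged target is laid out per box as [class, xc, yc, w, h].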
return tf.concat([classes[..., None], boxes], axis=-1)
class YoloV7Loss(tf.keras.losses.Loss):
"""YOLOv7 loss function."""
def __init__(
self,
anchors,
strides,
input_size,
alpha=0.25,
gamma=1.5,
box_weight=0.05,
obj_weight=0.7,
cls_weight=0.3,
label_smoothing=0.0,
anchor_threshold=4.0,
iou_mix_ratio=1.0,
num_classes=80,
auto_balance=False,
reduction=tf.keras.losses.Reduction.NONE,
name=None,
):
"""Constructor for YOLOv7 loss.
Follows the implementation here:
https://github.com/WongKinYiu/yolov7/blob/main/utils/loss.py#L422
Args:
      anchors: a 2D array representing the anchors used at each level.
      strides: a 1D array representing the strides. Note that all numbers
        should be a power of 2, and they usually start with level 3 and end at
        level 5 or 7. Therefore, the list should usually be [8, 16, 32] or
        [8, 16, 32, 64, 128].
input_size: a list containing the height and width of the input image.
alpha: alpha for focal loss.
gamma: gamma for focal loss. If set to 0, focal loss will be disabled.
box_weight: float weight scalar applied to bounding box loss.
obj_weight: float weight scalar applied to objectness loss.
cls_weight: float weight scalar applied to class loss.
label_smoothing: small float number used to compute positive and negative
targets. If set to 0, the positive targets will be 1 and negative
targets will be 0.
anchor_threshold: threshold for the anchor matching. Larger number allows
more displacements between anchors and targets.
iou_mix_ratio: float ratio to mix the IoU score with the positive target,
which is 1.
num_classes: number of classes.
auto_balance: a boolean flag that indicates whether auto balance should be
used. If used, the default balance factors will automatically update
for each batch.
      reduction: Reduction method. Should be set to None at all times as this
        loss module always outputs a loss scalar.
name: Optional name for the loss.
"""
    # Loss required fields.
    # `alpha` and `gamma` are stored so that `get_config` can report them.
    self._alpha = alpha
    self._gamma = gamma
    self._num_classes = num_classes
self._num_layers = len(strides)
self._num_anchors = len(anchors[0])
self._anchors = anchors
self._strides = strides
self._input_size = input_size
self._iou_mix_ratio = iou_mix_ratio
# Scale down anchors by the strides to match the feature map.
for i, stride in enumerate(strides):
self._anchors[i] = tf.constant(self._anchors[i], tf.float32) / stride
self._anchor_threshold = anchor_threshold
self._pos_targets, self._neg_targets = smooth_bce_targets(label_smoothing)
if gamma > 0:
self._cls_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='cls_loss')
self._obj_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='obj_loss')
else:
self._cls_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
self._obj_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
# Weight to combine losses
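    # The objectness and class weights below are rescaled relative to a
    # reference setup of 640x640 inputs and 80 classes (both factors are 1 in
    # that case).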
self._box_weight = box_weight
self._obj_weight = obj_weight * input_size[0] / 640 * input_size[1] / 640
self._cls_weight = cls_weight * num_classes / 80
# Layer balance scalar
self._balance = _LAYER_BALANCE[str(self._num_layers)][:]
for i, bal in enumerate(self._balance):
self._balance[i] = tf.constant(bal, tf.float32)
self._auto_balance = auto_balance
assert 16 in strides, (
        'Expected level 4 (stride of 16) to always exist in the strides, '
        'received %s'
% strides
)
self._ssi = list(strides).index(16) if auto_balance else 0 # stride 16 idx
super().__init__(reduction=reduction, name=name)
def call(self, labels, predictions):
labels = merge_labels(labels)
p = {}
for key in predictions:
# [batch_size, num_anchors, height, width, num_classes + boxes + obj]
p[key] = tf.transpose(predictions[key], [0, 3, 1, 2, 4])
cls_loss, box_loss, obj_loss, iou_metric = [tf.zeros(1) for _ in range(4)]
total_num_matchings = tf.zeros(1)
total_num_gts = tf.reduce_sum(tf.cast(labels[..., 0] != -1, tf.float32))
masks, indices, anchors, cls_targets, box_targets = self._build_targets(
labels, p)
batch_size = tf.shape(indices)[0]
layer_shape = [batch_size, self._num_layers, -1]
# [anchor_indices, grid_js, grid_is]
masks = tf.reshape(masks, layer_shape)
indices = tf.reshape(indices, [*layer_shape, 3])
anchors = tf.reshape(anchors, [*layer_shape, 2])
cls_targets = tf.reshape(cls_targets, layer_shape)
box_targets = tf.reshape(box_targets, [*layer_shape, 4])
# Losses
for layer_key, layer_pred in p.items():
i = int(layer_key) - 3
obj_targets = tf.zeros_like(layer_pred[..., 0])
layer_masks = masks[:, i]
num_matchings = tf.reduce_sum(tf.cast(layer_masks, tf.int32))
total_num_matchings += tf.cast(num_matchings, tf.float32)
if num_matchings > 0:
layer_indices = indices[:, i]
batch_indices = tf.tile(
tf.range(batch_size)[:, None], [1, tf.shape(layer_indices)[1]]
)[..., None]
layer_indices = tf.concat([batch_indices, layer_indices], axis=-1)
layer_indices = tf.boolean_mask(layer_indices, layer_masks)
layer_anchors = tf.boolean_mask(anchors[:, i], layer_masks)
layer_cls_targets = tf.boolean_mask(cls_targets[:, i], layer_masks)
layer_box_targets = tf.boolean_mask(box_targets[:, i], layer_masks)
# In the same shape of layer_target.
matched_pred = tf.gather_nd(layer_pred, layer_indices)
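        # Decode the matched predictions: the center offset is constrained to
        # (-0.5, 1.5) grid cells and the width/height to (0, 4) times the
        # anchor size.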
pred_xcyc = tf.sigmoid(matched_pred[..., :2]) * 2 - 0.5
pred_wh = (
tf.square(tf.sigmoid(matched_pred[..., 2:4]) * 2) * layer_anchors)
pred_xcycwh = tf.concat([pred_xcyc, pred_wh], axis=-1)
_, ciou = box_ops.compute_ciou(pred_xcycwh, layer_box_targets)
box_loss += tf.reduce_mean(1.0 - ciou)
iou_metric += tf.reduce_mean(ciou)
# Compute classification loss.
if self._num_classes > 1: # cls loss (only if multiple classes)
t = tf.one_hot(
layer_cls_targets,
self._num_classes,
on_value=self._pos_targets,
off_value=self._neg_targets,
)
cls_loss += tf.reduce_mean(
self._cls_loss_fn(t, matched_pred[..., 5:]))
# Compute objectness loss.
iou_ratio = tf.cast(
(1.0 - self._iou_mix_ratio)
+ (self._iou_mix_ratio * tf.maximum(tf.stop_gradient(ciou), 0)),
obj_targets.dtype,
)
obj_targets = tf.tensor_scatter_nd_max(
obj_targets, layer_indices, iou_ratio
)
layer_obj_loss = tf.reduce_mean(
self._obj_loss_fn(obj_targets, layer_pred[..., 4])
)
obj_loss += layer_obj_loss * self._balance[i]
# Updates the balance factor, which is a moving average of previous
# factor at the same level.
if self._auto_balance:
self._balance[i] = self._balance[
i
] * 0.9999 + 0.0001 / tf.stop_gradient(layer_obj_loss)
# Re-balance the factors so that stride at self._ssi always receives 1.
if self._auto_balance:
self._balance = [x / self._balance[self._ssi] for x in self._balance]
box_loss *= self._box_weight
obj_loss *= self._obj_weight
cls_loss *= self._cls_weight
self._box_loss = tf.stop_gradient(box_loss)
self._obj_loss = tf.stop_gradient(obj_loss)
self._cls_loss = tf.stop_gradient(cls_loss)
self._iou = tf.stop_gradient(iou_metric) / self._num_layers
self._num_matchings = tf.stop_gradient(
total_num_matchings) / tf.cast(batch_size, tf.float32)
self._num_gts = tf.stop_gradient(
total_num_gts) / tf.cast(batch_size, tf.float32)
loss = box_loss + obj_loss + cls_loss
return loss * tf.cast(batch_size, loss.dtype)
def _build_targets(self, labels, predictions):
"""Finds three matching anchors for each ground-truth."""
label_shape = tf.shape(labels)
batch_size, max_boxes = label_shape[0], label_shape[1]
masks, indices, anch = [], [], []
cls_targets, box_targets = [], []
anchor_indices = tf.tile(
tf.range(self._num_anchors, dtype=tf.float32)[None, None],
[batch_size, max_boxes, 1],
)
# Append anchor indices to labels.
labels = tf.tile(labels[:, :, None], [1, 1, self._num_anchors, 1])
labels = tf.concat([labels, anchor_indices[..., None]], axis=-1)
    # Bias is used to determine the matching. 0.5 means also matching anchors
    # that fall within 0.5 grid cells in the feature map. For instance, a box
    # whose center is at (15.6, 35.4) will match the anchors at [15, 35],
    # [16, 35], and [15, 34].
bias = 0.5 # bias
off = (
tf.constant(
[
[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j, k, l, m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
],
tf.float32,
)
* bias
) # offsets
for i in range(self._num_layers):
anchors = self._anchors[i]
_, _, h, w, _ = predictions[str(i + 3)].get_shape().as_list()
gain = tf.constant([1, w, h, w, h, 1], dtype=tf.float32)
t = labels * gain
# Filter out targets that do not match the current anchors.
wh_ratio = t[..., 3:5] / tf.cast(anchors[None, None], tf.float32)
labels_mask = tf.less(
tf.reduce_max(tf.maximum(wh_ratio, 1.0 / wh_ratio), axis=-1),
self._anchor_threshold,
)[..., None]
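      # A target is kept for an anchor only if both its width and height are
      # within a factor of `anchor_threshold` of that anchor's dimensions.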
# Compute valid mask for ground-truths.
labels_mask = tf.logical_and(t[..., :1] != -1, labels_mask)
labels_mask = tf.reshape(labels_mask, [batch_size, -1])
t = tf.reshape(t, [batch_size, -1, 6])
# Find the matching offsets for valid labels.
gxy = t[..., 1:3] # grid xy
gxi = gain[1:3] - gxy # inverse
j, k = tf.split((gxy % 1.0 < bias) & (gxy >= 1.0), 2, axis=-1)
l, m = tf.split((gxi % 1.0 < bias) & (gxi >= 1.0), 2, axis=-1)
j, k, l, m = j[..., 0], k[..., 0], l[..., 0], m[..., 0]
# Note that j and l, k and m are conjugate to each other, so at most one
# of them will be True during running. Therefore, we can reduce memory
# usage by gathering the selected index.
x_map = tf.cast(tf.stack([j, l], axis=-1), tf.int8)
y_map = tf.cast(tf.stack([k, m], axis=-1), tf.int8)
# Add the indices offsets.
x_indices = tf.argmax(x_map, axis=-1) * 2 + 1
y_indices = tf.argmax(y_map, axis=-1) * 2 + 2
three_targets_indices = tf.stack(
[tf.zeros_like(x_indices), x_indices, y_indices], axis=-1
)[..., None]
# Gather the selected 3 targets from the 5-target map.
j = tf.stack([tf.ones_like(j), j, k, l, m], axis=-1)
three_targets_mask = tf.gather_nd(j, three_targets_indices, batch_dims=2)
labels_mask = tf.tile(labels_mask[:, :, None], [1, 1, 5])
t = tf.tile(t[:, :, None], [1, 1, 5, 1])
labels_mask = tf.gather_nd(
labels_mask, three_targets_indices, batch_dims=2
)
t = tf.gather_nd(t, three_targets_indices, batch_dims=2)
offsets = tf.zeros_like(gxy)[:, :, None] + off[None, None]
offsets = tf.gather_nd(offsets, three_targets_indices, batch_dims=2)
cls_target = tf.cast(t[..., 0], tf.int32)
gxy, gwh = t[..., 1:3], t[..., 3:5]
# Find the actual grid locations.
gij = tf.cast(gxy - offsets * 2, tf.int32)
gi, gj = tf.split(gij, 2, axis=-1)
gi, gj = gi[..., 0], gj[..., 0]
# Append the result.
anchor_idx = tf.cast(t[..., 5], tf.int32)
gain = tf.cast(gain, tf.int32)
gi = tf.clip_by_value(gi, 0, gain[2] - 1)
gj = tf.clip_by_value(gj, 0, gain[3] - 1)
gij = tf.stack([gi, gj], axis=-1)
labels_mask = tf.logical_and(labels_mask, three_targets_mask)
masks.append(labels_mask)
indices.append(tf.stack([anchor_idx, gj, gi], axis=-1))
anch.append(tf.gather(anchors, anchor_idx))
cls_targets.append(cls_target)
box_targets.append(
tf.concat([gxy - tf.cast(gij, tf.float32), gwh], axis=-1)) # box
# [batch_size, num_layers, num_anchors * max_boxes, num_targets]
masks = tf.stack(masks, axis=1)
indices = tf.stack(indices, axis=1)
anch = tf.stack(anch, axis=1)
cls_targets = tf.stack(cls_targets, axis=1)
box_targets = tf.stack(box_targets, axis=1)
return masks, indices, anch, cls_targets, box_targets
def report_separate_losses(self):
return {
'box_loss': self._box_loss,
'obj_loss': self._obj_loss,
'cls_loss': self._cls_loss,
'iou': self._iou,
}
def report_stats(self):
return {
'num_gts': self._num_gts,
'num_matchings': self._num_matchings,
# No duplicates.
'num_duplicates': tf.constant(0),
}
def get_config(self):
config = {
'alpha': self._alpha,
'gamma': self._gamma,
'box_weight': self._box_weight,
'obj_weight': self._obj_weight,
'cls_weight': self._cls_weight,
'pos_targets': self._pos_targets,
'neg_targets': self._neg_targets,
'num_classes': self._num_classes,
'num_layers': self._num_layers,
'num_anchors': self._num_anchors,
'auto_balance': self._auto_balance,
'balance': self._balance,
'strides': self._strides,
'anchors': self._anchors,
'input_size': self._input_size,
'anchor_threshold': self._anchor_threshold,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class YoloV7LossOTA(tf.keras.losses.Loss):
"""YOLOv7 loss function with OTA.
  OTA (Optimal Transport Assignment) uses the Sinkhorn-Knopp algorithm to
  compute a matching between anchors and ground-truth labels.
Paper: https://arxiv.org/pdf/2103.14259.pdf
"""
def __init__(
self,
anchors,
strides,
input_size,
alpha=0.25,
gamma=1.5,
box_weight=0.05,
obj_weight=0.7,
cls_weight=0.3,
iou_weight=3.0,
label_smoothing=0.0,
anchor_threshold=4.0,
iou_mix_ratio=1.0,
num_classes=80,
auto_balance=False,
reduction=tf.keras.losses.Reduction.NONE,
name=None,
):
"""Constructor for YOLOv7 loss OTA.
Follows the implementation here:
https://github.com/WongKinYiu/yolov7/blob/main/utils/loss.py#L556
Args:
      anchors: a 2D array representing the anchors used at each level.
      strides: a 1D array representing the strides. Note that all numbers
        should be a power of 2, and they usually start with level 3 and end at
        level 5 or 7. Therefore, the list should usually be [8, 16, 32] or
        [8, 16, 32, 64, 128].
input_size: a list containing the height and width of the input image.
alpha: alpha for focal loss.
gamma: gamma for focal loss. If set to 0, focal loss will be disabled.
box_weight: float weight scalar applied to bounding box loss.
obj_weight: float weight scalar applied to objectness loss.
cls_weight: float weight scalar applied to class loss.
      iou_weight: float weight scalar used to mix the class loss and the IoU
        loss when constructing the cost matrix.
label_smoothing: small float number used to compute positive and negative
targets. If set to 0, the positive targets will be 1 and negative
targets will be 0.
anchor_threshold: threshold for the anchor matching. Larger number allows
more displacements between anchors and targets.
iou_mix_ratio: float ratio to mix the IoU score with the positive target,
which is 1.
num_classes: number of classes.
auto_balance: a boolean flag that indicates whether auto balance should be
used. If used, the default balance factors will automatically update for
each batch.
      reduction: Reduction method. Should be set to None at all times as this
        loss module always outputs a loss scalar.
name: Optional name for the loss.
"""
    # Loss required fields.
    # `alpha` and `gamma` are stored so that `get_config` can report them.
    self._alpha = alpha
    self._gamma = gamma
    self._num_classes = num_classes
self._num_layers = len(strides)
self._num_anchors = len(anchors[0])
self._anchors = []
self._strides = strides
self._input_size = input_size
self._iou_mix_ratio = iou_mix_ratio
# Scale down anchors by the strides to match the feature map.
for i, stride in enumerate(strides):
self._anchors.append(tf.constant(anchors[i], tf.float32) / stride)
self._anchor_threshold = anchor_threshold
self._pos_targets, self._neg_targets = smooth_bce_targets(label_smoothing)
if gamma > 0:
self._cls_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='cls_loss')
self._obj_loss_fn = focal_loss.FocalLoss(
alpha=alpha, gamma=gamma, reduction=reduction, name='obj_loss')
else:
self._cls_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
self._obj_loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
# Weight to combine losses
self._box_weight = box_weight
self._obj_weight = obj_weight * input_size[0] / 640 * input_size[1] / 640
self._cls_weight = cls_weight * num_classes / 80
# Weight to construct cost matrix
self._iou_weight = iou_weight
# Layer balance scalar
self._balance = _LAYER_BALANCE[str(self._num_layers)][:]
for i, bal in enumerate(self._balance):
self._balance[i] = tf.constant(bal, tf.float32)
self._auto_balance = auto_balance
assert 16 in strides, (
        'Expected level 4 (stride of 16) to always exist in the strides, '
        'received %s'
% strides
)
self._ssi = list(strides).index(16) if auto_balance else 0 # stride 16 idx
super().__init__(reduction=reduction, name=name)
def call(self, labels, predictions):
"""Comptues the OTA loss.
Args:
labels: a dictionary contains the following required keys:
- classes: class indices in shape [batch_size, max_num_instances].
- bbox: bounding boxes in shape [batch_size, max_num_instances, 4].
- image_info: image info in shape [batch_size, 4, 2].
predictions: a dictionary contains model outputs at different layers.
They are in shape of [batch_size, h_at_level, w_at_level, num_anchors,
num_classes + 4 (box coordinates) + 1 (objectness)].
Returns:
The scaled loss (up by batch size) from OTA.
"""
image_info = labels['image_info']
# Convert labels dictionary into tensors.
labels = merge_labels(labels)
p = {}
for key in predictions:
# [batch_size, num_anchors, height, width, num_classes + boxes + obj]
p[key] = tf.transpose(predictions[key], [0, 3, 1, 2, 4])
cls_loss, box_loss, obj_loss, iou_metric = [tf.zeros(1) for _ in range(4)]
total_num_matchings = tf.zeros(1)
total_num_gts = tf.reduce_sum(tf.cast(labels[..., 0] != -1, tf.float32))
(matched_indices, matched_anchors, matched_mask, matched_targets,
num_duplicates) = self._build_targets(labels, p, image_info)
    # Get the [width, height, width, height] gain of each feature map.
pre_gen_gains = [
tf.gather(tf.shape(p[str(i + 3)]), [3, 2, 3, 2])
for i in range(self._num_layers)
]
batch_size = tf.shape(matched_indices)[0]
layer_shape = [batch_size, self._num_layers, -1]
# [anchor_indices, grid_js, grid_is]
masks = tf.reshape(matched_mask, layer_shape)
indices = tf.reshape(matched_indices, [*layer_shape, 3])
anchors = tf.reshape(matched_anchors, [*layer_shape, 2])
targets = tf.reshape(matched_targets, [*layer_shape, 5])
# Losses
for layer_idx, layer_pred in p.items():
# Always assume the output level starts with 3.
i = int(layer_idx) - 3
obj_targets = tf.zeros_like(layer_pred[..., 0])
# Get layer inputs
layer_masks = masks[:, i]
num_matchings = tf.reduce_sum(tf.cast(layer_masks, tf.int32))
total_num_matchings += tf.cast(num_matchings, tf.float32)
if num_matchings > 0:
layer_indices = indices[:, i]
batch_indices = tf.tile(
tf.range(batch_size)[:, None], [1, tf.shape(layer_indices)[1]]
)[..., None]
layer_indices = tf.concat([batch_indices, layer_indices], axis=-1)
layer_indices = tf.boolean_mask(layer_indices, layer_masks)
layer_anchors = tf.boolean_mask(anchors[:, i], layer_masks)
layer_targets = tf.boolean_mask(targets[:, i], layer_masks)
layer_cls_targets = tf.cast(layer_targets[:, 0], tf.int32)
layer_box_targets = layer_targets[:, 1:]
# In the same shape of layer_target.
matched_pred = tf.gather_nd(layer_pred, layer_indices)
pred_xcyc = tf.sigmoid(matched_pred[..., :2]) * 2 - 0.5
pred_wh = (
tf.square(tf.sigmoid(matched_pred[..., 2:4]) * 2) * layer_anchors)
pred_xcycwh = tf.concat([pred_xcyc, pred_wh], axis=-1)
grid = tf.cast(
tf.stack(
[
layer_indices[:, 3], # gi
layer_indices[:, 2], # gj
tf.zeros_like(layer_indices[:, 0]),
tf.zeros_like(layer_indices[:, 0]),
],
axis=-1,
),
tf.float32,
)
target_xcycwh = layer_box_targets * tf.cast(
pre_gen_gains[i], layer_targets.dtype
)
target_xcycwh -= grid
_, ciou = box_ops.compute_ciou(target_xcycwh, pred_xcycwh)
box_loss += tf.reduce_mean(1.0 - ciou)
iou_metric += tf.reduce_mean(ciou)
# Compute classification loss.
if self._num_classes > 1: # cls loss (only if multiple classes)
t = tf.one_hot(
layer_cls_targets,
self._num_classes,
on_value=self._pos_targets,
off_value=self._neg_targets,
)
cls_loss += tf.reduce_mean(
self._cls_loss_fn(t, matched_pred[..., 5:]))
# Compute objectness loss.
iou_ratio = tf.cast(
(1.0 - self._iou_mix_ratio)
+ (self._iou_mix_ratio * tf.maximum(tf.stop_gradient(ciou), 0)),
obj_targets.dtype,
)
obj_targets = tf.tensor_scatter_nd_max(
obj_targets, layer_indices, iou_ratio
)
layer_obj_loss = tf.reduce_mean(
self._obj_loss_fn(obj_targets, layer_pred[..., 4])
)
obj_loss += layer_obj_loss * self._balance[i]
# Updates the balance factor, which is a moving average of previous
# factor at the same level.
if self._auto_balance:
self._balance[i] = self._balance[
i
] * 0.9999 + 0.0001 / tf.stop_gradient(layer_obj_loss)
# Re-balance the factors so that stride at self._ssi always receives 1.
if self._auto_balance:
self._balance = [x / self._balance[self._ssi] for x in self._balance]
# Keep separate losses for summary purpose.
box_loss *= self._box_weight
obj_loss *= self._obj_weight
cls_loss *= self._cls_weight
self._iou = tf.stop_gradient(iou_metric) / self._num_layers
self._num_matchings = tf.stop_gradient(
total_num_matchings) / tf.cast(batch_size, tf.float32)
self._num_gts = total_num_gts / tf.cast(batch_size, tf.float32)
self._num_duplicates = tf.stop_gradient(
num_duplicates) / tf.cast(batch_size, tf.float32)
self._box_loss = tf.stop_gradient(box_loss)
self._obj_loss = tf.stop_gradient(obj_loss)
self._cls_loss = tf.stop_gradient(cls_loss)
loss = box_loss + obj_loss + cls_loss
# Scale up the loss by batch size.
return loss * tf.cast(batch_size, loss.dtype)
def _build_targets(self, labels, predictions, image_info):
"""Finds the matching targets using Sinkhorn-Knopp."""
# Find the three positives matching first for predictions.
masks, indices, anchors = self._find_three_positives(labels, predictions)
batch_size = tf.shape(masks)[0]
# Collect the predictions.
p_box, p_cls, p_obj = [], [], []
for layer_key, layer_p in predictions.items():
# Always assume level starts from 3.
i = int(layer_key) - 3
layer_indices = tf.reshape(indices[:, i], [batch_size, -1, 3])
anchor = tf.reshape(anchors[:, i], [batch_size, -1, 2])
fg_pred = tf.gather_nd(layer_p, layer_indices, batch_dims=1)
grid = tf.stack([layer_indices[..., 2], layer_indices[..., 1]], axis=-1)
grid = tf.cast(grid, fg_pred.dtype)
pxy = (tf.sigmoid(fg_pred[..., :2]) * 2 - 0.5 + grid) * self._strides[i]
pwh = (
tf.square(tf.sigmoid(fg_pred[..., 2:4]) * 2)
* anchor
* self._strides[i]
)
pxywh = tf.concat([pxy, pwh], axis=-1)
p_box.append(pxywh)
p_obj.append(fg_pred[..., 4:5])
p_cls.append(fg_pred[..., 5:])
p_box = tf.concat(p_box, axis=1)
p_cls = tf.concat(p_cls, axis=1)
p_obj = tf.concat(p_obj, axis=1)
# Compute valid masks for both targets and predictions.
t_mask = labels[..., 0] != -1
p_mask = tf.reshape(masks, [batch_size, -1])
# [anchor_idx, gj, gi]
indices = tf.reshape(indices, [batch_size, -1, 3])
anchors = tf.reshape(anchors, [batch_size, -1, 2])
num_preds = tf.shape(p_box)[1]
num_gts = tf.shape(labels)[1]
# Computes pair-wise IoU.
t_box = labels[..., 1:5] * tf.tile(image_info[0, 1], [2])
pair_wise_iou = box_ops.compute_iou(t_box[:, :, None], p_box[:, None])
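    # The localization cost is -log(IoU); it grows rapidly as the overlap
    # approaches zero, so poorly overlapping pairs receive a large cost.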
pair_wise_iou_loss = -tf.math.log(pair_wise_iou + 1e-8)
# Computes pair-wise class loss.
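    # The class score is the geometric mean of the class and objectness
    # probabilities; it is converted back to logits below so that the sigmoid
    # cross-entropy can be reused.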
y = tf.sqrt(tf.sigmoid(p_cls) * tf.sigmoid(p_obj))
# Add 1e-9 to avoid nan.
logits = tf.math.log(y / (1 - y + 1e-9) + 1e-9)
logits = tf.tile(logits[:, None], [1, num_gts, 1, 1])
t_cls = tf.cast(labels[..., 0], tf.int32)
class_labels = tf.one_hot(t_cls, self._num_classes, dtype=tf.float32)
class_labels = tf.tile(class_labels[:, :, None], [1, 1, num_preds, 1])
pair_wise_cls_loss = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(class_labels, logits), axis=-1
)
# Compute the cost matrix and its corresponding valid mask.
cost_mask = tf.logical_and(t_mask[..., None], p_mask[:, None])
    cost = tf.stop_gradient(
        pair_wise_cls_loss + self._iou_weight * pair_wise_iou_loss)
largest_cost = tf.reduce_max(cost)
# Set invalid IoU to 0.0 for top_k.
valid_iou = tf.where(cost_mask, pair_wise_iou, tf.zeros_like(pair_wise_iou))
# Compute top-10 IoUs from valid IoUs for each target.
# When matched predictions is smaller than 10, we only want the top-k where
# k is the total size of the matched predictions (k < 10).
top_k_mask = tf.less(
tf.range(10)[None],
tf.minimum(10, tf.reduce_sum(tf.cast(p_mask, tf.int32), axis=-1))[
:, None
],
)
top_k_mask = tf.logical_and(top_k_mask[:, None], t_mask[..., None])
top_k, _ = tf.nn.top_k(valid_iou, k=10)
top_k = tf.where(top_k_mask, top_k, tf.zeros_like(top_k))
# Use top_k to compute the dynamic ks for target matching. Each target_i can
# match to k_i predictions, and k_i is computed based on the pair-wise
# valid IoU.
dynamic_ks = tf.maximum(tf.cast(tf.reduce_sum(top_k, axis=-1), tf.int32), 1)
dynamic_ks = tf.where(t_mask, dynamic_ks, tf.zeros_like(dynamic_ks))
dynamic_ks = tf.stop_gradient(dynamic_ks)
dynamic_mask = tf.range(10)[None, None] < dynamic_ks[..., None]
# Set the invalid field to maximum cost so that they won't be selected
# during matching.
cost = tf.where(cost_mask, cost, tf.ones_like(cost) * (largest_cost + 1))
matching_matrix = tf.zeros_like(cost, dtype=tf.int32)
_, pred_idx = tf.nn.top_k(-cost, k=10)
# Update matching matrix.
# [batch_size, num_gts, 10]
batch_idx = tf.tile(tf.range(batch_size)[:, None, None], [1, num_gts, 10])
gt_idx = tf.tile(tf.range(num_gts)[None, :, None], [batch_size, 1, 10])
matched_indices = tf.stack([batch_idx, gt_idx, pred_idx], axis=-1)
matching_matrix = tf.tensor_scatter_nd_add(
matching_matrix,
matched_indices,
tf.cast(dynamic_mask, matching_matrix.dtype),
)
# Detect if there is a detection matches to multiple targets, if so, we
# assign it to the target with minimum cost.
duplicate_mask = tf.reduce_sum(matching_matrix, axis=1) > 1
num_duplicates = tf.reduce_sum(tf.cast(duplicate_mask, tf.float32))
cost_argmin = tf.argmin(cost, axis=1, output_type=tf.int32)
remove_mask = tf.tile(duplicate_mask[:, None], [1, num_gts, 1])
matching_matrix = tf.where(
remove_mask, tf.zeros_like(matching_matrix), matching_matrix)
min_mask = tf.equal(
tf.tile(tf.range(num_gts)[None, :, None], [batch_size, 1, num_preds]),
cost_argmin[:, None],
)
update_mask = tf.logical_and(min_mask, duplicate_mask[:, None])
matching_matrix = tf.where(
update_mask, tf.ones_like(matching_matrix), matching_matrix)
# Find the final matching and collect the matched targets.
matched_gt_indices = tf.argmax(
matching_matrix, axis=1, output_type=tf.int32
)
matched_mask = tf.reduce_sum(matching_matrix, axis=1) > 0
matched_targets = tf.gather_nd(
labels, matched_gt_indices[..., None], batch_dims=1
)
return indices, anchors, matched_mask, matched_targets, num_duplicates
def _find_three_positives(self, labels, predictions):
"""Finds three matching anchors for each ground-truth."""
label_shape = tf.shape(labels)
batch_size, max_boxes = label_shape[0], label_shape[1]
masks, indices, anch = [], [], []
anchor_indices = tf.tile(
tf.range(self._num_anchors, dtype=tf.float32)[None, None],
[batch_size, max_boxes, 1],
)
# Append anchor indices to labels.
labels = tf.tile(labels[:, :, None], [1, 1, self._num_anchors, 1])
labels = tf.concat([labels, anchor_indices[..., None]], axis=-1)
    # Bias is used to determine the matching. 0.5 means also matching anchors
    # that fall within 0.5 grid cells in the feature map. For instance, a box
    # whose center is at (15.6, 35.4) will match the anchors at [15, 35],
    # [16, 35], and [15, 34].
bias = 0.5 # bias
off = (
tf.constant(
[
[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j, k, l, m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
],
tf.float32,
)
* bias
) # offsets
for i in range(self._num_layers):
anchors = self._anchors[i]
_, _, h, w, _ = predictions[str(i + 3)].get_shape().as_list()
gain = tf.constant([1, w, h, w, h, 1], dtype=tf.float32)
t = labels * gain
# Filter out targets that do not match the current anchors.
wh_ratio = t[..., 3:5] / tf.cast(anchors[None, None], tf.float32)
labels_mask = tf.less(
tf.reduce_max(tf.maximum(wh_ratio, 1.0 / wh_ratio), axis=-1),
self._anchor_threshold,
)[..., None]
# Compute valid mask for ground-truths.
labels_mask = tf.logical_and(t[..., :1] != -1, labels_mask)
labels_mask = tf.reshape(labels_mask, [batch_size, -1])
t = tf.reshape(t, [batch_size, -1, 6])
# Find the matching offsets for valid labels.
gxy = t[..., 1:3] # grid xy
gxi = gain[1:3] - gxy # inverse
j, k = tf.split((gxy % 1.0 < bias) & (gxy >= 1.0), 2, axis=-1)
l, m = tf.split((gxi % 1.0 < bias) & (gxi >= 1.0), 2, axis=-1)
j, k, l, m = j[..., 0], k[..., 0], l[..., 0], m[..., 0]
# Note that j and l, k and m are conjugate to each other, so at most one
# of them will be True during running. Therefore, we can reduce memory
# usage by gathering the selected index.
x_map = tf.cast(tf.stack([j, l], axis=-1), tf.int8)
y_map = tf.cast(tf.stack([k, m], axis=-1), tf.int8)
# Add the indices offsets.
x_indices = tf.argmax(x_map, axis=-1) * 2 + 1
y_indices = tf.argmax(y_map, axis=-1) * 2 + 2
three_targets_indices = tf.stack(
[tf.zeros_like(x_indices), x_indices, y_indices], axis=-1
)[..., None]
# Gather the selected 3 targets from the 5-target map.
j = tf.stack([tf.ones_like(j), j, k, l, m], axis=-1)
three_targets_mask = tf.gather_nd(j, three_targets_indices, batch_dims=2)
labels_mask = tf.tile(labels_mask[:, :, None], [1, 1, 5])
t = tf.tile(t[:, :, None], [1, 1, 5, 1])
labels_mask = tf.gather_nd(
labels_mask, three_targets_indices, batch_dims=2
)
t = tf.gather_nd(t, three_targets_indices, batch_dims=2)
offsets = tf.zeros_like(gxy)[:, :, None] + off[None, None]
offsets = tf.gather_nd(offsets, three_targets_indices, batch_dims=2)
gxy = t[..., 1:3]
# Find the actual grid locations.
gij = tf.cast(gxy - offsets * 2, tf.int32)
gi, gj = tf.split(gij, 2, axis=-1)
gi, gj = gi[..., 0], gj[..., 0]
# Append the result.
anchor_idx = tf.cast(t[..., 5], tf.int32)
gain = tf.cast(gain, tf.int32)
gi = tf.clip_by_value(gi, 0, gain[2] - 1)
gj = tf.clip_by_value(gj, 0, gain[3] - 1)
labels_mask = tf.logical_and(labels_mask, three_targets_mask)
masks.append(labels_mask)
indices.append(tf.stack([anchor_idx, gj, gi], axis=-1))
anch.append(tf.gather(anchors, anchor_idx))
# [batch_size, num_layers, num_anchors * max_boxes, num_targets]
masks = tf.stack(masks, axis=1)
indices = tf.stack(indices, axis=1)
anch = tf.stack(anch, axis=1)
return masks, indices, anch
def report_stats(self):
return {
'num_gts': self._num_gts,
'num_matchings': self._num_matchings,
'num_duplicates': self._num_duplicates,
}
def report_separate_losses(self):
"""Returns separate losses that construct the reported loss."""
return {
'iou': self._iou,
'box_loss': self._box_loss,
'obj_loss': self._obj_loss,
'cls_loss': self._cls_loss,
}
def get_config(self):
"""Configs for the loss constructor."""
config = {
'alpha': self._alpha,
'gamma': self._gamma,
'box_weight': self._box_weight,
'obj_weight': self._obj_weight,
'cls_weight': self._cls_weight,
'iou_weight': self._iou_weight,
'iou_mix_ratio': self._iou_mix_ratio,
'pos_targets': self._pos_targets,
'neg_targets': self._neg_targets,
'num_classes': self._num_classes,
'num_layers': self._num_layers,
'num_anchors': self._num_anchors,
'auto_balance': self._auto_balance,
'balance': self._balance,
'strides': self._strides,
'anchors': self._anchors,
'input_size': self._input_size,
'anchor_threshold': self._anchor_threshold,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| 36,619 | 37.34555 | 80 | py |
models | models-master/official/projects/yolo/losses/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/losses/yolo_loss.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo Loss function."""
import abc
import collections
import functools
import tensorflow as tf
from official.projects.yolo.ops import box_ops
from official.projects.yolo.ops import loss_utils
from official.projects.yolo.ops import math_ops
class YoloLossBase(object, metaclass=abc.ABCMeta):
"""Parameters for the YOLO loss functions used at each detection generator.
  This base class implements the shared functionality required to implement a
  YOLO loss function.
"""
def __init__(self,
classes,
anchors,
path_stride=1,
ignore_thresh=0.7,
truth_thresh=1.0,
loss_type='ciou',
iou_normalizer=1.0,
cls_normalizer=1.0,
object_normalizer=1.0,
label_smoothing=0.0,
objectness_smooth=True,
update_on_repeat=False,
box_type='original',
scale_x_y=1.0,
max_delta=10):
"""Loss Function Initialization.
Args:
classes: `int` for the number of classes
anchors: `List[List[int]]` for the anchor boxes that are used in the model
at all levels. For anchor free prediction set the anchor list to be the
same as the image resolution.
      path_stride: `int` for how much to scale this level to get the original
input shape.
ignore_thresh: `float` for the IOU value over which the loss is not
propagated, and a detection is assumed to have been made.
truth_thresh: `float` for the IOU value over which the loss is propagated
despite a detection being made.
      loss_type: `str` for the type of IoU loss to use, one of {ciou, diou,
        giou, iou}.
iou_normalizer: `float` for how much to scale the loss on the IOU or the
boxes.
cls_normalizer: `float` for how much to scale the loss on the classes.
object_normalizer: `float` for how much to scale loss on the detection
map.
label_smoothing: `float` for how much to smooth the loss on the classes.
objectness_smooth: `float` for how much to smooth the loss on the
detection map.
update_on_repeat: `bool` for whether to replace with the newest or the
best value when an index is consumed by multiple objects.
      box_type: `str` for which box scaling type to use.
      scale_x_y: `float` indicating how far each pixel can see outside of its
        containment of 1.0. A value of 1.2 indicates there is a 20% extended
        radius around each pixel within which this specific pixel can predict
        a box center. The center can range from 0 - value/2 to 1 + value/2.
        This value is set in the yolo filter and reused here. There should be
        one value of scale_xy for each level from min_level to max_level.
max_delta: gradient clipping to apply to the box loss.
"""
self._loss_type = loss_type
self._classes = classes
self._num = tf.cast(len(anchors), dtype=tf.int32)
self._truth_thresh = truth_thresh
self._ignore_thresh = ignore_thresh
self._anchors = anchors
self._iou_normalizer = iou_normalizer
self._cls_normalizer = cls_normalizer
self._object_normalizer = object_normalizer
self._scale_x_y = scale_x_y
self._max_delta = max_delta
self._label_smoothing = tf.cast(label_smoothing, tf.float32)
self._objectness_smooth = float(objectness_smooth)
self._update_on_repeat = update_on_repeat
self._box_type = box_type
self._path_stride = path_stride
box_kwargs = dict(
stride=self._path_stride,
scale_xy=self._scale_x_y,
box_type=self._box_type,
max_delta=self._max_delta)
self._decode_boxes = functools.partial(
loss_utils.get_predicted_box, **box_kwargs)
self._search_pairs = lambda *args: (None, None, None, None)
self._build_per_path_attributes()
def box_loss(self, true_box, pred_box, darknet=False):
"""Call iou function and use it to compute the loss for the box maps."""
if self._loss_type == 'giou':
iou, liou = box_ops.compute_giou(true_box, pred_box)
elif self._loss_type == 'ciou':
iou, liou = box_ops.compute_ciou(true_box, pred_box, darknet=darknet)
else:
liou = iou = box_ops.compute_iou(true_box, pred_box)
loss_box = 1 - liou
return iou, liou, loss_box
def _tiled_global_box_search(self,
pred_boxes,
pred_classes,
boxes,
classes,
true_conf,
smoothed,
scale=None):
"""Search of all groundtruths to associate groundtruths to predictions."""
boxes = box_ops.yxyx_to_xcycwh(boxes)
if scale is not None:
boxes = boxes * tf.cast(tf.stop_gradient(scale), boxes.dtype)
    # Search all predictions against ground truths to find matching boxes for
    # each pixel.
_, _, iou_max, _ = self._search_pairs(pred_boxes, pred_classes, boxes,
classes)
if iou_max is None:
return true_conf, tf.ones_like(true_conf)
# Find the exact indexes to ignore and keep.
ignore_mask = tf.cast(iou_max < self._ignore_thresh, pred_boxes.dtype)
iou_mask = iou_max > self._ignore_thresh
if not smoothed:
# Ignore all pixels where a box was not supposed to be predicted but a
# high confidence box was predicted.
obj_mask = true_conf + (1 - true_conf) * ignore_mask
else:
      # Replace pixels in the true confidence map with the max IoU predicted
      # within that cell.
obj_mask = tf.ones_like(true_conf)
iou_ = (1 - self._objectness_smooth) + self._objectness_smooth * iou_max
iou_ = tf.where(iou_max > 0, iou_, tf.zeros_like(iou_))
true_conf = tf.where(iou_mask, iou_, true_conf)
# Stop gradient so while loop is not tracked.
obj_mask = tf.stop_gradient(obj_mask)
true_conf = tf.stop_gradient(true_conf)
return true_conf, obj_mask
def __call__(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""Call function to compute the loss and a set of metrics per FPN level.
Args:
true_counts: `Tensor` of shape [batchsize, height, width, num_anchors]
        representing how many boxes are in a given pixel [j, i] in the output
map.
inds: `Tensor` of shape [batchsize, None, 3] indicating the location [j,
        i] that a given box is associated with in the FPN prediction map.
y_true: `Tensor` of shape [batchsize, None, 8] indicating the actual box
associated with each index in the inds tensor list.
boxes: `Tensor` of shape [batchsize, None, 4] indicating the original
ground truth boxes for each image as they came from the decoder used for
bounding box search.
classes: `Tensor` of shape [batchsize, None, 1] indicating the original
ground truth classes for each image as they came from the decoder used
for bounding box search.
y_pred: `Tensor` of shape [batchsize, height, width, output_depth] holding
        the model's output at a specific FPN level.
Returns:
loss: `float` for the actual loss.
box_loss: `float` loss on the boxes used for metrics.
conf_loss: `float` loss on the confidence used for metrics.
class_loss: `float` loss on the classes used for metrics.
avg_iou: `float` metric for the average iou between predictions and ground
truth.
avg_obj: `float` metric for the average confidence of the model for
predictions.
"""
(loss, box_loss, conf_loss, class_loss, mean_loss, iou, pred_conf, ind_mask,
grid_mask) = self._compute_loss(true_counts, inds, y_true, boxes, classes,
y_pred)
    # Metric computation is done here to save time and resources.
sigmoid_conf = tf.stop_gradient(tf.sigmoid(pred_conf))
iou = tf.stop_gradient(iou)
avg_iou = loss_utils.average_iou(
loss_utils.apply_mask(tf.squeeze(ind_mask, axis=-1), iou))
avg_obj = loss_utils.average_iou(
tf.squeeze(sigmoid_conf, axis=-1) * grid_mask)
return (loss, box_loss, conf_loss, class_loss, mean_loss,
tf.stop_gradient(avg_iou), tf.stop_gradient(avg_obj))
@abc.abstractmethod
def _build_per_path_attributes(self):
"""Additional initialization required for each YOLO loss version."""
...
@abc.abstractmethod
def _compute_loss(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""The actual logic to apply to the raw model for optimization."""
...
def post_path_aggregation(self, loss, box_loss, conf_loss, class_loss,
ground_truths, predictions): # pylint:disable=unused-argument
"""This method allows for post processing of a loss value.
    After the loss has been aggregated across all the FPN levels, some post
    processing may need to occur to properly scale the loss. The default
    behavior is to pass the loss through with no alterations. Passing the
    individual losses for each mask allows for aggregation of loss across
    paths for some losses.
Args:
loss: `tf.float` scalar for the actual loss.
      box_loss: `tf.float` for the loss on the boxes only.
conf_loss: `tf.float` for the loss on the confidences only.
class_loss: `tf.float` for the loss on the classes only.
ground_truths: `Dict` holding all the ground truth tensors.
predictions: `Dict` holding all the predicted values.
Returns:
loss: `tf.float` scalar for the scaled loss.
scale: `tf.float` how much the loss was scaled by.
"""
del box_loss
del conf_loss
del class_loss
del ground_truths
del predictions
return loss, tf.ones_like(loss)
@abc.abstractmethod
def cross_replica_aggregation(self, loss, num_replicas_in_sync):
"""This controls how the loss should be aggregated across replicas."""
...
@tf.custom_gradient
def grad_sigmoid(values):
"""This function scales the gradient as if a signmoid was applied.
This is used in the Darknet Loss when the choosen box type is the scaled
coordinate type. This function is used to match the propagated gradient to
match that of the Darkent Yolov4 model. This is an Identity operation that
allows us to add some extra steps to the back propagation.
Args:
values: A tensor of any shape.
Returns:
values: The unaltered input tensor.
delta: A custom gradient function that adds the sigmoid step to the
backpropagation.
"""
def delta(dy):
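    # Multiply the incoming gradient by the sigmoid derivative:
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).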
t = tf.math.sigmoid(values)
return dy * t * (1 - t)
return values, delta
class DarknetLoss(YoloLossBase):
"""This class implements the full logic for the standard Yolo models."""
def _build_per_path_attributes(self):
"""Paramterization of pair wise search and grid generators.
Objects created here are used for box decoding and dynamic ground truth
association.
"""
self._anchor_generator = loss_utils.GridGenerator(
anchors=self._anchors,
scale_anchors=self._path_stride)
if self._ignore_thresh > 0.0:
self._search_pairs = loss_utils.PairWiseSearch(
iou_type='iou', any_match=True, min_conf=0.25)
return
def _compute_loss(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""Per FPN path loss logic used for Yolov3, Yolov4, and Yolo-Tiny."""
if self._box_type == 'scaled':
      # The Darknet model propagates a sigmoid once in back prop, so we
      # replicate that behavior here.
y_pred = grad_sigmoid(y_pred)
# Generate and store constants and format output.
shape = tf.shape(true_counts)
batch_size, width, height, num = shape[0], shape[1], shape[2], shape[3]
fwidth = tf.cast(width, tf.float32)
fheight = tf.cast(height, tf.float32)
grid_points, anchor_grid = self._anchor_generator(
width, height, batch_size, dtype=tf.float32)
    # Cast all input components to float32 and stop gradient to save memory.
boxes = tf.stop_gradient(tf.cast(boxes, tf.float32))
classes = tf.stop_gradient(tf.cast(classes, tf.float32))
y_true = tf.stop_gradient(tf.cast(y_true, tf.float32))
true_counts = tf.stop_gradient(tf.cast(true_counts, tf.float32))
true_conf = tf.stop_gradient(tf.clip_by_value(true_counts, 0.0, 1.0))
grid_points = tf.stop_gradient(grid_points)
anchor_grid = tf.stop_gradient(anchor_grid)
# Split all the ground truths to use as separate items in loss computation.
(true_box, ind_mask, true_class) = tf.split(y_true, [4, 1, 1], axis=-1)
true_conf = tf.squeeze(true_conf, axis=-1)
true_class = tf.squeeze(true_class, axis=-1)
grid_mask = true_conf
# Splits all predictions.
y_pred = tf.cast(
tf.reshape(y_pred, [batch_size, width, height, num, -1]), tf.float32)
pred_box, pred_conf, pred_class = tf.split(y_pred, [4, 1, -1], axis=-1)
# Decode the boxes to be used for loss compute.
_, _, pred_box = self._decode_boxes(
fwidth, fheight, pred_box, anchor_grid, grid_points, darknet=True)
    # If the ignore threshold is enabled, search all boxes and ignore all
    # IOU values larger than the ignore threshold that are not in the
    # noted ground truth list.
if self._ignore_thresh != 0.0:
(true_conf, obj_mask) = self._tiled_global_box_search(
pred_box,
tf.stop_gradient(tf.sigmoid(pred_class)),
boxes,
classes,
true_conf,
smoothed=self._objectness_smooth > 0)
# Build the one hot class list that are used for class loss.
true_class = tf.one_hot(
tf.cast(true_class, tf.int32),
depth=tf.shape(pred_class)[-1],
dtype=pred_class.dtype)
true_class = tf.stop_gradient(loss_utils.apply_mask(ind_mask, true_class))
# Reorganize the one hot class list as a grid.
true_class_grid = loss_utils.build_grid(
inds, true_class, pred_class, ind_mask, update=False)
true_class_grid = tf.stop_gradient(true_class_grid)
# Use the class mask to find the number of objects located in
# each predicted grid cell/pixel.
counts = true_class_grid
counts = tf.reduce_sum(counts, axis=-1, keepdims=True)
reps = tf.gather_nd(counts, inds, batch_dims=1)
reps = tf.squeeze(reps, axis=-1)
reps = tf.stop_gradient(tf.where(reps == 0.0, tf.ones_like(reps), reps))
# Compute the loss for only the cells in which the boxes are located.
pred_box = loss_utils.apply_mask(ind_mask,
tf.gather_nd(pred_box, inds, batch_dims=1))
iou, _, box_loss = self.box_loss(true_box, pred_box, darknet=True)
box_loss = loss_utils.apply_mask(tf.squeeze(ind_mask, axis=-1), box_loss)
box_loss = math_ops.divide_no_nan(box_loss, reps)
box_loss = tf.cast(tf.reduce_sum(box_loss, axis=1), dtype=y_pred.dtype)
if self._update_on_repeat:
      # Converts the list of ground truths into a grid where repeated values
      # are replaced by the most recent value. Some class identities may get
      # lost, but the loss computation will be more stable and the results
      # more consistent.
# Compute the sigmoid binary cross entropy for the class maps.
class_loss = tf.reduce_mean(
loss_utils.sigmoid_bce(
tf.expand_dims(true_class_grid, axis=-1),
tf.expand_dims(pred_class, axis=-1), self._label_smoothing),
axis=-1)
# Apply normalization to the class losses.
if self._cls_normalizer < 1.0:
# Build a mask based on the true class locations.
cls_norm_mask = true_class_grid
# Apply the classes weight to class indexes were one_hot is one.
class_loss *= ((1 - cls_norm_mask) +
cls_norm_mask * self._cls_normalizer)
# Mask to the class loss and compute the sum over all the objects.
class_loss = tf.reduce_sum(class_loss, axis=-1)
class_loss = loss_utils.apply_mask(grid_mask, class_loss)
class_loss = math_ops.rm_nan_inf(class_loss, val=0.0)
class_loss = tf.cast(
tf.reduce_sum(class_loss, axis=(1, 2, 3)), dtype=y_pred.dtype)
else:
      # Computes the loss while keeping the structure as a list in
      # order to ensure all objects are considered. In some cases this can
      # make training more unstable but may also return higher APs.
pred_class = loss_utils.apply_mask(
ind_mask, tf.gather_nd(pred_class, inds, batch_dims=1))
class_loss = tf.keras.losses.binary_crossentropy(
tf.expand_dims(true_class, axis=-1),
tf.expand_dims(pred_class, axis=-1),
label_smoothing=self._label_smoothing,
from_logits=True)
class_loss = loss_utils.apply_mask(ind_mask, class_loss)
class_loss = math_ops.divide_no_nan(class_loss,
tf.expand_dims(reps, axis=-1))
class_loss = tf.cast(
tf.reduce_sum(class_loss, axis=(1, 2)), dtype=y_pred.dtype)
class_loss *= self._cls_normalizer
# Compute the sigmoid binary cross entropy for the confidence maps.
bce = tf.reduce_mean(
loss_utils.sigmoid_bce(
tf.expand_dims(true_conf, axis=-1), pred_conf, 0.0),
axis=-1)
# Mask the confidence loss and take the sum across all the grid cells.
if self._ignore_thresh != 0.0:
bce = loss_utils.apply_mask(obj_mask, bce)
conf_loss = tf.cast(tf.reduce_sum(bce, axis=(1, 2, 3)), dtype=y_pred.dtype)
# Apply the weights to each loss.
box_loss *= self._iou_normalizer
conf_loss *= self._object_normalizer
# Add all the losses together then take the mean over the batches.
loss = box_loss + class_loss + conf_loss
loss = tf.reduce_mean(loss)
# Reduce the mean of the losses to use as a metric.
box_loss = tf.reduce_mean(box_loss)
conf_loss = tf.reduce_mean(conf_loss)
class_loss = tf.reduce_mean(class_loss)
return (loss, box_loss, conf_loss, class_loss, loss, iou, pred_conf,
ind_mask, grid_mask)
def cross_replica_aggregation(self, loss, num_replicas_in_sync):
"""This method is not specific to each loss path, but each loss type."""
return loss / num_replicas_in_sync
class ScaledLoss(YoloLossBase):
"""This class implements the full logic for the scaled Yolo models."""
def _build_per_path_attributes(self):
"""Paramterization of pair wise search and grid generators.
Objects created here are used for box decoding and dynamic ground truth
association.
"""
self._anchor_generator = loss_utils.GridGenerator(
anchors=self._anchors,
scale_anchors=self._path_stride)
if self._ignore_thresh > 0.0:
self._search_pairs = loss_utils.PairWiseSearch(
iou_type=self._loss_type, any_match=False, min_conf=0.25)
self._cls_normalizer = self._cls_normalizer * self._classes / 80
return
def _compute_loss(self, true_counts, inds, y_true, boxes, classes, y_pred):
"""Per FPN path loss logic for Yolov4-csp, Yolov4-Large, and Yolov5."""
# Generate shape constants.
shape = tf.shape(true_counts)
batch_size, width, height, num = shape[0], shape[1], shape[2], shape[3]
fwidth = tf.cast(width, tf.float32)
fheight = tf.cast(height, tf.float32)
    # Cast all input components to float32 and stop gradient to save memory.
y_true = tf.cast(y_true, tf.float32)
true_counts = tf.cast(true_counts, tf.float32)
true_conf = tf.clip_by_value(true_counts, 0.0, 1.0)
grid_points, anchor_grid = self._anchor_generator(
width, height, batch_size, dtype=tf.float32)
# Split the y_true list.
(true_box, ind_mask, true_class) = tf.split(y_true, [4, 1, 1], axis=-1)
grid_mask = true_conf = tf.squeeze(true_conf, axis=-1)
true_class = tf.squeeze(true_class, axis=-1)
num_objs = tf.cast(tf.reduce_sum(ind_mask), dtype=y_pred.dtype)
    # Split up the predictions.
y_pred = tf.cast(
tf.reshape(y_pred, [batch_size, width, height, num, -1]), tf.float32)
pred_box, pred_conf, pred_class = tf.split(y_pred, [4, 1, -1], axis=-1)
# Decode the boxes for loss compute.
scale, pred_box, pbg = self._decode_boxes(
fwidth, fheight, pred_box, anchor_grid, grid_points, darknet=False)
    # If the ignore threshold is enabled, search all boxes and ignore all
    # IOU values larger than the ignore threshold that are not in the
    # noted ground truth list.
if self._ignore_thresh != 0.0:
(_, obj_mask) = self._tiled_global_box_search(
pbg,
tf.stop_gradient(tf.sigmoid(pred_class)),
boxes,
classes,
true_conf,
smoothed=False,
scale=None)
    # Scale, shift, and select the ground truth boxes
    # and predictions into the prediction domain.
if self._box_type == 'anchor_free':
true_box = loss_utils.apply_mask(ind_mask,
(scale * self._path_stride * true_box))
else:
offset = tf.cast(
tf.gather_nd(grid_points, inds, batch_dims=1), true_box.dtype)
offset = tf.concat([offset, tf.zeros_like(offset)], axis=-1)
true_box = loss_utils.apply_mask(ind_mask, (scale * true_box) - offset)
pred_box = loss_utils.apply_mask(ind_mask,
tf.gather_nd(pred_box, inds, batch_dims=1))
# Select the correct/used prediction classes.
true_class = tf.one_hot(
tf.cast(true_class, tf.int32),
depth=tf.shape(pred_class)[-1],
dtype=pred_class.dtype)
true_class = loss_utils.apply_mask(ind_mask, true_class)
pred_class = loss_utils.apply_mask(
ind_mask, tf.gather_nd(pred_class, inds, batch_dims=1))
# Compute the box loss.
_, iou, box_loss = self.box_loss(true_box, pred_box, darknet=False)
box_loss = loss_utils.apply_mask(tf.squeeze(ind_mask, axis=-1), box_loss)
box_loss = math_ops.divide_no_nan(tf.reduce_sum(box_loss), num_objs)
# Use the box IOU to build the map for confidence loss computation.
iou = tf.maximum(tf.stop_gradient(iou), 0.0)
smoothed_iou = ((
(1 - self._objectness_smooth) * tf.cast(ind_mask, iou.dtype)) +
self._objectness_smooth * tf.expand_dims(iou, axis=-1))
smoothed_iou = loss_utils.apply_mask(ind_mask, smoothed_iou)
true_conf = loss_utils.build_grid(
inds, smoothed_iou, pred_conf, ind_mask, update=self._update_on_repeat)
true_conf = tf.squeeze(true_conf, axis=-1)
# Compute the cross entropy loss for the confidence map.
bce = tf.keras.losses.binary_crossentropy(
tf.expand_dims(true_conf, axis=-1), pred_conf, from_logits=True)
if self._ignore_thresh != 0.0:
bce = loss_utils.apply_mask(obj_mask, bce)
conf_loss = tf.reduce_sum(bce) / tf.reduce_sum(obj_mask)
else:
conf_loss = tf.reduce_mean(bce)
# Compute the cross entropy loss for the class maps.
class_loss = tf.keras.losses.binary_crossentropy(
true_class,
pred_class,
label_smoothing=self._label_smoothing,
from_logits=True)
class_loss = loss_utils.apply_mask(
tf.squeeze(ind_mask, axis=-1), class_loss)
class_loss = math_ops.divide_no_nan(tf.reduce_sum(class_loss), num_objs)
# Apply the weights to each loss.
box_loss *= self._iou_normalizer
class_loss *= self._cls_normalizer
conf_loss *= self._object_normalizer
# Add all the losses together then take the sum over the batches.
mean_loss = box_loss + class_loss + conf_loss
loss = mean_loss * tf.cast(batch_size, mean_loss.dtype)
return (loss, box_loss, conf_loss, class_loss, mean_loss, iou, pred_conf,
ind_mask, grid_mask)
def post_path_aggregation(self, loss, box_loss, conf_loss, class_loss,
ground_truths, predictions):
"""This method allows for post processing of a loss value.
    By default the model will have 3 FPN levels {3, 4, 5}. On larger
    models that have 4 or 5 FPN levels, the loss needs to be scaled such
    that the total update has the same effective magnitude as for the model
    with 3 FPN levels. This helps to prevent gradient explosions.
Args:
loss: `tf.float` scalar for the actual loss.
      box_loss: `tf.float` for the loss on the boxes only.
conf_loss: `tf.float` for the loss on the confidences only.
class_loss: `tf.float` for the loss on the classes only.
ground_truths: `Dict` holding all the ground truth tensors.
predictions: `Dict` holding all the predicted values.
Returns:
loss: `tf.float` scalar for the scaled loss.
scale: `tf.float` how much the loss was scaled by.
"""
scale = tf.stop_gradient(3 / len(list(predictions.keys())))
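    # E.g. a model with 5 FPN outputs scales each path's loss by 3/5; the
    # reciprocal is returned so the caller knows how much the loss was scaled.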
return loss * scale, 1 / scale
def cross_replica_aggregation(self, loss, num_replicas_in_sync):
"""This method is not specific to each loss path, but each loss type."""
return loss
class YoloLoss:
"""This class implements the aggregated loss across YOLO model FPN levels."""
def __init__(self,
keys,
classes,
anchors,
path_strides=None,
truth_thresholds=None,
ignore_thresholds=None,
loss_types=None,
iou_normalizers=None,
cls_normalizers=None,
object_normalizers=None,
objectness_smooths=None,
box_types=None,
scale_xys=None,
max_deltas=None,
label_smoothing=0.0,
use_scaled_loss=False,
update_on_repeat=True):
"""Loss Function Initialization.
Args:
keys: `List[str]` indicating the name of the FPN paths that need to be
optimized.
      classes: `int` for the number of classes.
anchors: `List[List[int]]` for the anchor boxes that are used in the model
at all levels. For anchor free prediction set the anchor list to be the
same as the image resolution.
      path_strides: `Dict[int]` for how much to scale this level to get the
        original input shape for each FPN path.
truth_thresholds: `Dict[float]` for the IOU value over which the loss is
propagated despite a detection being made for each FPN path.
ignore_thresholds: `Dict[float]` for the IOU value over which the loss is
not propagated, and a detection is assumed to have been made for each
FPN path.
      loss_types: `Dict[str]` for the type of IOU loss to use, within {ciou,
        diou, giou, iou}, for each FPN path.
iou_normalizers: `Dict[float]` for how much to scale the loss on the IOU
or the boxes for each FPN path.
cls_normalizers: `Dict[float]` for how much to scale the loss on the
classes for each FPN path.
object_normalizers: `Dict[float]` for how much to scale loss on the
detection map for each FPN path.
objectness_smooths: `Dict[float]` for how much to smooth the loss on the
detection map for each FPN path.
box_types: `Dict[bool]` for which scaling type to use for each FPN path.
      scale_xys: `Dict[float]` values indicating how far each pixel can see
        outside of its containment of 1.0. A value of 1.2 indicates there is a
        20% extended radius around each pixel within which this specific pixel
        can predict a center; the center can range from 0 - value/2 to
        1 + value/2. This value is set in the yolo filter and reused here.
        There should be one value of scale_xy for each level from min_level to
        max_level, one for each FPN path.
max_deltas: `Dict[float]` for gradient clipping to apply to the box loss
for each FPN path.
      label_smoothing: `float` for how much to smooth the loss on the classes,
        shared across all FPN paths.
use_scaled_loss: `bool` for whether to use the scaled loss or the
traditional loss.
update_on_repeat: `bool` for whether to replace with the newest or the
best value when an index is consumed by multiple objects.
"""
losses = {'darknet': DarknetLoss, 'scaled': ScaledLoss}
if use_scaled_loss:
loss_type = 'scaled'
else:
loss_type = 'darknet'
self._loss_dict = {}
for key in keys:
self._loss_dict[key] = losses[loss_type](
classes=classes,
anchors=anchors[key],
truth_thresh=truth_thresholds[key],
ignore_thresh=ignore_thresholds[key],
loss_type=loss_types[key],
iou_normalizer=iou_normalizers[key],
cls_normalizer=cls_normalizers[key],
object_normalizer=object_normalizers[key],
box_type=box_types[key],
objectness_smooth=objectness_smooths[key],
max_delta=max_deltas[key],
path_stride=path_strides[key],
scale_x_y=scale_xys[key],
update_on_repeat=update_on_repeat,
label_smoothing=label_smoothing)
def __call__(self, ground_truth, predictions):
metric_dict = collections.defaultdict(dict)
metric_dict['net']['box'] = 0
metric_dict['net']['class'] = 0
metric_dict['net']['conf'] = 0
loss_val, metric_loss = 0, 0
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
for key in predictions.keys():
(loss, loss_box, loss_conf, loss_class, mean_loss, avg_iou,
avg_obj) = self._loss_dict[key](ground_truth['true_conf'][key],
ground_truth['inds'][key],
ground_truth['upds'][key],
ground_truth['bbox'],
ground_truth['classes'],
predictions[key])
# after computing the loss, scale loss as needed for aggregation
# across FPN levels
loss, scale = self._loss_dict[key].post_path_aggregation(
loss, loss_box, loss_conf, loss_class, ground_truth, predictions)
      # after completing the scaling of the loss on each replica, handle
      # scaling the loss for merging the loss across replicas
loss = self._loss_dict[key].cross_replica_aggregation(
loss, num_replicas_in_sync)
loss_val += loss
      # detach all the below gradients: none of them should make a
      # contribution to the gradient from this point forwards
metric_loss += tf.stop_gradient(mean_loss / scale)
metric_dict[key]['loss'] = tf.stop_gradient(mean_loss / scale)
metric_dict[key]['avg_iou'] = tf.stop_gradient(avg_iou)
metric_dict[key]['avg_obj'] = tf.stop_gradient(avg_obj)
metric_dict['net']['box'] += tf.stop_gradient(loss_box / scale)
metric_dict['net']['class'] += tf.stop_gradient(loss_class / scale)
metric_dict['net']['conf'] += tf.stop_gradient(loss_conf / scale)
return loss_val, metric_loss, metric_dict
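# Usage sketch (illustrative; the values below are hypothetical): every
# per-path argument to YoloLoss is a dict keyed by the FPN level names, e.g.
# keys=['3', '4', '5'] with path_strides={'3': 8, '4': 16, '5': 32}. The
# resulting object consumes the ground-truth and prediction dicts produced by
# the YOLO task:
#
#   total_loss, metric_loss, metric_dict = yolo_loss(ground_truth, predictions)
#   # metric_dict['5']['avg_iou'] then holds the mean IOU of the level-5 path.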
| 31,724 | 41.3 | 90 | py |
models | models-master/official/projects/maxvit/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
# pylint: disable=g-bad-import-order
from official.vision import registry_imports
from official.projects.maxvit import configs # pylint: disable=unused-import
from official.projects.maxvit.modeling import maxvit # pylint: disable=unused-import
| 934 | 41.5 | 85 | py |
models | models-master/official/projects/maxvit/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/maxvit/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver, including MaxViT configs.."""
from absl import app
from official.common import flags as tfm_flags
from official.projects.maxvit import registry_imports # pylint: disable=unused-import
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 958 | 35.884615 | 86 | py |
models | models-master/official/projects/maxvit/train_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from absl import flags
from absl.testing import flagsaver
import gin
import tensorflow as tf
from official.projects.maxvit import train as train_lib
from official.vision.dataloaders import tfexample_utils
FLAGS = flags.FLAGS
class TrainTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
tf.io.gfile.makedirs(self._model_dir)
self._test_tfrecord_file = os.path.join(
self.get_temp_dir(), 'test.tfrecord'
)
num_samples = 3
example = tf.train.Example.FromString(
tfexample_utils.create_classification_example(
image_height=224, image_width=224
)
)
examples = [example] * num_samples
tfexample_utils.dump_to_tfrecord(
record_file=self._test_tfrecord_file, tf_examples=examples
)
def test_run(self):
saved_flag_values = flagsaver.save_flag_values()
train_lib.tfm_flags.define_flags()
FLAGS.mode = 'train'
FLAGS.model_dir = self._model_dir
FLAGS.experiment = 'maxvit_imagenet'
params_override = json.dumps({
'runtime': {
'mixed_precision_dtype': 'float32',
},
'trainer': {
'train_steps': 1,
'validation_steps': 1,
'optimizer_config': {
'ema': None,
},
},
'task': {
'init_checkpoint': '',
'model': {
'backbone': {
'maxvit': {
'model_name': 'maxvit-tiny-for-test',
'representation_size': 64,
'add_gap_layer_norm': True,
}
},
'input_size': [224, 224, 3],
'num_classes': 3,
},
'train_data': {
'global_batch_size': 2,
'input_path': self._test_tfrecord_file,
},
'validation_data': {
'global_batch_size': 2,
'input_path': self._test_tfrecord_file,
},
},
})
FLAGS.params_override = params_override
train_lib.train.main('unused_args')
FLAGS.mode = 'eval'
with gin.unlock_config():
train_lib.train.main('unused_args')
flagsaver.restore_flag_values(saved_flag_values)
if __name__ == '__main__':
tf.test.main()
| 3,005 | 27.903846 | 74 | py |
models | models-master/official/projects/maxvit/configs/rcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask R-CNN configuration definition."""
import os
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling.optimization.configs import optimization_config
from official.projects.maxvit.configs import backbones
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import maskrcnn
Parser = maskrcnn.Parser
Anchor = maskrcnn.Anchor
Losses = maskrcnn.Losses
ROISampler = maskrcnn.ROISampler
DetectionHead = maskrcnn.DetectionHead
DataConfig = maskrcnn.DataConfig
MaskRCNN = maskrcnn.MaskRCNN
MaskRCNNTask = maskrcnn.MaskRCNNTask
COCO_INPUT_PATH_BASE = (
'/readahead/200M/placer/prod/home/tensorflow-performance-data/datasets/coco'
)
@exp_factory.register_config_factory('rcnn_maxvit_coco')
def rcnn_maxvit_coco() -> cfg.ExperimentConfig:
"""COCO object detection with MaxViT and Cascade R-CNN."""
steps_per_epoch = 1848 # based on 463 steps @ bs=256
train_batch_size = 256
coco_val_samples = 5000
eval_batch_size = 64
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=MaskRCNNTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=MaskRCNN(
anchor=Anchor(num_scales=3, anchor_size=3.0),
backbone=backbones.Backbone(
type='maxvit',
maxvit=backbones.MaxViT(model_name='maxvit-base')
),
decoder=decoders.Decoder(type='fpn', fpn=decoders.FPN()),
num_classes=91,
input_size=[640, 640, 3],
include_mask=True,
roi_sampler=ROISampler(
cascade_iou_thresholds=[0.7], foreground_iou_threshold=0.6),
detection_head=DetectionHead(
cascade_class_ensemble=True, class_agnostic_bbox_pred=True),
norm_activation=common.NormActivation(
use_sync_bn=True,
activation='relu',
norm_epsilon=0.001,
norm_momentum=0.99),
min_level=3,
max_level=7,
),
losses=Losses(l2_weight_decay=0.0),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.5)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=True)),
trainer=cfg.TrainerConfig(
train_steps=90000,
validation_steps=coco_val_samples // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='AP',
checkpoint_interval=steps_per_epoch * 4,
optimizer_config=optimization_config.OptimizationConfig({
'ema': {
'average_decay': 0.9998,
'trainable_weights_only': False,
},
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate': 0.0001,
'beta_1': 0.9,
'beta_2': 0.999,
'include_in_weight_decay': r'.*(kernel|weight):0$',
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'decay_steps': 90000,
'initial_learning_rate': 0.0001,
'alpha': 0.03,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 6000,
'warmup_learning_rate': 0.,
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| 5,017 | 36.729323 | 80 | py |
models | models-master/official/projects/maxvit/configs/retinanet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for retinanet."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.maxvit.configs import retinanet
from official.vision.configs import retinanet as exp_cfg
class RetinaNetConfigTest(tf.test.TestCase, parameterized.TestCase):
def test_retinanet_configs(self):
config = exp_factory.get_exp_config('retinanet_maxvit_coco')
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.RetinaNetTask)
self.assertIsInstance(config.task.model, exp_cfg.RetinaNet)
self.assertIsInstance(
config.task.model.backbone.maxvit, retinanet.backbones.MaxViT
)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,678 | 36.311111 | 77 | py |
models | models-master/official/projects/maxvit/configs/retinanet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet configuration definition."""
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.maxvit.configs import backbones
from official.vision.configs import retinanet
@exp_factory.register_config_factory('retinanet_maxvit_coco')
def retinanet_maxvit_coco() -> cfg.ExperimentConfig:
"""COCO object detection with RetinaNet using MaxViT backbone."""
config = retinanet.retinanet_resnetfpn_coco()
config.task.model.backbone = backbones.Backbone(
type='maxvit', maxvit=backbones.MaxViT(
model_name='maxvit-base',
window_size=20,
grid_size=20,
scale_ratio='20/7',
survival_prob=0.7,
)
)
config.task.validation_data.global_batch_size = 32
config.trainer.validation_steps = 156
config.trainer.validation_interval = 1560
return config
| 1,483 | 36.1 | 74 | py |
models | models-master/official/projects/maxvit/configs/semantic_segmentation_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
# pylint: disable=unused-import
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.maxvit.configs import semantic_segmentation as exp_cfg
class ImageSegmentationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('maxvit_seg_pascal',),
('maxvit_seg_coco',))
def test_semantic_segmentation_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.SemanticSegmentationTask)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,573 | 35.604651 | 77 | py |
models | models-master/official/projects/maxvit/configs/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation configuration definition."""
import os
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.projects.maxvit.configs import backbones
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import semantic_segmentation
DataConfig = semantic_segmentation.DataConfig
Losses = semantic_segmentation.Losses
Evaluation = semantic_segmentation.Evaluation
SegmentationHead = semantic_segmentation.SegmentationHead
SemanticSegmentationModel = semantic_segmentation.SemanticSegmentationModel
SemanticSegmentationTask = semantic_segmentation.SemanticSegmentationTask
# PASCAL VOC 2012 Dataset
PASCAL_TRAIN_EXAMPLES = 10582
PASCAL_VAL_EXAMPLES = 1449
PASCAL_INPUT_PATH_BASE = 'gs://**/pascal_voc_seg'
@exp_factory.register_config_factory('maxvit_seg_pascal')
def maxvit_seg_pascal() -> cfg.ExperimentConfig:
"""Image segmentation on Pascal VOC with MaxViT."""
train_batch_size = 32
eval_batch_size = 32
steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=21,
input_size=[512, 512, 3],
min_level=3,
max_level=7,
backbone=backbones.Backbone(
type='maxvit',
maxvit=backbones.MaxViT(
model_name='maxvit-tiny',
window_size=16,
grid_size=16,
scale_ratio='16/7',
),
),
decoder=decoders.Decoder(type='fpn', fpn=decoders.FPN()),
head=SegmentationHead(level=3, num_convs=3),
norm_activation=common.NormActivation(
use_sync_bn=True,
activation='relu',
norm_epsilon=0.001,
norm_momentum=0.99,
),
),
losses=Losses(l2_weight_decay=1e-5, top_k_percent_pixels=1.0),
train_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
output_size=[512, 512],
is_training=True,
global_batch_size=train_batch_size,
aug_rand_hflip=True,
aug_scale_min=0.2,
aug_scale_max=1.5,
),
validation_data=DataConfig(
input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
output_size=[512, 512],
is_training=True,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=True,
groundtruth_padded_size=[512, 512],
drop_remainder=True,
),
),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=20000,
validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9998,
'trainable_weights_only': False,
},
'optimizer': {
'type': 'adamw',
'adamw': {
'beta_1': 0.9,
'beta_2': 0.999,
'weight_decay_rate': 0.0001,
'include_in_weight_decay': r'.*(kernel|weight):0$',
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.0005,
'decay_steps': 20000,
'alpha': 0.03,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
return config
# COCO segmentation.
COCO_TRAIN_EXAMPLES = 25600
COCO_VAL_EXAMPLES = 5000
COCO_INPUT_PATH_BASE = 'mscoco'
@exp_factory.register_config_factory('maxvit_seg_coco')
def maxvit_seg_coco() -> cfg.ExperimentConfig:
"""Image segmentation on COCO with MaxViT."""
train_batch_size = 32
eval_batch_size = 32
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=SemanticSegmentationTask(
model=SemanticSegmentationModel(
num_classes=91,
input_size=[640, 640, 3],
backbone=backbones.Backbone(
type='maxvit',
maxvit=backbones.MaxViT(
model_name='maxvit-tiny',
window_size=20,
grid_size=20,
scale_ratio='20/7',
),
),
decoder=decoders.Decoder(type='fpn', fpn=decoders.FPN()),
head=SegmentationHead(level=3, num_convs=3),
norm_activation=common.NormActivation(
use_sync_bn=True,
activation='relu',
norm_epsilon=0.001,
norm_momentum=0.99,
),
),
losses=Losses(l2_weight_decay=1e-5, top_k_percent_pixels=1.0),
train_data=DataConfig(
input_path=os.path.join(
COCO_INPUT_PATH_BASE,
'mscoco_alltasks_trainvalminusminival2014*',
),
output_size=[640, 640],
is_training=True,
global_batch_size=train_batch_size,
aug_rand_hflip=True,
aug_scale_min=0.2,
aug_scale_max=2.0,
),
validation_data=DataConfig(
input_path=os.path.join(
COCO_INPUT_PATH_BASE, 'mscoco_alltasks_minival2014*'
),
output_size=[640, 640],
is_training=True,
global_batch_size=eval_batch_size,
resize_eval_groundtruth=True,
groundtruth_padded_size=[640, 640],
drop_remainder=True,
),
),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=64000,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9998,
'trainable_weights_only': False,
},
'optimizer': {
'type': 'adamw',
'adamw': {
'beta_1': 0.9,
'beta_2': 0.999,
'weight_decay_rate': 0.00001,
'include_in_weight_decay': r'.*(kernel|weight):0$',
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.00005,
'decay_steps': 64000,
'alpha': 0.03,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 1600,
'warmup_learning_rate': 0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
return config
| 8,657 | 34.62963 | 76 | py |
models | models-master/official/projects/maxvit/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Configs package definition."""
from official.projects.maxvit.configs import backbones # pylint:disable=unused-import
from official.projects.maxvit.configs import rcnn # pylint:disable=unused-import
from official.projects.maxvit.configs import retinanet # pylint:disable=unused-import
from official.projects.maxvit.configs import semantic_segmentation # pylint:disable=unused-import
from official.projects.maxvit.configs import image_classification # pylint:disable=unused-import
| 1,116 | 47.565217 | 98 | py |
models | models-master/official/projects/maxvit/configs/backbones.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CoAtNet Image classification configuration definition."""
import dataclasses
from typing import Optional, Tuple
import tensorflow as tf
from official.modeling import hyperparams
from official.vision.configs import backbones
@dataclasses.dataclass
class MaxViT(hyperparams.Config):
"""MaxViT config."""
model_name: str = 'maxvit-tiny'
# These configs are specified according to `model_name` in default.
# Set values will override the default configs.
stem_hsize: Optional[Tuple[int, ...]] = None
block_type: Optional[Tuple[str, ...]] = None
num_blocks: Optional[Tuple[int, ...]] = None
hidden_size: Optional[Tuple[int, ...]] = None
# specific to the multi-axis attention in MaxViT
  # Note that all feature map sizes along the entire network should be
  # divisible by the window_size and grid_size. Say, if you train on ImageNet
  # classification at 224x224, setting both to 7 is almost the only choice.
  # If you train on COCO object detection at 896x896, setting them to 28 is
  # suggested; following Swin Transformer, the window size should scale with
  # the feature size. You may as well set them to 14 or 7.
window_size: int = 7 # window size for conducting block attention module.
grid_size: int = 7 # grid size for conducting sparse global grid attention.
# tfm specific
head_size: int = 32
dropatt: Optional[float] = None
dropout: Optional[float] = None
rel_attn_type: str = '2d_multi_head'
num_heads: Optional[int] = None
# A string of `current_window_size/ckpt_window_size` for finetuning from a
# checkpoint trained with `ckpt_window_size`.
scale_ratio: Optional[str] = None
ln_epsilon: float = 1e-5
ln_dtype: Optional[tf.DType] = None
# conv specific
downsample_loc: str = 'depth_conv'
kernel_size: int = 3
se_ratio: float = 0.25
dropcnn: Optional[float] = None
# Only channels_last is supported for now.
data_format: str = 'channels_last'
norm_type: str = 'sync_batch_norm'
# shared
add_pos_enc: bool = False
pool_type: str = '2d:avg'
pool_stride: int = 2
expansion_rate: int = 4
  # Stochastic depth keep probability for the residual connections. Smaller
  # value means stronger regularization. If annealing is used, it decays
  # linearly from 1.0 to this value with the depth of each layer.
survival_prob: Optional[float] = None # from [0, 1]
survival_prob_anneal: bool = True
kernel_initializer: str = 'glorot_uniform'
bias_initializer: str = 'zeros'
# For cls head, should be same as the last `hidden_size` of backbone.
representation_size: Optional[int] = None
# Only effective when representation_size > 0.
add_gap_layer_norm: bool = True
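# Illustrative note (not in the original config): for a 224x224 classification
# input the backbone emits feature maps of 56, 28, 14 and 7 pixels (strides 4
# to 32), so window_size = grid_size = 7 divides every map. For a 896x896
# detection input the maps are 224, 112, 56 and 28, so 28 (or 14, or 7) works,
# e.g. a hypothetical MaxViT(model_name='maxvit-base', window_size=28,
# grid_size=28, scale_ratio='28/7') when finetuning from a 7x7-window
# checkpoint.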
@dataclasses.dataclass
class Backbone(backbones.Backbone):
"""Configuration for backbones."""
type: Optional[str] = 'maxvit'
maxvit: MaxViT = dataclasses.field(default_factory=MaxViT)
| 3,456 | 35.389474 | 80 | py |
models | models-master/official/projects/maxvit/configs/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.maxvit.configs import image_classification # pylint:disable=unused-import
from official.vision.configs import image_classification as img_cls_config
class MaxViTImageClassificationConfigTest(tf.test.TestCase):
def test_maxvit_build_model(self):
config = exp_factory.get_exp_config('maxvit_imagenet')
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(
config.task, img_cls_config.ImageClassificationTask
)
self.assertIsInstance(
config.task.model, img_cls_config.ImageClassificationModel
)
self.assertIsInstance(
config.task.train_data, img_cls_config.DataConfig
)
config.validate()
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,574 | 33.23913 | 97 | py |
models | models-master/official/projects/maxvit/configs/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaxViT Image classification configuration definition."""
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling.optimization.configs import optimization_config
from official.projects.maxvit.configs import backbones
from official.vision.configs import image_classification as img_cls_cfg
@exp_factory.register_config_factory('maxvit_imagenet')
def maxvit_imagenet() -> cfg.ExperimentConfig:
"""Returns MaxViT-Tiny on imagenet-1k.
Expected to be trained on DF 4x4 or bigger. Can eval on DF 4x2.
Returns:
The full experiment config.
"""
# Reuse ViT deit pretraining config.
exp = img_cls_cfg.image_classification_imagenet_deit_pretrain()
exp.task.model = img_cls_cfg.ImageClassificationModel(
num_classes=1001,
input_size=[224, 224, 3],
kernel_initializer='glorot_uniform',
backbone=backbones.Backbone(
type='maxvit',
maxvit=backbones.MaxViT(
model_name='maxvit-tiny', representation_size=768
),
),
norm_activation=img_cls_cfg.common.NormActivation(activation='relu'),
)
exp.task.train_data.aug_type.randaug.num_layers = 2
exp.task.train_data.aug_type.randaug.magnitude = 15
exp.runtime.mixed_precision_dtype = 'bfloat16'
exp.trainer.optimizer_config.optimizer.adamw.gradient_clip_norm = 0.0
exp.trainer.optimizer_config.warmup.linear.warmup_steps = 10000
exp.trainer.optimizer_config.ema = optimization_config.opt_cfg.EMAConfig(
average_decay=0.9999,
trainable_weights_only=False,
)
return exp
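# Example usage (illustrative): the registered experiment can be pulled from
# the factory and tweaked before training, mirroring the unit tests:
#
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config('maxvit_imagenet')
#   config.task.model.backbone.maxvit.model_name = 'maxvit-base'
#   config.validate()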
| 2,197 | 36.254237 | 75 | py |
models | models-master/official/projects/maxvit/configs/rcnn_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.maxvit.configs import rcnn as exp_cfg
class MaskRCNNConfigTest(tf.test.TestCase):
def test_maskrcnn_configs(self):
config = exp_factory.get_exp_config('rcnn_maxvit_coco')
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.MaskRCNNTask)
self.assertIsInstance(config.task.model, exp_cfg.MaskRCNN)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistency between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,393 | 35.684211 | 77 | py |
models | models-master/official/projects/maxvit/modeling/maxvit_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MaxViT."""
import collections
from typing import Optional, Sequence
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.maxvit.configs import backbones
from official.projects.maxvit.modeling import maxvit
from official.vision.configs import common
class MaxViTBlockTest(tf.test.TestCase):
"""Test the layers of MaxViT."""
def testMaxViTBlockCreation(self) -> None:
"""Ensures that layers can be constructed and forward-props can run."""
inputs_shape = [2, 64, 64, 3]
inp = tf.random.uniform(
shape=inputs_shape, minval=-1.0, maxval=1.0, dtype=tf.float32
)
model = maxvit.MaxViTBlock(
hidden_size=8, head_size=4, window_size=4, grid_size=4
)
out = model(inp, training=False)
self.assertAllEqual([2, 64, 64, 8], out.get_shape().as_list())
self.assertDTypeEqual(tf.reduce_mean(out).numpy(), np.float32)
class MaxViTTest(tf.test.TestCase, parameterized.TestCase):
"""Test the layers of MaxViT."""
@parameterized.named_parameters(
collections.OrderedDict(
testcase_name='MaxViTTest',
input_shape=[2, 64, 64, 3],
input_dtype=tf.float32,
training=False,
stem_hsize=[12, 12],
num_blocks=[2, 2, 2, 2],
window_size=2,
grid_size=2,
block_type=['maxvit', 'maxvit', 'maxvit'],
hidden_size=[16, 32, 64],
expected_shape=[2, 4, 4, 64],
name='maxvit_test',
),
collections.OrderedDict(
testcase_name='MaxViTTiny',
input_shape=[2, 64, 64, 3],
input_dtype=tf.float32,
training=False,
block_type=['maxvit', 'maxvit', 'maxvit', 'maxvit'],
stem_hsize=[64, 64],
num_blocks=[2, 3, 5, 2],
window_size=2,
grid_size=2,
hidden_size=[96, 192, 384, 768],
expected_shape=[2, 2, 2, 768],
name='maxvit_tiny',
),
collections.OrderedDict(
testcase_name='MaxViTTinyWithPrelogits',
input_shape=[2, 64, 64, 3],
input_dtype=tf.float32,
training=False,
representation_size=16,
add_gap_layer_norm=True,
block_type=['maxvit', 'maxvit', 'maxvit', 'maxvit'],
stem_hsize=[64, 64],
num_blocks=[2, 3, 5, 2],
window_size=2,
grid_size=2,
hidden_size=[96, 192, 384, 768],
expected_shape=[2, 2, 2, 768],
name='maxvit_tiny',
),
)
def testForward(
self,
input_shape: Sequence[int],
input_dtype: Optional[tf.DType] = tf.float32,
**kwargs
) -> None:
"""Ensures that layers can be constructed and forward-props can run."""
inp = tf.random.uniform(
input_shape,
minval=-1.0,
maxval=1.0,
dtype=input_dtype,
)
model = maxvit.MaxViT(**kwargs)
out = model(inp, training=kwargs.get('training', None))
add_gap_layer_norm = kwargs.get('add_gap_layer_norm', False)
if add_gap_layer_norm:
self.assertAllEqual([input_shape[0], kwargs['representation_size']],
out['pre_logits'].get_shape().as_list())
# Remove `pre_logits` if exists.
out.pop('pre_logits', None)
out = out[max(out.keys())]
self.assertAllEqual(kwargs['expected_shape'], out.get_shape().as_list())
self.assertDTypeEqual(tf.reduce_mean(out).numpy(), np.float32)
def testBuildMaxViTWithConfig(self):
backbone_config = backbones.Backbone(
type='maxvit',
maxvit=backbones.MaxViT(
stem_hsize=[32, 32],
num_blocks=[2, 3, 5, 2],
window_size=2,
grid_size=2,
hidden_size=[32, 32, 32, 32],
),
)
backbone = maxvit.build_maxvit(
input_specs=tf.keras.layers.InputSpec(shape=[None] + [64, 64, 3]),
backbone_config=backbone_config,
norm_activation_config=common.NormActivation(),
)
self.assertSetEqual(
set(['2', '3', '4', '5']), set(backbone.output_specs.keys())
)
if __name__ == '__main__':
tf.test.main()
| 4,743 | 30.838926 | 76 | py |
models | models-master/official/projects/maxvit/modeling/layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers and Model class for MaxViT."""
import functools
import string
from typing import Any, Callable, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.projects.maxvit.modeling import common_ops
class TrailDense(tf.keras.layers.Layer):
"""Dense module that projects multiple trailing dimensions."""
def __init__(
self,
output_trailing_dims: Union[int, Tuple[int, ...]],
begin_axis: int = -1,
use_bias: bool = True,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'dense',
):
super().__init__(name=name)
if isinstance(output_trailing_dims, int):
self._output_trailing_dims = [output_trailing_dims]
else:
assert isinstance(output_trailing_dims, (list, tuple)) and all(
isinstance(i, int) for i in output_trailing_dims
), f'Invalid output shape: {output_trailing_dims}.'
self._output_trailing_dims = list(output_trailing_dims)
self.begin_axis = begin_axis
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
"""Create variables and einsum expression based on input shape."""
# Create variables
weight_shape = input_shape[self.begin_axis :] + self._output_trailing_dims
self.weight = self.add_weight(
name='weight',
shape=weight_shape,
initializer=self.kernel_initializer,
trainable=True,
)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=self._output_trailing_dims,
initializer=self.bias_initializer,
trainable=True,
)
# Create einsum expression
input_rank = input_shape.rank
shared_size = self.begin_axis % input_rank
i_only_size = input_rank - shared_size
o_only_size = len(self._output_trailing_dims)
assert input_rank + o_only_size < len(
string.ascii_uppercase
), 'Cannot use einsum as input rank + output rank > 26.'
einsum_str = string.ascii_uppercase[: input_rank + o_only_size]
offset = 0
shared_str = einsum_str[offset : offset + shared_size]
offset += shared_size
i_only_str = einsum_str[offset : offset + i_only_size]
offset += i_only_size
o_only_str = einsum_str[offset : offset + o_only_size]
input_str = f'{shared_str}{i_only_str}'
output_str = f'{shared_str}{o_only_str}'
weight_str = f'{i_only_str}{o_only_str}'
# Examples
# - For 4D tensors in conv, a common expr would be 'ABCD,DE->ABCE'.
# - For `q/k/v` head projection in multi-head attention with two output
# trailing dims, the expr is 'ABC,CDE->ABDE'
# - For `o` output projection in multi-head attention with begin_axis = -2,
# the expr is 'ABCD,CDE->ABE'
self.einsum_expr = f'{input_str},{weight_str}->{output_str}'
def call(self, inputs: tf.Tensor) -> tf.Tensor:
output = tf.einsum(self.einsum_expr, inputs, self.weight)
if self.use_bias:
output += self.bias
return output
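# Worked example (illustrative): projecting a [batch, seq, channels] activation
# into per-head queries with two trailing output dims uses 'ABC,CDE->ABDE':
#
#   layer = TrailDense(output_trailing_dims=(8, 32))  # 8 heads of size 32
#   out = layer(tf.ones([2, 49, 64]))                 # -> shape [2, 49, 8, 32]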
class Attention(tf.keras.layers.Layer):
"""Multi-headed attention module."""
def __init__(
self,
hidden_size: int,
head_size: int,
input_origin_height: int = 1,
input_origin_width: int = 1,
num_heads: Optional[int] = None,
dropatt: float = 0.0,
attn_axis: int = 0,
rel_attn_type: Optional[str] = None,
scale_ratio: Optional[float] = None,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'attention',
):
super().__init__(name=name)
self.hidden_size = hidden_size
self.head_size = head_size
self.input_origin_height = input_origin_height
self.input_origin_width = input_origin_width
self.num_heads = num_heads or hidden_size // head_size
self.dropatt = dropatt
self.attn_axis = attn_axis
self.rel_attn_type = rel_attn_type
self.scale_ratio = scale_ratio
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self._q_proj = TrailDense(
output_trailing_dims=(self.num_heads, self.head_size),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='q',
)
self._k_proj = TrailDense(
output_trailing_dims=(self.num_heads, self.head_size),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='k',
)
self._v_proj = TrailDense(
output_trailing_dims=(self.num_heads, self.head_size),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='v',
)
self._o_proj = TrailDense(
output_trailing_dims=self.hidden_size,
begin_axis=-2,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='o',
)
self.q_scale = self.head_size**-0.5
self.relative_bias = None
def build(self, query_shape: Any) -> None:
##### Content attention
# Einsum expression:
# B = batch_size
# N = num_heads
# K = head_size
# S = query_len (of the given attn_axis)
# T = key/value_len (of the given attn_axis)
    # [U-Z] = length of other attention axes
# Example for 5D query_heads, (e.g. images [B x H x W x N x K])
# - when attn_axis = 0 (H axis):
# symbols = 'U' => num_attn_dims = 2
# q_expr = 'BSUNK' => 'S' is inserted, prefix = 'B', suffix = 'NK'
# k_expr = 'BTUNK' => 'T' is inserted, prefix = 'B', suffix = 'NK'
# v_expr = 'BTUNK' => 'T' is inserted, prefix = 'B', suffix = 'NK'
# a_expr = 'BUNST' => 'N x S x T' attention map
num_attn_dims = query_shape.rank - 2 # -2 to account for bsz, hidden size
assert num_attn_dims < 6, 'Only support at most 6 attention dims.'
symbols = ''.join([chr(ord('U') + i) for i in range(num_attn_dims - 1)])
insert = lambda s, i, c: s[:i] + c + s[i:]
create_expr = lambda s, prefix='B', suffix='NK': prefix + s + suffix
self.q_expr = create_expr(insert(symbols, self.attn_axis, 'S'))
self.k_expr = create_expr(insert(symbols, self.attn_axis, 'T'))
self.v_expr = create_expr(insert(symbols, self.attn_axis, 'T'))
self.a_expr = create_expr(symbols, suffix='NST')
##### Relative attention
if self.rel_attn_type in ['2d_multi_head', '2d_single_head']:
query_shape_list = query_shape.as_list()
if query_shape.rank == 4:
height, width = query_shape_list[1:3]
elif query_shape.rank == 3:
seq_len = query_shape_list[1]
height, width = common_ops.get_shape_from_length(
seq_len, self.input_origin_height, self.input_origin_width
)
if height * width != seq_len:
raise ValueError(
'Sequence length: %s violates input size: (%s, %s).'
% (seq_len, height, width)
)
else:
raise ValueError(
'Does not support relative attention for query shape: %s.'
% query_shape_list
)
if self.scale_ratio is not None:
scale_ratio = eval(self.scale_ratio) # pylint:disable=eval-used
vocab_height = 2 * int(height / scale_ratio) - 1
vocab_width = 2 * int(width / scale_ratio) - 1
else:
vocab_height = 2 * height - 1
vocab_width = 2 * width - 1
if self.rel_attn_type == '2d_multi_head':
rel_bias_shape = [self.num_heads, vocab_height, vocab_width]
elif self.rel_attn_type == '2d_single_head':
rel_bias_shape = [vocab_height, vocab_width]
else:
raise NotImplementedError(
f'rel_attn_type {self.rel_attn_type} not implemented yet.'
)
self._feat_height = height
self._feat_width = width
self.relative_bias = self.add_weight(
'relative_bias',
rel_bias_shape,
initializer=self.kernel_initializer,
trainable=True,
)
def call(
self,
query: tf.Tensor,
training: bool,
context: Optional[tf.Tensor] = None,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
if context is None:
context = query
q_heads = self._q_proj(query)
k_heads = self._k_proj(context)
v_heads = self._v_proj(context)
q_heads *= self.q_scale
# attention
attn_logits = tf.einsum(
f'{self.q_expr},{self.k_expr}->{self.a_expr}', q_heads, k_heads
)
if self.relative_bias is not None:
if self.rel_attn_type == '2d_multi_head':
h_axis = 1
else:
h_axis = 0
if self.scale_ratio is not None:
src_shape = self.relative_bias.shape.as_list()
relative_bias = tf.expand_dims(self.relative_bias, axis=-1)
relative_bias = tf.image.resize(
relative_bias, [2 * self._feat_height - 1, 2 * self._feat_width - 1]
)
relative_bias = tf.cast(
tf.squeeze(relative_bias, axis=-1), self.compute_dtype
)
tgt_shape = relative_bias.shape.as_list()
logging.info(
'Bilinear resize relative position bias %s -> %s.',
src_shape,
tgt_shape,
)
else:
relative_bias = tf.cast(self.relative_bias, self.compute_dtype)
reindexed_bias = common_ops.reindex_2d_einsum_lookup(
relative_position_tensor=relative_bias,
height=self._feat_height,
width=self._feat_width,
max_relative_height=self._feat_height - 1,
max_relative_width=self._feat_width - 1,
h_axis=h_axis,
)
attn_logits += reindexed_bias
if attn_mask is not None:
# attn_mask: 1.0 means CAN attend, 0.0 means CANNOT attend
attn_logits += (1.0 - attn_mask) * attn_logits.dtype.min
attn_probs = common_ops.float32_softmax(attn_logits, axis=-1)
if self.dropatt:
attn_probs = tf.keras.layers.Dropout(self.dropatt, 'attn_prob_drop')(
attn_probs, training=training
)
attn_out = tf.einsum(
f'{self.a_expr},{self.v_expr}->{self.q_expr}', attn_probs, v_heads
)
output = self._o_proj(attn_out)
return output
class FFN(tf.keras.layers.Layer):
"""Positionwise feed-forward network."""
def __init__(
self,
hidden_size: int,
dropout: float = 0.0,
expansion_rate: int = 4,
activation: str = 'gelu',
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'ffn',
):
super().__init__(name=name)
self.hidden_size = hidden_size
self.expansion_rate = expansion_rate
self.expanded_size = self.hidden_size * self.expansion_rate
self.dropout = dropout
self.activation = activation
self._expand_dense = TrailDense(
output_trailing_dims=self.expanded_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='expand_dense',
)
self._shrink_dense = TrailDense(
output_trailing_dims=self.hidden_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='shrink_dense',
)
self._activation_fn = common_ops.get_act_fn(self.activation)
def call(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = inputs
output = self._expand_dense(output)
output = self._activation_fn(output)
if self.dropout:
output = tf.keras.layers.Dropout(self.dropout, name='nonlinearity_drop')(
output, training=training
)
output = self._shrink_dense(output)
return output
class TransformerBlock(tf.keras.layers.Layer):
"""Transformer block = Attention + FFN."""
def __init__(
self,
hidden_size: int,
head_size: int,
input_origin_height: int = 1,
input_origin_width: int = 1,
num_heads: Optional[int] = None,
expansion_rate: int = 4,
activation: str = 'gelu',
pool_type: str = '2d:avg',
pool_stride: int = 1,
pool_query_only: bool = False,
dropatt: Optional[Union[float, tf.Tensor]] = None,
dropout: Optional[Union[float, tf.Tensor]] = None,
rel_attn_type: Optional[str] = None,
scale_ratio: Optional[str] = None,
survival_prob: Optional[Union[float, tf.Tensor]] = None,
ln_epsilon: float = 1e-5,
ln_dtype: Optional[tf.DType] = None,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'transformer',
) -> None:
super().__init__(name=name)
self._hidden_size = hidden_size
self._head_size = head_size
self._input_origin_height = input_origin_height
self._input_origin_width = input_origin_width
self._num_heads = num_heads
self._expansion_rate = expansion_rate
self._activation = activation
self._pool_type = pool_type
self._pool_stride = pool_stride
self._pool_query_only = pool_query_only
self._dropatt = dropatt
self._dropout = dropout
self._rel_attn_type = rel_attn_type
self._scale_ratio = scale_ratio
self._survival_prob = survival_prob
self._ln_epsilon = ln_epsilon
self._ln_dtype = ln_dtype
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
if len(input_shape.as_list()) == 4:
_, height, width, _ = input_shape.as_list()
elif len(input_shape.as_list()) == 3:
_, seq_len, _ = input_shape.as_list()
height, width = common_ops.get_shape_from_length(
seq_len, self._input_origin_height, self._input_origin_width
)
else:
raise ValueError(f'Unsupported input shape: {input_shape.as_list()}.')
self.height, self.width = height, width
input_size = input_shape.as_list()[-1]
if input_size != self._hidden_size:
self._shortcut_proj = TrailDense(
self._hidden_size,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='shortcut_proj',
)
else:
self._shortcut_proj = None
self._attn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='attn_layer_norm',
)
self._attention = Attention(
self._hidden_size,
self._head_size,
height // self._pool_stride,
width // self._pool_stride,
num_heads=self._num_heads,
dropatt=self._dropatt,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
)
self._ffn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='ffn_layer_norm',
)
self._ffn = FFN(
self._hidden_size,
dropout=self._dropout,
expansion_rate=self._expansion_rate,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
)
def downsample(self, inputs: tf.Tensor, name: str) -> tf.Tensor:
output = inputs
if self._pool_stride > 1:
assert self._pool_type in [
'2d:avg',
'2d:max',
'1d:avg',
'1d:max',
], f'Invalid pool_type {self._pool_type}'
if self._pool_type.startswith('2d'):
output = common_ops.maybe_reshape_to_2d(output, height=self.height)
output = common_ops.pooling_2d(
output,
self._pool_type.split(':')[-1],
self._pool_stride,
padding='same',
data_format='channels_last',
name=name,
)
else:
output = common_ops.pooling_1d(
output,
self._pool_type.split(':')[-1],
self._pool_stride,
padding='same',
data_format='channels_last',
name=name,
)
return output
def shortcut_branch(self, shortcut: tf.Tensor) -> tf.Tensor:
shortcut = self.downsample(shortcut, 'shortcut_pool')
shortcut = common_ops.maybe_reshape_to_1d(shortcut)
if self._shortcut_proj:
shortcut = self._shortcut_proj(shortcut)
return shortcut
def attn_branch(
self,
inputs: tf.Tensor,
training: bool,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
output = self._attn_layer_norm(inputs)
if self._pool_query_only:
query = self.downsample(output, 'query_pool')
query = common_ops.maybe_reshape_to_1d(query)
output = common_ops.maybe_reshape_to_1d(output)
output = self._attention(
query, training, context=output, attn_mask=attn_mask
)
else:
output = self.downsample(output, 'residual_pool')
output = common_ops.maybe_reshape_to_1d(output)
output = self._attention(output, training, attn_mask=attn_mask)
return output
def ffn_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._ffn_layer_norm(inputs)
output = self._ffn(output, training)
return output
def call(
self,
inputs: tf.Tensor,
training: bool,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
logging.info(
'Block %s input shape: %s, (%s).', self.name, inputs.shape, inputs.dtype
)
shortcut = self.shortcut_branch(inputs)
output = self.attn_branch(inputs, training, attn_mask)
if self._dropout:
output = tf.keras.layers.Dropout(self._dropout, name='after_attn_drop')(
output, training=training
)
output = common_ops.residual_add(
output, shortcut, self._survival_prob, training
)
shortcut = output
output = self.ffn_branch(output, training)
if self._dropout:
output = tf.keras.layers.Dropout(self._dropout, name='after_ffn_drop')(
output, training=training
)
output = common_ops.residual_add(
output, shortcut, self._survival_prob, training
)
return output
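# Data-flow sketch of the block above (illustrative, ignoring dropout and the
# optional query-only pooling):
#
#   shortcut = proj(pool(x))                                     # shortcut_branch
#   x = residual_add(attention(pool(layer_norm(x))), shortcut)   # attn_branch
#   x = residual_add(ffn(layer_norm(x)), x)                      # ffn_branch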
class SqueezeAndExcitation(tf.keras.layers.Layer):
"""Squeeze-and-excitation layer."""
def __init__(
self,
se_filters: int,
output_filters: int,
local_pooling: bool = False,
data_format: str = 'channels_last',
activation: str = 'swish',
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'se',
):
super().__init__(name=name)
self._local_pooling = local_pooling
self._data_format = data_format
self._activation_fn = common_ops.get_act_fn(activation)
# Squeeze and Excitation layer.
self._se_reduce = tf.keras.layers.Conv2D(
se_filters,
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
data_format=self._data_format,
use_bias=True,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='reduce_conv2d',
)
self._se_expand = tf.keras.layers.Conv2D(
output_filters,
kernel_size=[1, 1],
strides=[1, 1],
padding='same',
data_format=self._data_format,
use_bias=True,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name='expand_conv2d',
)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
h_axis, w_axis = [2, 3] if self._data_format == 'channels_first' else [1, 2]
if self._local_pooling:
se_tensor = tf.nn.avg_pool(
inputs,
ksize=[1, inputs.shape[h_axis], inputs.shape[w_axis], 1],
strides=[1, 1, 1, 1],
padding='VALID',
)
else:
se_tensor = tf.reduce_mean(inputs, [h_axis, w_axis], keepdims=True)
se_tensor = self._se_expand(self._activation_fn(self._se_reduce(se_tensor)))
return tf.sigmoid(se_tensor) * inputs
def _config_batch_norm(
norm_type: str,
ln_epsilon: float = 1e-6,
bn_momentum: float = 0.99,
bn_epsilon: float = 1e-6,
) -> Callable[..., Any]:
"""Defines the normalization class for MbConv based on `norm_type`."""
if norm_type == 'layer_norm':
return functools.partial(
tf.keras.layers.LayerNormalization, epsilon=ln_epsilon
)
elif norm_type == 'batch_norm':
return functools.partial(
tf.keras.layers.BatchNormalization,
momentum=bn_momentum,
epsilon=bn_epsilon,
)
elif norm_type == 'sync_batch_norm':
return functools.partial(
tf.keras.layers.BatchNormalization,
momentum=bn_momentum,
epsilon=bn_epsilon,
synchronized=True,
)
else:
raise ValueError(f'Unsupported norm_type {norm_type}.')
class MBConvBlock(tf.keras.layers.Layer):
"""Mobile Inverted Residual Bottleneck (https://arxiv.org/abs/1905.02244)."""
def __init__(
self,
hidden_size: int,
downsample_loc: str = 'depth_conv',
data_format: str = 'channels_last',
kernel_size: int = 3,
expansion_rate: int = 4,
se_ratio: float = 0.25,
activation: str = 'gelu',
pool_type: str = 'avg',
pool_stride: int = 1,
dropcnn: Optional[float] = None,
survival_prob: Optional[float] = None,
norm_type: str = 'sync_batch_norm',
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'mbconv',
):
super().__init__(name=name)
self._hidden_size = hidden_size
self._downsample_loc = downsample_loc
self._data_format = data_format
self._kernel_size = kernel_size
self._expansion_rate = expansion_rate
self._se_ratio = se_ratio
self._activation = activation
self._pool_type = pool_type
self._pool_stride = pool_stride
self._dropcnn = dropcnn
self._survival_prob = survival_prob
self._norm_type = norm_type
self._bn_epsilon = bn_epsilon
self._bn_momentum = bn_momentum
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._activation_fn = common_ops.get_act_fn(self._activation)
def build(self, input_shape: tf.TensorShape) -> None:
"""Builds block according to the arguments."""
channel_axis = 3 if self._data_format == 'channels_last' else 1
input_size = input_shape[channel_axis]
inner_size = self._hidden_size * self._expansion_rate
norm_cls = _config_batch_norm(
self._norm_type,
bn_momentum=self._bn_momentum,
bn_epsilon=self._bn_epsilon,
)
# Shortcut projection.
if input_size != self._hidden_size:
self._shortcut_conv = tf.keras.layers.Conv2D(
filters=self._hidden_size,
kernel_size=1,
strides=1,
padding='same',
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
use_bias=True,
name='shortcut_conv',
)
else:
self._shortcut_conv = None
# Pre-Activation norm
self._pre_norm = norm_cls(name='pre_norm')
# Expansion phase. Called if not using fused convolutions and expansion
# phase is necessary.
if self._expansion_rate != 1:
self._expand_conv = tf.keras.layers.Conv2D(
filters=inner_size,
kernel_size=1,
strides=(
self._pool_stride if self._downsample_loc == 'expand_conv' else 1
),
kernel_initializer=self._kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False,
name='expand_conv',
)
self._expand_norm = norm_cls(name='expand_norm')
# Depth-wise convolution phase. Called if not using fused convolutions.
self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=self._kernel_size,
strides=(
self._pool_stride if self._downsample_loc == 'depth_conv' else 1
),
depthwise_initializer=self._kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False,
name='depthwise_conv',
)
self._depthwise_norm = norm_cls(name='depthwise_norm')
if self._se_ratio is not None and 0 < self._se_ratio <= 1:
se_filters = max(1, int(self._hidden_size * self._se_ratio))
self._se = SqueezeAndExcitation(
se_filters=se_filters,
output_filters=inner_size,
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='se',
)
else:
self._se = None
# Output phase.
self._shrink_conv = tf.keras.layers.Conv2D(
filters=self._hidden_size,
kernel_size=1,
strides=1,
padding='same',
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
use_bias=True,
name='shrink_conv',
)
def downsample(self, inputs: tf.Tensor, name: str) -> tf.Tensor:
output = inputs
if self._pool_stride > 1:
output = common_ops.pooling_2d(
output,
self._pool_type,
self._pool_stride,
padding='same',
data_format=self._data_format,
name=name,
)
return output
def shortcut_branch(self, shortcut: tf.Tensor) -> tf.Tensor:
shortcut = self.downsample(shortcut, name='shortcut_pool')
if self._shortcut_conv:
shortcut = self._shortcut_conv(shortcut)
return shortcut
def residual_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._pre_norm(inputs, training=training)
if self._downsample_loc == 'inputs':
output = self.downsample(output, name='residual_pool')
if self._expansion_rate != 1:
output = self._expand_conv(output)
output = self._expand_norm(output, training=training)
output = self._activation_fn(output)
logging.debug('Expand shape: %s', output.shape)
output = self._depthwise_conv(output)
output = self._depthwise_norm(output, training=training)
output = self._activation_fn(output)
logging.debug('DConv shape: %s', output.shape)
if self._dropcnn:
      output = tf.keras.layers.Dropout(self._dropcnn, name='after_dconv_drop')(
          output, training=training
      )
if self._se:
output = self._se(output)
self.endpoints = {'expansion_output': output}
output = self._shrink_conv(output)
logging.debug('Shrink shape: %s', output.shape)
return output
def call(
self,
inputs: tf.Tensor,
training: bool,
survival_prob: Optional[Union[float, tf.Tensor]] = None,
) -> tf.Tensor:
"""Implementation of call().
Args:
inputs: the inputs tensor.
training: boolean, whether the model is constructed for training.
      survival_prob: float, between 0 and 1, the drop connect rate.
    Returns:
      An output tensor.
"""
logging.debug(
'Block %s input shape: %s (%s)', self.name, inputs.shape, inputs.dtype
)
residual = self.residual_branch(inputs, training)
shortcut = self.shortcut_branch(inputs)
survival_prob = survival_prob or self._survival_prob
output = common_ops.residual_add(
residual, shortcut, survival_prob, training
)
return output
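# A minimal usage sketch of MBConvBlock (illustrative only; the argument values
# below are assumptions, not defaults taken from any config):
#
#   block = MBConvBlock(hidden_size=64, pool_stride=2, norm_type='batch_norm')
#   y = block(tf.ones([2, 56, 56, 32]), training=False)  # -> [2, 28, 28, 64]
#
# The shortcut branch pools and 1x1-projects the input, while the residual
# branch runs pre-norm -> expand -> depthwise conv -> SE -> shrink before the
# two are combined by `common_ops.residual_add`.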
| 28,290 | 31.706358 | 80 | py |
models | models-master/official/projects/maxvit/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/maxvit/modeling/maxvit.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=logging-fstring-interpolation
r"""MaxViT layers and model class."""
import functools
from typing import Any, Mapping, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.projects.maxvit.modeling import common_ops as ops
from official.projects.maxvit.modeling import layers
from official.vision.modeling.backbones import factory
MAXVIT_SPECS = {
'maxvit-tiny-for-test': dict(
survival_prob=None,
stem_hsize=(8, 8),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 3, 3, 2),
hidden_size=(32, 32, 32, 768),
),
'maxvit-tiny': dict(
survival_prob=0.8,
stem_hsize=(64, 64),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 2, 5, 2),
hidden_size=(64, 128, 256, 512),
),
'maxvit-small': dict(
survival_prob=0.7,
stem_hsize=(64, 64),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 2, 5, 2),
hidden_size=(96, 192, 384, 768),
),
'maxvit-base': dict(
survival_prob=0.6,
stem_hsize=(64, 64),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 6, 14, 2),
hidden_size=(96, 192, 384, 768),
),
'maxvit-large': dict(
survival_prob=0.4,
stem_hsize=(128, 128),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 6, 14, 2),
hidden_size=(128, 256, 512, 1024),
),
'maxvit-xlarge': dict(
survival_prob=0.3,
stem_hsize=(192, 192),
block_type=('maxvit', 'maxvit', 'maxvit', 'maxvit'),
num_blocks=(2, 6, 14, 2),
hidden_size=(192, 384, 768, 1536),
),
}
class MaxViTBlock(tf.keras.layers.Layer):
"""MaxViT block = MBConv + Block-Attention + FFN + Grid-Attention + FFN."""
def __init__(
self,
hidden_size: int,
head_size: int,
window_size: int,
grid_size: int,
num_heads: Optional[int] = None,
downsample_loc: str = 'depth_conv',
data_format: str = 'channels_last',
kernel_size: int = 3,
expansion_rate: int = 4,
se_ratio: float = 0.25,
activation: str = 'gelu',
pool_type: str = '2d:avg',
pool_stride: int = 1,
dropcnn: Optional[float] = None,
dropatt: Optional[Union[float, tf.Tensor]] = None,
dropout: Optional[Union[float, tf.Tensor]] = None,
rel_attn_type: Optional[str] = None,
scale_ratio: Optional[str] = None,
survival_prob: Optional[Union[float, tf.Tensor]] = None,
ln_epsilon: float = 1e-5,
ln_dtype: Optional[tf.DType] = None,
norm_type: str = 'sync_batch_norm',
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'maxvit_block',
) -> None:
super().__init__(name=name)
self._hidden_size = hidden_size
self._head_size = head_size
self._window_size = window_size
self._grid_size = grid_size
self._num_heads = num_heads
self._downsample_loc = downsample_loc
self._data_format = data_format
self._kernel_size = kernel_size
self._expansion_rate = expansion_rate
self._se_ratio = se_ratio
self._dropcnn = dropcnn
self._activation = activation
self._norm_type = norm_type
self._bn_epsilon = bn_epsilon
self._bn_momentum = bn_momentum
self._pool_type = pool_type
self._pool_stride = pool_stride
self._dropatt = dropatt
self._dropout = dropout
self._rel_attn_type = rel_attn_type
self._scale_ratio = scale_ratio
self._survival_prob = survival_prob
self._ln_epsilon = ln_epsilon
self._ln_dtype = ln_dtype
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
input_size = input_shape.as_list()[-1]
if input_size != self._hidden_size:
self._shortcut_proj = layers.TrailDense(
self._hidden_size,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='shortcut_proj',
)
else:
self._shortcut_proj = None
self._block_attn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='attn_layer_norm',
)
self._grid_attn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='attn_layer_norm_1',
)
self._block_attention = layers.Attention(
self._hidden_size,
self._head_size,
num_heads=self._num_heads,
dropatt=self._dropatt,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='attention',
)
self._grid_attention = layers.Attention(
self._hidden_size,
self._head_size,
num_heads=self._num_heads,
dropatt=self._dropatt,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='attention_1',
)
self._block_ffn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='ffn_layer_norm',
)
self._grid_ffn_layer_norm = tf.keras.layers.LayerNormalization(
axis=-1,
epsilon=self._ln_epsilon,
dtype=self._ln_dtype,
name='ffn_layer_norm_1',
)
self._block_ffn = layers.FFN(
self._hidden_size,
dropout=self._dropout,
expansion_rate=self._expansion_rate,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='ffn',
)
self._grid_ffn = layers.FFN(
self._hidden_size,
dropout=self._dropout,
expansion_rate=self._expansion_rate,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='ffn_1',
)
self._mbconv = layers.MBConvBlock(
self._hidden_size,
downsample_loc=self._downsample_loc,
data_format=self._data_format,
kernel_size=self._kernel_size,
expansion_rate=self._expansion_rate,
se_ratio=self._se_ratio,
activation=self._activation,
pool_type='avg' if self._pool_type == '2d:avg' else 'max',
pool_stride=self._pool_stride,
dropcnn=self._dropcnn,
survival_prob=self._survival_prob,
norm_type=self._norm_type,
bn_epsilon=self._bn_epsilon,
bn_momentum=self._bn_momentum,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name='mbconv',
)
def downsample(self, inputs, name):
output = inputs
if self._pool_stride > 1:
output = ops.maybe_reshape_to_2d(output)
output = ops.pooling_2d(
output,
self._pool_type,
self._pool_stride,
padding='same',
data_format='channels_last',
name=name,
)
return output
def window_partition(self, features: tf.Tensor) -> tf.Tensor:
"""Partition the input feature maps into non-overlapping windows.
Note that unsuitable feature or window sizes may be costly on TPU due to
padding sizes:
https://docs.google.com/document/d/1GojE1Q7hR2qyi0mIfnTHgERfl7Dmsj6xPQ31MQo3xUk/edit#
Args:
features: [B, H, W, C] feature maps.
Returns:
      Partitioned features: [B * nH * nW, wSize, wSize, c].
Raises:
ValueError: If the feature map sizes are not divisible by window sizes.
"""
_, h, w, c = features.shape
window_size = self._window_size
if h % window_size != 0 or w % window_size != 0:
raise ValueError(
f'Feature map sizes {(h, w)} '
f'not divisible by window size ({window_size}).'
)
features = tf.reshape(
features,
(-1, h // window_size, window_size, w // window_size, window_size, c),
)
features = tf.transpose(features, (0, 1, 3, 2, 4, 5))
features = tf.reshape(features, (-1, window_size, window_size, c))
return features
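  # Example of the partition above (sizes are assumptions for illustration):
  # with H = W = 56, window_size = 7 and C channels, an input of shape
  # [B, 56, 56, C] is reshaped to [B, 8, 7, 8, 7, C], transposed to
  # [B, 8, 8, 7, 7, C], and flattened to [B * 64, 7, 7, C], i.e. 64 local 7x7
  # windows per image that are attended to independently.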
def window_stitch_back(
self, features: tf.Tensor, window_size: int, h: int, w: int
) -> tf.Tensor:
"""Reverse window_partition."""
features = tf.reshape(
features,
[
-1,
h // window_size,
w // window_size,
window_size,
window_size,
features.shape[-1],
],
)
return tf.reshape(
tf.transpose(features, (0, 1, 3, 2, 4, 5)),
[-1, h, w, features.shape[-1]],
)
def grid_partition(self, features: tf.Tensor) -> tf.Tensor:
"""Partition the input feature maps into non-overlapping windows.
Note that unsuitable feature or window sizes may be costly on TPU due to
padding sizes:
https://docs.google.com/document/d/1GojE1Q7hR2qyi0mIfnTHgERfl7Dmsj6xPQ31MQo3xUk/edit#
Args:
features: [B, H, W, C] feature maps.
Returns:
Partitioned features: [B, nH, nW, wSize, wSize, c].
Raises:
ValueError: If the feature map sizes are not divisible by window sizes.
"""
_, h, w, c = features.shape
grid_size = self._grid_size
if h % grid_size != 0 or w % grid_size != 0:
raise ValueError(
f'Feature map sizes {(h, w)} '
          f'not divisible by grid size ({grid_size}).'
)
features = tf.reshape(
features, (-1, grid_size, h // grid_size, grid_size, w // grid_size, c)
)
features = tf.transpose(features, (0, 2, 4, 1, 3, 5))
features = tf.reshape(features, (-1, grid_size, grid_size, c))
return features
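  # Example contrasting grid and window partitioning (assumed sizes): with
  # H = W = 56 and grid_size = 7, the reshape above gives [B, 7, 8, 7, 8, C],
  # the transpose gives [B, 8, 8, 7, 7, C], and the result is [B * 64, 7, 7, C].
  # Unlike `window_partition`, each 7x7 group gathers positions strided
  # h // grid_size = 8 apart across the whole map, so the attention that
  # follows is effectively global.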
def grid_stitch_back(
self, features: tf.Tensor, grid_size: int, h: int, w: int
) -> tf.Tensor:
"""Reverse window_partition."""
features = tf.reshape(
features,
[
-1,
h // grid_size,
w // grid_size,
grid_size,
grid_size,
features.shape[-1],
],
)
return tf.reshape(
tf.transpose(features, (0, 3, 1, 4, 2, 5)),
[-1, h, w, features.shape[-1]],
)
def block_attn_branch(
self, inputs: tf.Tensor, training: bool, attn_mask: tf.Tensor
) -> tf.Tensor:
output = self._block_attn_layer_norm(inputs)
    # If grid-attention is put in front, we don't need to downsample here.
# Apply local block-attention
_, h, w, _ = output.shape
output = self.window_partition(output)
output = ops.maybe_reshape_to_1d(output)
output = self._block_attention(output, training, attn_mask=attn_mask)
output = self.window_stitch_back(output, self._window_size, h, w)
return output
def grid_attn_branch(
self, inputs: tf.Tensor, training: bool, attn_mask: tf.Tensor
) -> tf.Tensor:
output = self._grid_attn_layer_norm(inputs)
# output = self.downsample(output, 'residual_pool')
    # Apply global grid attention.
_, h, w, _ = output.shape
output = self.grid_partition(output)
output = ops.maybe_reshape_to_1d(output)
output = self._grid_attention(output, training, attn_mask=attn_mask)
output = self.grid_stitch_back(output, self._grid_size, h, w)
return output
def block_ffn_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._block_ffn_layer_norm(inputs)
output = self._block_ffn(output, training)
return output
def grid_ffn_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._grid_ffn_layer_norm(inputs)
output = self._grid_ffn(output, training)
return output
def mbconv_branch(self, inputs: tf.Tensor, training: bool) -> tf.Tensor:
output = self._mbconv(inputs, training=training)
return output
def call(
self,
inputs: tf.Tensor,
training: bool,
attn_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
logging.debug(
'Block %s input shape: %s (%s)', self.name, inputs.shape, inputs.dtype
)
# MBConv
output = self.mbconv_branch(inputs, training)
# block self-attention
shortcut = output
output = self.block_attn_branch(output, training, attn_mask)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_block_attn_drop'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
shortcut = output
output = self.block_ffn_branch(output, training)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_block_ffn_drop_1'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
# grid self-attention
shortcut = output
output = self.grid_attn_branch(output, training, attn_mask)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_grid_attn_drop'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
shortcut = output
output = self.grid_ffn_branch(output, training)
if self._dropout:
output = tf.keras.layers.Dropout(
self._dropout, name='after_grid_ffn_drop'
)(output, training=training)
output = ops.residual_add(output, shortcut, self._survival_prob, training)
return output
class MaxViT(tf.keras.Model):
"""MaxViT's backbone that outputs the pre-global-pooled features."""
def __init__(
self,
block_type: Tuple[str, ...],
num_blocks: Tuple[int, ...],
hidden_size: Tuple[int, ...],
stem_hsize: Tuple[int, ...],
head_size: int = 32,
num_heads: Optional[int] = None,
dropatt: Optional[float] = None,
dropout: Optional[float] = None,
rel_attn_type: str = '2d_multi_head',
window_size: int = 7,
grid_size: int = 7,
scale_ratio: Optional[str] = None,
ln_epsilon: float = 1e-5,
ln_dtype: Optional[tf.DType] = None,
downsample_loc: str = 'depth_conv',
kernel_size: int = 3,
se_ratio: float = 0.25,
dropcnn: Optional[float] = None,
data_format: str = 'channels_last',
norm_type: str = 'sync_batch_norm',
bn_epsilon: float = 1e-3,
bn_momentum: float = 0.99,
add_pos_enc: bool = False,
pool_type: str = '2d:avg',
pool_stride: int = 2,
expansion_rate: int = 4,
activation: str = 'gelu',
survival_prob: Optional[float] = None,
survival_prob_anneal: bool = True,
representation_size: Optional[int] = None,
add_gap_layer_norm: bool = False,
kernel_initializer: Optional[str] = 'glorot_uniform',
bias_initializer: Optional[str] = 'zeros',
name: str = 'maxvit',
**kwargs,
):
"""Initializes MaxViT backbone.
Args:
      block_type: a tuple of `str`, specifying each block type.
      num_blocks: a tuple of `int`, specifying the number of blocks in each
        stage.
      hidden_size: a tuple of `int`, specifying the hidden size of blocks in
        each stage.
      stem_hsize: a tuple of `int`, specifying the hidden size of the stem
        network.
      head_size: embedding size of each attention head.
      num_heads: number of attention heads.
dropatt: an optional float of attention dropout rate.
dropout: an optional float of dropping rate for dropout regularization.
      rel_attn_type: a `str` specifying the type of relative attention head,
        possible values are ['2d_multi_head', '2d_single_head'].
      window_size: window size for conducting the block attention module.
      grid_size: grid size for conducting the sparse global grid attention.
      scale_ratio: an optional string for finetuning at a different window
        size, e.g. '14/7'.
      ln_epsilon: layer normalization epsilon.
      ln_dtype: layer normalization data type.
      downsample_loc: location to apply downsampling to feature maps.
      kernel_size: stem convolution kernel size.
      se_ratio: se ratio for the `mbconv` block.
      dropcnn: an optional float of CNN dropout rate.
      data_format: image data format, usually 'channels_last'.
norm_type: normalization type, one of ['batch_norm', 'sync_batch_norm',
'layer_norm'].
bn_epsilon: batch normalization epsilon.
bn_momentum: batch normalization momentum.
      add_pos_enc: whether to add position embedding.
pool_type: pooling operation type, one of ['2d:avg', '2d:max', '1d:avg',
'1d:max'].
pool_stride: pooling stride size.
expansion_rate: expansion rate value.
      activation: activation function.
      survival_prob: survival probability.
      survival_prob_anneal: whether to anneal the survival probability.
      representation_size: an optional `int` of representation size.
      add_gap_layer_norm: whether to add layer norm to the GAP of the backbone
        final output.
kernel_initializer: kernel initializer.
bias_initializer: bias initializer.
name: specify module name.
**kwargs: extra keyword arguments to be passed.
"""
super().__init__(name=name)
self._block_type = block_type
self._num_blocks = num_blocks
self._hidden_size = hidden_size
self._stem_hsize = stem_hsize
self._head_size = head_size
self._num_heads = num_heads
self._dropatt = dropatt
self._dropout = dropout
self._rel_attn_type = rel_attn_type
self._window_size = window_size
self._grid_size = grid_size
self._scale_ratio = scale_ratio
self._ln_epsilon = ln_epsilon
self._ln_dtype = ln_dtype
self._downsample_loc = downsample_loc
self._kernel_size = kernel_size
self._se_ratio = se_ratio
self._dropcnn = dropcnn
self._data_format = data_format
self._norm_type = norm_type
self._bn_epsilon = bn_epsilon
self._bn_momentum = bn_momentum
self._add_pos_enc = add_pos_enc
self._pool_type = pool_type
self._pool_stride = pool_stride
self._expansion_rate = expansion_rate
self._activation = activation
self._survival_prob = survival_prob
self._survival_prob_anneal = survival_prob_anneal
self._representation_size = representation_size
self._add_gap_layer_norm = add_gap_layer_norm
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._output_specs = {}
def build(self, input_shape: tf.TensorShape) -> None:
if self._norm_type == 'layer_norm':
bn_class = functools.partial(
tf.keras.layers.LayerNormalization, epsilon=self._ln_epsilon
)
elif self._norm_type == 'batch_norm':
bn_class = functools.partial(
tf.keras.layers.BatchNormalization,
momentum=self._bn_momentum,
epsilon=self._bn_epsilon,
)
elif self._norm_type == 'sync_batch_norm':
bn_class = functools.partial(
tf.keras.layers.BatchNormalization,
momentum=self._bn_momentum,
epsilon=self._bn_epsilon,
synchronized=True,
)
else:
raise ValueError(f'Unsupported norm_type {self._norm_type}.')
_, self.height, self.width, _ = input_shape.as_list()
logging.info(
f'Build backbone with input size: ({self.height}, {self.width}).'
)
# Stem
stem_layers = []
for i, _ in enumerate(self._stem_hsize):
conv_layer = tf.keras.layers.Conv2D(
filters=self._stem_hsize[i],
kernel_size=self._kernel_size,
strides=2 if i == 0 else 1,
padding='same',
data_format=self._data_format,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
use_bias=True,
name='conv_{}'.format(i),
)
stem_layers.append(conv_layer)
if i < len(self._stem_hsize) - 1:
stem_layers.append(bn_class(name='norm_{}'.format(i)))
stem_layers.append(
tf.keras.layers.Activation(
ops.get_act_fn(self._activation), name=f'act_{i}'
)
)
self._stem = tf.keras.Sequential(layers=stem_layers, name='stem')
# Backbone
self._blocks = []
total_num_blocks = sum(self._num_blocks)
bid = 0
for i, _ in enumerate(self._block_type):
self._blocks.append([])
for j in range(self._num_blocks[i]):
# block name
block_name = f'block_{i:0>2d}_{j:0>2d}'
##### Update per-block config
# No pooling if not the first block in the stage
if j == 0:
pool_stride = self._pool_stride
else:
pool_stride = 1
# anneal the survival prob
survival_prob = self._survival_prob
if survival_prob and self._survival_prob_anneal:
drop_rate = 1.0 - survival_prob
survival_prob = 1.0 - drop_rate * bid / total_num_blocks
logging.info(
'[%02d/%02d] %s survival_prob: %.4f',
bid,
total_num_blocks,
block_name,
survival_prob,
)
##### Init block
if self._block_type[i] == 'tfm':
block = layers.TransformerBlock(
hidden_size=self._hidden_size[i],
head_size=self._head_size,
input_origin_height=self.height,
input_origin_width=self.width,
num_heads=self._num_heads,
expansion_rate=self._expansion_rate,
activation=self._activation,
pool_type=self._pool_type,
pool_stride=pool_stride,
dropatt=self._dropatt,
dropout=self._dropout,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
survival_prob=survival_prob,
ln_epsilon=self._ln_epsilon,
ln_dtype=self._ln_dtype,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name=block_name,
)
elif self._block_type[i] == 'mbconv':
assert self._pool_type in ['2d:max', '2d:avg'], (
'Invalid pool_type %s for MBConv block' % self._pool_type
)
pool_type = self._pool_type.split(':')[-1]
block = layers.MBConvBlock(
hidden_size=self._hidden_size[i],
downsample_loc=self._downsample_loc,
data_format=self._data_format,
kernel_size=self._kernel_size,
expansion_rate=self._expansion_rate,
se_ratio=self._se_ratio,
activation=self._activation,
pool_type=pool_type,
pool_stride=pool_stride,
dropcnn=self._dropcnn,
survival_prob=survival_prob,
norm_type=self._norm_type,
bn_epsilon=self._bn_epsilon,
bn_momentum=self._bn_momentum,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name=block_name,
)
elif self._block_type[i] == 'maxvit':
block = MaxViTBlock(
hidden_size=self._hidden_size[i],
head_size=self._head_size,
window_size=self._window_size,
grid_size=self._grid_size,
num_heads=self._num_heads,
downsample_loc=self._downsample_loc,
data_format=self._data_format,
kernel_size=self._kernel_size,
expansion_rate=self._expansion_rate,
se_ratio=self._se_ratio,
activation=self._activation,
pool_type=self._pool_type,
pool_stride=pool_stride,
dropcnn=self._dropcnn,
dropatt=self._dropatt,
dropout=self._dropout,
rel_attn_type=self._rel_attn_type,
scale_ratio=self._scale_ratio,
survival_prob=survival_prob,
ln_epsilon=self._ln_epsilon,
ln_dtype=self._ln_dtype,
norm_type=self._norm_type,
bn_epsilon=self._bn_epsilon,
bn_momentum=self._bn_momentum,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
name=block_name,
)
else:
raise ValueError(f'Unsupported block_type {self._block_type[i]}')
self._blocks[-1].append(block)
bid += 1
if self._representation_size and self._representation_size > 0:
self._dense = tf.keras.layers.Dense(
self._representation_size, name='pre_logits')
if self._add_gap_layer_norm:
self._final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=self._ln_epsilon, name='final_layer_norm')
def _add_absolute_position_encoding(self, inputs: tf.Tensor) -> tf.Tensor:
"""Add absolute sinusoid position encoding, which is computed on the fly."""
output = ops.maybe_reshape_to_2d(inputs)
h, w = tf.shape(output)[1], tf.shape(output)[2]
enc_size = output.shape.as_list()[-1] // 2
# sinusoid positional encoding that can be generated online
h_seq = tf.range(-h / 2, h / 2)
w_seq = tf.range(-w / 2, w / 2)
pos_enc_h = ops.absolute_position_encoding(
h_seq, enc_size, dtype=output.dtype
)
pos_enc_w = ops.absolute_position_encoding(
w_seq, enc_size, dtype=output.dtype
)
abs_pos_enc = tf.concat(
[
tf.tile(pos_enc_h[:, None, :], [1, w, 1]),
tf.tile(pos_enc_w[None, :, :], [h, 1, 1]),
],
axis=-1,
)
output += abs_pos_enc
if inputs.shape.rank == 3:
output = ops.maybe_reshape_to_1d(output)
return output
def call(
self, inputs: tf.Tensor, mask: Optional[Any] = None, training: bool = None
) -> Mapping[str, tf.Tensor]:
logging.info(
'MaxViT inputs: shape %s, dtype %s.', inputs.shape, inputs.dtype
)
output = self._stem(inputs, training=training)
logging.info(
'Stage 0 (stem) output: shape %s, dtype %s.', output.shape, output.dtype
)
endpoints = {}
add_pos_enc = self._add_pos_enc
for idx, stage_blocks in enumerate(self._blocks):
# Add position encoding
# Note: the position encoding is usually added to the input of the first
# transformer block. For MaxViT, it is the first block of stage 3.
if (isinstance(add_pos_enc, (tuple, list)) and add_pos_enc[idx]) or (
isinstance(add_pos_enc, bool) and add_pos_enc
):
logging.info('Add position encoding at stage %d.', idx + 1)
output = self._add_absolute_position_encoding(output)
# Blocks forward
for block in stage_blocks:
output = block(output, training=training)
if self._block_type[idx] == 'tfm':
height, width = ops.get_shape_from_length(
output.shape[1], self.height, self.width
)
output = tf.reshape(output, [-1, height, width, output.shape[-1]])
endpoints[str(idx + 2)] = output
logging.info(
'Stage %d output: feature level %s shape %s, dtype %s.',
idx + 1,
idx + 2,
output.shape,
output.dtype,
)
self._output_specs = {
idx: endpoint.get_shape() for idx, endpoint in endpoints.items()
}
if self._representation_size and self._representation_size > 0:
      # Backbone's output is [batch_size, height, width, channel_size].
output = tf.keras.layers.GlobalAveragePooling2D()(output)
# Maybe add a layer_norm after global average pooling.
if self._add_gap_layer_norm:
output = self._final_layer_norm(output)
endpoints['pre_logits'] = tf.nn.tanh(self._dense(output))
return endpoints
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
def override_predefined_spec_and_build_maxvit(
predefined_maxvit_spec, backbone_cfg, norm_activation_config
):
"""Builds a MaxViT backbone.
Args:
    predefined_maxvit_spec: a dict of predefined MaxViT specifications.
backbone_cfg: the MaxViT backbone config.
norm_activation_config: normalization and activation config.
Returns:
The built MaxViT backbone.
"""
survival_prob = (
predefined_maxvit_spec['survival_prob']
if backbone_cfg.survival_prob is None
else backbone_cfg.survival_prob
)
stem_hsize = (
predefined_maxvit_spec['stem_hsize']
if backbone_cfg.stem_hsize is None
else backbone_cfg.stem_hsize
)
block_type = (
predefined_maxvit_spec['block_type']
if backbone_cfg.block_type is None
else backbone_cfg.block_type
)
num_blocks = (
predefined_maxvit_spec['num_blocks']
if backbone_cfg.num_blocks is None
else backbone_cfg.num_blocks
)
hidden_size = (
predefined_maxvit_spec['hidden_size']
if backbone_cfg.hidden_size is None
else backbone_cfg.hidden_size
)
logging.info(
(
          'Final MaxViT specs: survival_prob=%s, stem_hsize=%s, hidden_size=%s,'
          ' block_type=%s, num_blocks=%s.'
),
survival_prob,
stem_hsize,
hidden_size,
block_type,
num_blocks,
)
return MaxViT(
block_type=block_type,
num_blocks=num_blocks,
hidden_size=hidden_size,
stem_hsize=stem_hsize,
head_size=backbone_cfg.head_size,
dropatt=backbone_cfg.dropatt,
dropout=backbone_cfg.dropout,
rel_attn_type=backbone_cfg.rel_attn_type,
window_size=backbone_cfg.window_size,
grid_size=backbone_cfg.grid_size,
scale_ratio=backbone_cfg.scale_ratio,
ln_epsilon=backbone_cfg.ln_epsilon,
ln_dtype=backbone_cfg.ln_dtype,
downsample_loc=backbone_cfg.downsample_loc,
kernel_size=backbone_cfg.kernel_size,
se_ratio=backbone_cfg.se_ratio,
dropcnn=backbone_cfg.dropcnn,
data_format=backbone_cfg.data_format,
norm_type=backbone_cfg.norm_type,
bn_epsilon=norm_activation_config.norm_epsilon,
bn_momentum=norm_activation_config.norm_momentum,
add_pos_enc=backbone_cfg.add_pos_enc,
pool_type=backbone_cfg.pool_type,
pool_stride=backbone_cfg.pool_stride,
expansion_rate=backbone_cfg.expansion_rate,
activation=norm_activation_config.activation,
survival_prob=survival_prob,
survival_prob_anneal=backbone_cfg.survival_prob_anneal,
representation_size=backbone_cfg.representation_size,
add_gap_layer_norm=backbone_cfg.add_gap_layer_norm,
kernel_initializer=backbone_cfg.kernel_initializer,
bias_initializer=backbone_cfg.bias_initializer,
)
@factory.register_backbone_builder('maxvit')
def build_maxvit(
input_specs,
backbone_config,
norm_activation_config,
l2_regularizer=None,
):
"""Builds a MaxViT backbone."""
del l2_regularizer
backbone_cfg = backbone_config.get()
maxvit = override_predefined_spec_and_build_maxvit(
predefined_maxvit_spec=MAXVIT_SPECS[backbone_cfg.model_name],
backbone_cfg=backbone_cfg,
norm_activation_config=norm_activation_config,
)
# Build the backbone to get a proper `output_specs`.
dummy_inputs = tf.keras.Input(input_specs.shape[1:])
_ = maxvit(dummy_inputs, training=False)
return maxvit
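# A minimal construction sketch (illustrative; the spec name and input size are
# assumptions, any `MAXVIT_SPECS` entry can be used the same way):
#
#   backbone = MaxViT(**MAXVIT_SPECS['maxvit-tiny-for-test'])
#   endpoints = backbone(tf.keras.Input([224, 224, 3]), training=False)
#
# `endpoints['2']` .. `endpoints['5']` then hold the stride-4 to stride-32
# feature maps; with the default window_size=7 and grid_size=7, the stage
# resolutions (56, 28, 14, 7) remain divisible by 7 as required.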
| 32,299 | 33.582441 | 89 | py |
models | models-master/official/projects/maxvit/modeling/common_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common operations."""
import functools
import math
from typing import Optional
from absl import logging
import numpy as np
import tensorflow as tf
def activation_fn(features: tf.Tensor, act_fn: str):
"""Customized non-linear activation type."""
if act_fn in ('silu', 'swish'):
return tf.nn.swish(features)
elif act_fn == 'silu_native':
return features * tf.sigmoid(features)
elif act_fn == 'hswish':
return features * tf.nn.relu6(features + 3) / 6
elif act_fn == 'relu':
return tf.nn.relu(features)
elif act_fn == 'relu6':
return tf.nn.relu6(features)
elif act_fn == 'elu':
return tf.nn.elu(features)
elif act_fn == 'leaky_relu':
return tf.nn.leaky_relu(features)
elif act_fn == 'selu':
return tf.nn.selu(features)
elif act_fn == 'mish':
return features * tf.math.tanh(tf.math.softplus(features))
elif act_fn == 'gelu':
return (
0.5
* features
* (
1
+ tf.tanh(
np.sqrt(2 / np.pi) * (features + 0.044715 * tf.pow(features, 3))
)
)
)
else:
raise ValueError('Unsupported act_fn {}'.format(act_fn))
def get_act_fn(act_fn):
if act_fn is None:
act_fn = 'gelu'
if isinstance(act_fn, str):
return functools.partial(activation_fn, act_fn=act_fn)
elif callable(act_fn):
return act_fn
else:
raise ValueError('Unsupported act_fn %s.' % act_fn)
def pooling_2d(inputs, pool_type, stride, **kwargs):
"""Perform 2D pooling."""
if stride > 1:
if pool_type == 'max':
pool_op = tf.keras.layers.MaxPool2D
elif pool_type == 'avg':
pool_op = tf.keras.layers.AveragePooling2D
else:
      raise ValueError('Unsupported pool_type %s' % pool_type)
output = pool_op(
pool_size=(stride, stride), strides=(stride, stride), **kwargs
)(inputs)
else:
output = inputs
return output
def drop_connect(inputs, training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob
random_tensor += tf.random.uniform([batch_size], dtype=inputs.dtype)
for _ in range(inputs.shape.rank - 1):
random_tensor = tf.expand_dims(random_tensor, axis=-1)
binary_tensor = tf.floor(random_tensor)
  # Unlike the conventional way that multiplies by survival_prob at test time,
  # here we divide by survival_prob at training time, so that no additional
  # compute is needed at test time.
output = inputs / survival_prob * binary_tensor
return output
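# Numeric sketch of the scaling above (the survival probability is an assumed
# example value): with survival_prob = 0.8, `random_tensor` is uniform in
# [0.8, 1.8), so `binary_tensor` is 1 with probability 0.8 and 0 otherwise;
# kept samples are scaled by 1 / 0.8 during training, which preserves the
# expected value and lets inference use the residual branch unchanged.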
def residual_add(residual, shortcut, survival_prob, training):
"""Combine residual and shortcut."""
if survival_prob is not None and 0 < survival_prob < 1:
residual = drop_connect(residual, training, survival_prob)
return shortcut + residual
def maybe_reshape_to_2d(x, height=None):
"""Reshape tensor to 2d if not already 2d."""
if x.shape.rank == 3:
_, length, num_channel = x.shape.as_list()
if height is None:
height = int(np.sqrt(length))
else:
assert length % height == 0
width = length // height
logging.debug(
'Reshape %s -> %s', [length, num_channel], [height, width, num_channel]
)
return tf.reshape(x, [-1, height, width, num_channel])
elif x.shape.rank == 4:
return x
else:
    raise ValueError('Unsupported shape {}'.format(x.shape))
def maybe_reshape_to_1d(x):
"""Reshape tensor to 1d if not already 1d."""
if x.shape.rank == 4:
_, h, w, num_channel = x.shape.as_list()
logging.debug('Reshape %s -> %s', [h, w, num_channel], [h * w, num_channel])
return tf.reshape(x, [-1, h * w, num_channel])
elif x.shape.rank == 3:
return x
else:
    raise ValueError('Unsupported shape {}'.format(x.shape))
def generate_lookup_tensor(
length: int,
max_relative_position: Optional[int] = None,
clamp_out_of_range: bool = False,
dtype: tf.DType = tf.float32) -> tf.Tensor:
"""Generate a one_hot lookup tensor to reindex embeddings along one dimension.
Args:
length: the length to reindex to.
max_relative_position: the maximum relative position to consider.
Relative position embeddings for distances above this threshold
are zeroed out.
clamp_out_of_range: bool. Whether to clamp out of range locations to the
maximum relative distance. If False, the out of range locations will be
filled with all-zero vectors.
dtype: dtype for the returned lookup tensor.
Returns:
ret: [length, length, vocab_size] lookup tensor that satisfies
ret[n,m,v] = 1{m - n + max_relative_position = v}.
"""
if max_relative_position is None:
max_relative_position = length - 1
vocab_size = 2 * max_relative_position + 1
ret = np.zeros((length, length, vocab_size))
for i in range(length):
for x in range(length):
v = x - i + max_relative_position
if abs(x - i) > max_relative_position:
if clamp_out_of_range:
v = np.clip(v, 0, vocab_size - 1)
else:
continue
ret[i, x, v] = 1
return tf.constant(ret, dtype)
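# Small worked example (values chosen only for illustration): with length=3 and
# max_relative_position=2, vocab_size is 5 and ret[0, 2, 4] = 1 because the
# relative offset 2 - 0 maps to index 2 + 2 = 4; contracting the vocab axis of
# a relative-position embedding table against this tensor re-indexes it into a
# [length, length, ...] bias.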
def reindex_2d_einsum_lookup(
relative_position_tensor: tf.Tensor,
height: int,
width: int,
max_relative_height: Optional[int] = None,
max_relative_width: Optional[int] = None,
h_axis=None) -> tf.Tensor:
"""Reindex 2d relative position bias with 2 independent einsum lookups.
Args:
relative_position_tensor: tensor of shape
[..., vocab_height, vocab_width, ...].
height: height to reindex to.
width: width to reindex to.
max_relative_height: maximum relative height.
Position embeddings corresponding to vertical distances larger
than max_relative_height are zeroed out. None to disable.
max_relative_width: maximum relative width.
Position embeddings corresponding to horizontal distances larger
than max_relative_width are zeroed out. None to disable.
h_axis: Axis corresponding to vocab_height. Default to 0 if None.
Returns:
reindexed_bias: a Tensor of shape
[..., height * width, height * width, ...]
"""
height_lookup = generate_lookup_tensor(
height, max_relative_position=max_relative_height,
dtype=relative_position_tensor.dtype)
width_lookup = generate_lookup_tensor(
width, max_relative_position=max_relative_width,
dtype=relative_position_tensor.dtype)
if h_axis is None:
h_axis = 0
non_spatial_rank = relative_position_tensor.shape.rank - 2
non_spatial_expr = ''.join(chr(ord('n') + i) for i in range(non_spatial_rank))
prefix = non_spatial_expr[:h_axis]
suffix = non_spatial_expr[h_axis:]
reindexed_tensor = tf.einsum(
'{0}hw{1},ixh->{0}ixw{1}'.format(prefix, suffix),
relative_position_tensor, height_lookup, name='height_lookup')
reindexed_tensor = tf.einsum(
'{0}ixw{1},jyw->{0}ijxy{1}'.format(prefix, suffix),
reindexed_tensor, width_lookup, name='width_lookup')
ret_shape = relative_position_tensor.shape.as_list()
ret_shape[h_axis] = height * width
ret_shape[h_axis + 1] = height * width
reindexed_tensor = tf.reshape(reindexed_tensor, ret_shape)
return reindexed_tensor
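# Usage sketch (the shapes are assumptions for illustration): a learned bias of
# shape [num_heads, 2 * H - 1, 2 * W - 1] can be turned into the
# [num_heads, H * W, H * W] bias added to attention logits via
#
#   bias = reindex_2d_einsum_lookup(rel_bias, H, W, H - 1, W - 1, h_axis=1)
#
# where axis 1 holds the vertical vocabulary and axis 2 the horizontal one.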
def float32_softmax(x: tf.Tensor, *args, **kwargs) -> tf.Tensor:
y = tf.cast(tf.nn.softmax(tf.cast(x, tf.float32), *args, **kwargs), x.dtype)
return y
def get_shape_from_length(length: int, height: int = 1, width: int = 1):
"""Gets input 2D shape from 1D sequence length."""
input_height = int(math.sqrt(length * height // width))
input_width = input_height * width // height
if input_height * input_width != length:
raise ValueError(
        f'Invalid sequence length: {length} or shape: ({height}, {width}).'
)
return (input_height, input_width)
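# Example (assumed values): a flattened sequence of length 3136 coming from a
# square 224x224 input maps back to (56, 56), since
# int(sqrt(3136 * 224 // 224)) == 56 and 56 * 56 == 3136; inconsistent lengths
# raise the ValueError above.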
| 8,454 | 32.418972 | 80 | py |
models | models-master/official/projects/video_ssl/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/video_ssl/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training driver."""
from absl import app
from absl import flags
import gin
# pylint: disable=unused-import
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.video_ssl.modeling import video_ssl_model
from official.projects.video_ssl.tasks import linear_eval
from official.projects.video_ssl.tasks import pretrain
from official.vision import registry_imports
# pylint: disable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
if 'train_and_eval' in FLAGS.mode:
assert (params.task.train_data.feature_shape ==
params.task.validation_data.feature_shape), (
f'train {params.task.train_data.feature_shape} != validate '
f'{params.task.validation_data.feature_shape}')
  # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can have a significant impact on model speed by utilizing float16 in the
  # case of GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect
  # only when dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
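# Typical invocation (a sketch; the experiment name, paths, and flag spellings
# follow the usual TF Model Garden `tfm_flags` conventions and should be
# verified against the installed version):
#
#   python3 -m official.projects.video_ssl.train \
#     --experiment=video_ssl_pretrain_kinetics600 \
#     --mode=train_and_eval \
#     --model_dir=/tmp/video_ssl \
#     --config_file=path/to/overrides.yaml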
| 2,967 | 37.051282 | 80 | py |
models | models-master/official/projects/video_ssl/configs/video_ssl_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.video_ssl.configs import video_ssl as exp_cfg
class VideoClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('video_ssl_pretrain_kinetics400',),
('video_ssl_pretrain_kinetics600',))
def test_video_ssl_pretrain_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.VideoSSLPretrainTask)
self.assertIsInstance(config.task.model, exp_cfg.VideoSSLModel)
self.assertIsInstance(config.task.losses, exp_cfg.SSLLosses)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
@parameterized.parameters(('video_ssl_linear_eval_kinetics400',),
('video_ssl_linear_eval_kinetics600',))
def test_video_ssl_linear_eval_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.VideoSSLEvalTask)
self.assertIsInstance(config.task.model, exp_cfg.VideoSSLModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 2,316 | 40.375 | 78 | py |
models | models-master/official/projects/video_ssl/configs/video_ssl.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification configuration definition."""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.configs import common
from official.vision.configs import video_classification
Losses = video_classification.Losses
VideoClassificationModel = video_classification.VideoClassificationModel
VideoClassificationTask = video_classification.VideoClassificationTask
@dataclasses.dataclass
class DataConfig(video_classification.DataConfig):
"""The base configuration for building datasets."""
is_ssl: bool = False
is_training: bool = True
drop_remainder: bool = True
@dataclasses.dataclass
class VideoSSLModel(VideoClassificationModel):
"""The model config."""
normalize_feature: bool = False
hidden_dim: int = 2048
hidden_layer_num: int = 3
projection_dim: int = 128
hidden_norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation(
use_sync_bn=False, norm_momentum=0.997, norm_epsilon=1.0e-05
)
)
@dataclasses.dataclass
class SSLLosses(Losses):
normalize_hidden: bool = True
temperature: float = 0.1
@dataclasses.dataclass
class VideoSSLPretrainTask(VideoClassificationTask):
model: VideoSSLModel = dataclasses.field(default_factory=VideoSSLModel)
losses: SSLLosses = dataclasses.field(default_factory=SSLLosses)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True, drop_remainder=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
is_training=False, drop_remainder=False
)
)
losses: SSLLosses = dataclasses.field(default_factory=SSLLosses)
@dataclasses.dataclass
class VideoSSLEvalTask(VideoClassificationTask):
model: VideoSSLModel = dataclasses.field(default_factory=VideoSSLModel)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True, drop_remainder=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
is_training=False, drop_remainder=False
)
)
losses: SSLLosses = dataclasses.field(default_factory=SSLLosses)
@exp_factory.register_config_factory('video_ssl_pretrain_kinetics400')
def video_ssl_pretrain_kinetics400() -> cfg.ExperimentConfig:
"""Pretrain SSL Video classification on Kinectics 400 with resnet."""
exp = video_classification.video_classification_kinetics400()
task = VideoSSLPretrainTask()
task.override(exp.task)
task.train_data.is_ssl = True
task.train_data.feature_shape = (16, 224, 224, 3)
task.train_data.temporal_stride = 2
task.model.model_type = 'video_ssl_model'
exp.task = task
return exp
@exp_factory.register_config_factory('video_ssl_linear_eval_kinetics400')
def video_ssl_linear_eval_kinetics400() -> cfg.ExperimentConfig:
"""Pretrain SSL Video classification on Kinectics 400 with resnet."""
exp = video_classification.video_classification_kinetics400()
task = VideoSSLEvalTask() # Replaces the task type.
task.override(exp.task)
task.train_data.is_ssl = False
task.train_data.feature_shape = (32, 224, 224, 3)
task.train_data.temporal_stride = 2
task.validation_data.is_ssl = False
task.validation_data.feature_shape = (32, 256, 256, 3)
task.validation_data.temporal_stride = 2
task.validation_data.min_image_size = 256
task.validation_data.num_test_clips = 10
task.validation_data.num_test_crops = 3
task.model.model_type = 'video_ssl_model'
task.model.normalize_feature = True
task.model.hidden_layer_num = 0
task.model.projection_dim = 600
exp.task = task
return exp
@exp_factory.register_config_factory('video_ssl_pretrain_kinetics600')
def video_ssl_pretrain_kinetics600() -> cfg.ExperimentConfig:
"""Pretrain SSL Video classification on Kinectics 400 with resnet."""
exp = video_classification.video_classification_kinetics600()
task = VideoSSLPretrainTask()
task.override(exp.task)
task.train_data.is_ssl = True
task.train_data.feature_shape = (16, 224, 224, 3)
task.train_data.temporal_stride = 2
task.model.model_type = 'video_ssl_model'
exp.task = task
return exp
@exp_factory.register_config_factory('video_ssl_linear_eval_kinetics600')
def video_ssl_linear_eval_kinetics600() -> cfg.ExperimentConfig:
"""Pretrain SSL Video classification on Kinectics 400 with resnet."""
exp = video_classification.video_classification_kinetics600()
task = VideoSSLEvalTask() # Replaces the task type.
task.override(exp.task)
task.train_data.is_ssl = False
task.train_data.feature_shape = (32, 224, 224, 3)
task.train_data.temporal_stride = 2
task.validation_data.is_ssl = False
task.validation_data.feature_shape = (32, 256, 256, 3)
task.validation_data.temporal_stride = 2
task.validation_data.min_image_size = 256
task.validation_data.num_test_clips = 10
task.validation_data.num_test_crops = 3
task.model.model_type = 'video_ssl_model'
task.model.normalize_feature = True
task.model.hidden_layer_num = 0
task.model.projection_dim = 600
exp.task = task
return exp
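# Retrieval sketch (illustrative): any factory registered above can be fetched
# by name and further overridden in Python, e.g.
#
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config('video_ssl_pretrain_kinetics400')
#   config.task.train_data.global_batch_size = 1024  # assumed override value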
| 5,841 | 35.5125 | 79 | py |
models | models-master/official/projects/video_ssl/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.projects.video_ssl.configs import video_ssl
| 702 | 38.055556 | 74 | py |
models | models-master/official/projects/video_ssl/dataloaders/video_ssl_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for video and label datasets."""
from typing import Dict, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.projects.video_ssl.configs import video_ssl as exp_cfg
from official.projects.video_ssl.ops import video_ssl_preprocess_ops
from official.vision.dataloaders import video_input
from official.vision.ops import preprocess_ops_3d
IMAGE_KEY = 'image/encoded'
LABEL_KEY = 'clip/label/index'
Decoder = video_input.Decoder
def _process_image(image: tf.Tensor,
is_training: bool = True,
is_ssl: bool = False,
num_frames: int = 32,
stride: int = 1,
num_test_clips: int = 1,
min_resize: int = 256,
crop_size: int = 224,
num_crops: int = 1,
zero_centering_image: bool = False,
seed: Optional[int] = None) -> tf.Tensor:
"""Processes a serialized image tensor.
Args:
image: Input Tensor of shape [timesteps] and type tf.string of serialized
frames.
is_training: Whether or not in training mode. If True, random sample, crop
and left right flip is used.
is_ssl: Whether or not in self-supervised pre-training mode.
num_frames: Number of frames per subclip.
stride: Temporal stride to sample frames.
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each video at test time.
If 1, then a single clip in the middle of the video is sampled. The clips
      are aggregated in the batch dimension.
min_resize: Frames are resized so that min(height, width) is min_resize.
crop_size: Final size of the frame after cropping the resized frames. Both
height and width are the same.
num_crops: Number of crops to perform on the resized frames.
zero_centering_image: If True, frames are normalized to values in [-1, 1].
If False, values in [0, 1].
seed: A deterministic seed to use when sampling.
Returns:
Processed frames. Tensor of shape
[num_frames * num_test_clips, crop_size, crop_size, 3].
"""
# Validate parameters.
if is_training and num_test_clips != 1:
logging.warning(
'`num_test_clips` %d is ignored since `is_training` is `True`.',
num_test_clips)
# Temporal sampler.
if is_training:
# Sampler for training.
if is_ssl:
# Sample two clips from linear decreasing distribution.
image = video_ssl_preprocess_ops.sample_ssl_sequence(
image, num_frames, True, stride)
else:
# Sample random clip.
image = preprocess_ops_3d.sample_sequence(image, num_frames, True, stride)
else:
# Sampler for evaluation.
if num_test_clips > 1:
# Sample linspace clips.
image = preprocess_ops_3d.sample_linspace_sequence(image, num_test_clips,
num_frames, stride)
else:
# Sample middle clip.
image = preprocess_ops_3d.sample_sequence(image, num_frames, False,
stride)
# Decode JPEG string to tf.uint8.
image = preprocess_ops_3d.decode_jpeg(image, 3)
if is_training:
# Standard image data augmentation: random resized crop and random flip.
if is_ssl:
image_1, image_2 = tf.split(image, num_or_size_splits=2, axis=0)
image_1 = preprocess_ops_3d.random_crop_resize(
image_1, crop_size, crop_size, num_frames, 3, (0.5, 2), (0.3, 1))
image_1 = preprocess_ops_3d.random_flip_left_right(image_1, seed)
image_2 = preprocess_ops_3d.random_crop_resize(
image_2, crop_size, crop_size, num_frames, 3, (0.5, 2), (0.3, 1))
image_2 = preprocess_ops_3d.random_flip_left_right(image_2, seed)
else:
image = preprocess_ops_3d.random_crop_resize(
image, crop_size, crop_size, num_frames, 3, (0.5, 2), (0.3, 1))
image = preprocess_ops_3d.random_flip_left_right(image, seed)
else:
# Resize images (resize happens only if necessary to save compute).
image = preprocess_ops_3d.resize_smallest(image, min_resize)
# Three-crop of the frames.
image = preprocess_ops_3d.crop_image(image, crop_size, crop_size, False,
num_crops)
  # Cast the frames to float32, normalizing according to zero_centering_image.
if is_training and is_ssl:
image_1 = preprocess_ops_3d.normalize_image(image_1, zero_centering_image)
image_2 = preprocess_ops_3d.normalize_image(image_2, zero_centering_image)
else:
image = preprocess_ops_3d.normalize_image(image, zero_centering_image)
# Self-supervised pre-training augmentations.
if is_training and is_ssl:
if zero_centering_image:
image_1 = 0.5 * (image_1 + 1.0)
image_2 = 0.5 * (image_2 + 1.0)
# Temporally consistent color jittering.
image_1 = video_ssl_preprocess_ops.random_color_jitter_3d(image_1)
image_2 = video_ssl_preprocess_ops.random_color_jitter_3d(image_2)
# Temporally consistent gaussian blurring.
image_1 = video_ssl_preprocess_ops.random_blur(image_1, crop_size,
crop_size, 1.0)
image_2 = video_ssl_preprocess_ops.random_blur(image_2, crop_size,
crop_size, 0.1)
image_2 = video_ssl_preprocess_ops.random_solarization(image_2)
image = tf.concat([image_1, image_2], axis=0)
image = tf.clip_by_value(image, 0., 1.)
if zero_centering_image:
image = 2 * (image - 0.5)
return image
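# Illustrative sketch only (the names and sizes below are assumptions, not part
# of the pipeline): how `_process_image` could be applied to a decoded tensor
# of serialized JPEG frames in self-supervised training mode.
def _example_process_image(raw_frames: tf.Tensor) -> tf.Tensor:
  # `raw_frames` is assumed to be a [timesteps] tf.string tensor of
  # JPEG-encoded frames, e.g. as produced by the Decoder.
  return _process_image(
      image=raw_frames,
      is_training=True,
      is_ssl=True,  # Two clips are sampled and concatenated on axis 0.
      num_frames=16,
      stride=2,
      crop_size=224)  # Output shape: [2 * 16, 224, 224, 3], float32.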
def _postprocess_image(image: tf.Tensor,
is_training: bool = True,
is_ssl: bool = False,
num_frames: int = 32,
num_test_clips: int = 1,
num_test_crops: int = 1) -> tf.Tensor:
"""Processes a batched Tensor of frames.
  The same parameters used in `_process_image` should be used here.
Args:
image: Input Tensor of shape [batch, timesteps, height, width, 3].
    is_training: Whether or not in training mode. If True, random sampling,
      cropping and left-right flipping are used.
is_ssl: Whether or not in self-supervised pre-training mode.
num_frames: Number of frames per subclip.
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each video at test time.
If 1, then a single clip in the middle of the video is sampled. The clips
      are aggregated in the batch dimension.
num_test_crops: Number of test crops (1 by default). If more than 1, there
are multiple crops for each clip at test time. If 1, there is a single
      central crop. The crops are aggregated in the batch dimension.
Returns:
Processed frames. Tensor of shape
[batch * num_test_clips * num_test_crops, num_frames, height, width, 3].
"""
if is_ssl and is_training:
# In this case, two clips of self-supervised pre-training are merged
    # together in the batch dimension, which will be 2 * batch.
image = tf.concat(tf.split(image, num_or_size_splits=2, axis=1), axis=0)
num_views = num_test_clips * num_test_crops
if num_views > 1 and not is_training:
    # In this case, multiple views are merged together in the batch dimension,
    # which will be batch * num_views.
image = tf.reshape(image, [-1, num_frames] + image.shape[2:].as_list())
return image
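# Minimal sketch of the view merging above, with hypothetical sizes: at
# evaluation time a batch of [batch, num_test_clips * num_frames, H, W, 3]
# frames is folded into [batch * num_test_clips, num_frames, H, W, 3].
def _example_postprocess_image() -> tf.Tensor:
  frames = tf.zeros([2, 4 * 32, 8, 8, 3])  # batch=2, 4 test clips of 32 frames.
  return _postprocess_image(
      frames, is_training=False, num_frames=32, num_test_clips=4)
  # Returned shape: [8, 32, 8, 8, 3].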
def _process_label(label: tf.Tensor,
one_hot_label: bool = True,
num_classes: Optional[int] = None) -> tf.Tensor:
"""Processes label Tensor."""
# Validate parameters.
if one_hot_label and not num_classes:
raise ValueError(
'`num_classes` should be given when requesting one hot label.')
# Cast to tf.int32.
label = tf.cast(label, dtype=tf.int32)
if one_hot_label:
# Replace label index by one hot representation.
label = tf.one_hot(label, num_classes)
if len(label.shape.as_list()) > 1:
label = tf.reduce_sum(label, axis=0)
if num_classes == 1:
# The trick for single label.
label = 1 - label
return label
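# Tiny worked example (illustrative only): a scalar label 3 with 5 classes is
# converted to a one-hot vector by `_process_label`.
def _example_process_label() -> tf.Tensor:
  return _process_label(tf.constant(3), one_hot_label=True, num_classes=5)
  # Returns [0., 0., 0., 1., 0.].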
class Parser(video_input.Parser):
"""Parses a video and label dataset."""
def __init__(self,
input_params: exp_cfg.DataConfig,
image_key: str = IMAGE_KEY,
label_key: str = LABEL_KEY):
super().__init__(input_params, image_key, label_key)
self._is_ssl = input_params.is_ssl
def _parse_train_data(
self, decoded_tensors: Dict[str, tf.Tensor]
) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
"""Parses data for training."""
# Process image and label.
image = decoded_tensors[self._image_key]
image = _process_image(
image=image,
is_training=True,
is_ssl=self._is_ssl,
num_frames=self._num_frames,
stride=self._stride,
num_test_clips=self._num_test_clips,
min_resize=self._min_resize,
crop_size=self._crop_size,
zero_centering_image=self._zero_centering_image)
image = tf.cast(image, dtype=self._dtype)
features = {'image': image}
label = decoded_tensors[self._label_key]
label = _process_label(label, self._one_hot_label, self._num_classes)
return features, label
def _parse_eval_data(
self, decoded_tensors: Dict[str, tf.Tensor]
) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
"""Parses data for evaluation."""
image = decoded_tensors[self._image_key]
image = _process_image(
image=image,
is_training=False,
num_frames=self._num_frames,
stride=self._stride,
num_test_clips=self._num_test_clips,
min_resize=self._min_resize,
crop_size=self._crop_size,
num_crops=self._num_crops,
zero_centering_image=self._zero_centering_image)
image = tf.cast(image, dtype=self._dtype)
features = {'image': image}
label = decoded_tensors[self._label_key]
label = _process_label(label, self._one_hot_label, self._num_classes)
if self._output_audio:
audio = decoded_tensors[self._audio_feature]
audio = tf.cast(audio, dtype=self._dtype)
audio = preprocess_ops_3d.sample_sequence(
audio, 20, random=False, stride=1)
audio = tf.ensure_shape(audio, [20, 2048])
features['audio'] = audio
return features, label
def parse_fn(self, is_training):
"""Returns a parse fn that reads and parses raw tensors from the decoder.
Args:
is_training: a `bool` to indicate whether it is in training mode.
Returns:
      parse: a `callable` that takes the serialized example and generates the
images, labels tuple where labels is a dict of Tensors that contains
labels.
"""
def parse(decoded_tensors):
"""Parses the serialized example data."""
if is_training:
return self._parse_train_data(decoded_tensors)
else:
return self._parse_eval_data(decoded_tensors)
return parse
class PostBatchProcessor(object):
"""Processes a video and label dataset which is batched."""
def __init__(self, input_params: exp_cfg.DataConfig):
self._is_training = input_params.is_training
self._is_ssl = input_params.is_ssl
self._num_frames = input_params.feature_shape[0]
self._num_test_clips = input_params.num_test_clips
self._num_test_crops = input_params.num_test_crops
def __call__(self, features: Dict[str, tf.Tensor],
label: tf.Tensor) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
"""Parses a single tf.Example into image and label tensors."""
for key in ['image', 'audio']:
if key in features:
features[key] = _postprocess_image(
image=features[key],
is_training=self._is_training,
is_ssl=self._is_ssl,
num_frames=self._num_frames,
num_test_clips=self._num_test_clips,
num_test_crops=self._num_test_crops)
return features, label
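# Illustrative sketch (not used by the library) of how the pieces above are
# typically chained, mirroring the SSL task's `build_inputs`: decode each
# serialized example, parse it, batch, then run the post-batch processor.
# `params` is assumed to be an exp_cfg.DataConfig and `dataset` a dataset of
# serialized tf.train.SequenceExample strings.
def _example_input_pipeline(params: exp_cfg.DataConfig,
                            dataset: tf.data.Dataset) -> tf.data.Dataset:
  decoder = Decoder()
  parser = Parser(params)
  postprocess = PostBatchProcessor(params)
  dataset = dataset.map(decoder.decode)
  dataset = dataset.map(parser.parse_fn(params.is_training))
  dataset = dataset.batch(2)
  return dataset.map(postprocess)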
| 12,662 | 37.724771 | 80 | py |
models | models-master/official/projects/video_ssl/dataloaders/video_ssl_input_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
# Import libraries
import numpy as np
from PIL import Image
import tensorflow as tf
from official.projects.video_ssl.configs import video_ssl as exp_cfg
from official.projects.video_ssl.dataloaders import video_ssl_input
AUDIO_KEY = 'features/audio'
def fake_seq_example():
# Create fake data.
random_image = np.random.randint(0, 256, size=(263, 320, 3), dtype=np.uint8)
random_image = Image.fromarray(random_image)
label = 42
with io.BytesIO() as buffer:
random_image.save(buffer, format='JPEG')
raw_image_bytes = buffer.getvalue()
seq_example = tf.train.SequenceExample()
seq_example.feature_lists.feature_list.get_or_create(
video_ssl_input.IMAGE_KEY).feature.add().bytes_list.value[:] = [
raw_image_bytes
]
seq_example.feature_lists.feature_list.get_or_create(
video_ssl_input.IMAGE_KEY).feature.add().bytes_list.value[:] = [
raw_image_bytes
]
seq_example.context.feature[video_ssl_input.LABEL_KEY].int64_list.value[:] = [
label
]
random_audio = np.random.normal(size=(10, 256)).tolist()
for s in random_audio:
seq_example.feature_lists.feature_list.get_or_create(
AUDIO_KEY).feature.add().float_list.value[:] = s
return seq_example, label
class VideoAndLabelParserTest(tf.test.TestCase):
def test_video_ssl_input_pretrain(self):
params = exp_cfg.video_ssl_pretrain_kinetics600().task.train_data
decoder = video_ssl_input.Decoder()
parser = video_ssl_input.Parser(params).parse_fn(params.is_training)
seq_example, _ = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image_features, _ = output_tensor
image = image_features['image']
self.assertAllEqual(image.shape, (32, 224, 224, 3))
def test_video_ssl_input_linear_train(self):
params = exp_cfg.video_ssl_linear_eval_kinetics600().task.train_data
decoder = video_ssl_input.Decoder()
parser = video_ssl_input.Parser(params).parse_fn(params.is_training)
seq_example, label = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image_features, label = output_tensor
image = image_features['image']
self.assertAllEqual(image.shape, (32, 224, 224, 3))
self.assertAllEqual(label.shape, (600,))
def test_video_ssl_input_linear_eval(self):
params = exp_cfg.video_ssl_linear_eval_kinetics600().task.validation_data
decoder = video_ssl_input.Decoder()
parser = video_ssl_input.Parser(params).parse_fn(params.is_training)
seq_example, label = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image_features, label = output_tensor
image = image_features['image']
self.assertAllEqual(image.shape, (960, 256, 256, 3))
self.assertAllEqual(label.shape, (600,))
if __name__ == '__main__':
tf.test.main()
| 3,791 | 33.162162 | 80 | py |
models | models-master/official/projects/video_ssl/dataloaders/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/video_ssl/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/video_ssl/modeling/video_ssl_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build video classification models."""
from typing import Mapping, Optional
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.video_ssl.configs import video_ssl as video_ssl_cfg
from official.vision.modeling import backbones
from official.vision.modeling import factory_3d as model_factory
layers = tf.keras.layers
class VideoSSLModel(tf.keras.Model):
"""A video ssl model class builder."""
def __init__(self,
backbone,
normalize_feature,
hidden_dim,
hidden_layer_num,
hidden_norm_args,
projection_dim,
input_specs: Optional[Mapping[str,
tf.keras.layers.InputSpec]] = None,
dropout_rate: float = 0.0,
aggregate_endpoints: bool = False,
kernel_initializer='random_uniform',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Video Classification initialization function.
Args:
backbone: a 3d backbone network.
      normalize_feature: whether to l2-normalize the backbone feature.
hidden_dim: `int` number of hidden units in MLP.
hidden_layer_num: `int` number of hidden layers in MLP.
hidden_norm_args: `dict` for batchnorm arguments in MLP.
projection_dim: `int` number of output dimension for MLP.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
dropout_rate: `float` rate for dropout regularization.
      aggregate_endpoints: `bool`, whether to aggregate all endpoints or only
        use the final endpoint.
kernel_initializer: kernel initializer for the dense layer.
kernel_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
bias_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
**kwargs: keyword arguments to be passed.
"""
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'normalize_feature': normalize_feature,
'hidden_dim': hidden_dim,
'hidden_layer_num': hidden_layer_num,
'use_sync_bn': hidden_norm_args.use_sync_bn,
'norm_momentum': hidden_norm_args.norm_momentum,
'norm_epsilon': hidden_norm_args.norm_epsilon,
'activation': hidden_norm_args.activation,
'projection_dim': projection_dim,
'input_specs': input_specs,
'dropout_rate': dropout_rate,
'aggregate_endpoints': aggregate_endpoints,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._input_specs = input_specs
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._backbone = backbone
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
endpoints = backbone(inputs['image'])
if aggregate_endpoints:
pooled_feats = []
for endpoint in endpoints.values():
x_pool = tf.keras.layers.GlobalAveragePooling3D()(endpoint)
pooled_feats.append(x_pool)
x = tf.concat(pooled_feats, axis=1)
else:
x = endpoints[max(endpoints.keys())]
x = tf.keras.layers.GlobalAveragePooling3D()(x)
# L2 Normalize feature after backbone
if normalize_feature:
x = tf.nn.l2_normalize(x, axis=-1)
# MLP hidden layers
for _ in range(hidden_layer_num):
x = tf.keras.layers.Dense(hidden_dim)(x)
if self._config_dict['use_sync_bn']:
x = tf.keras.layers.experimental.SyncBatchNormalization(
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])(x)
else:
x = tf.keras.layers.BatchNormalization(
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])(x)
x = tf_utils.get_activation(self._config_dict['activation'])(x)
# Projection head
x = tf.keras.layers.Dense(projection_dim)(x)
super().__init__(inputs=inputs, outputs=x, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@model_factory.register_model_builder('video_ssl_model')
def build_video_ssl_pretrain_model(
input_specs: tf.keras.layers.InputSpec,
model_config: video_ssl_cfg.VideoSSLModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds the video classification model."""
del num_classes
input_specs_dict = {'image': input_specs}
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=model_config.norm_activation,
l2_regularizer=l2_regularizer)
  # The norm layer type in the MLP head should be the same as in the backbone.
  assert (model_config.norm_activation.use_sync_bn ==
          model_config.hidden_norm_activation.use_sync_bn)
model = VideoSSLModel(
backbone=backbone,
normalize_feature=model_config.normalize_feature,
hidden_dim=model_config.hidden_dim,
hidden_layer_num=model_config.hidden_layer_num,
hidden_norm_args=model_config.hidden_norm_activation,
projection_dim=model_config.projection_dim,
input_specs=input_specs_dict,
dropout_rate=model_config.dropout_rate,
aggregate_endpoints=model_config.aggregate_endpoints,
kernel_regularizer=l2_regularizer)
return model
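# Illustrative sketch (defined but never executed on import): building the SSL
# pretraining model directly from the registered Kinetics-600 experiment
# config. This assumes the default config builds without overrides; shapes are
# taken from the config rather than hard-coded.
def _example_build_video_ssl_pretrain_model() -> tf.keras.Model:
  params = video_ssl_cfg.video_ssl_pretrain_kinetics600()
  feature_shape = params.task.train_data.feature_shape
  input_specs = tf.keras.layers.InputSpec(shape=[None] + list(feature_shape))
  return build_video_ssl_pretrain_model(
      input_specs=input_specs,
      model_config=params.task.model,
      num_classes=params.task.train_data.num_classes)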
| 6,601 | 35.882682 | 100 | py |
models | models-master/official/projects/video_ssl/tasks/pretrain.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video ssl pretrain task definition."""
from absl import logging
import tensorflow as tf
# pylint: disable=unused-import
from official.core import input_reader
from official.core import task_factory
from official.projects.video_ssl.configs import video_ssl as exp_cfg
from official.projects.video_ssl.dataloaders import video_ssl_input
from official.projects.video_ssl.losses import losses
from official.projects.video_ssl.modeling import video_ssl_model
from official.vision.modeling import factory_3d
from official.vision.tasks import video_classification
# pylint: enable=unused-import
@task_factory.register_task_cls(exp_cfg.VideoSSLPretrainTask)
class VideoSSLPretrainTask(video_classification.VideoClassificationTask):
"""A task for video ssl pretraining."""
def build_model(self):
"""Builds video ssl pretraining model."""
common_input_shape = [
d1 if d1 == d2 else None
for d1, d2 in zip(self.task_config.train_data.feature_shape,
self.task_config.validation_data.feature_shape)
]
input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)
logging.info('Build model input %r', common_input_shape)
model = factory_3d.build_model(
self.task_config.model.model_type,
input_specs=input_specs,
model_config=self.task_config.model,
num_classes=self.task_config.train_data.num_classes)
return model
def _get_decoder_fn(self, params):
decoder = video_ssl_input.Decoder()
return decoder.decode
def build_inputs(self, params: exp_cfg.DataConfig, input_context=None):
"""Builds classification input."""
parser = video_ssl_input.Parser(input_params=params)
postprocess_fn = video_ssl_input.PostBatchProcessor(params)
reader = input_reader.InputReader(
params,
dataset_fn=self._get_dataset_fn(params),
decoder_fn=self._get_decoder_fn(params),
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, model_outputs, num_replicas, model):
"""Sparse categorical cross entropy loss.
Args:
model_outputs: Output logits of the model.
num_replicas: distributed replica number.
model: keras model for calculating weight decay.
Returns:
      A tuple of two dicts: all losses (including `total_loss`) and the
      contrastive metrics.
"""
all_losses = {}
contrastive_metrics = {}
losses_config = self.task_config.losses
total_loss = None
contrastive_loss_dict = losses.contrastive_loss(
model_outputs, num_replicas, losses_config.normalize_hidden,
losses_config.temperature, model,
self.task_config.losses.l2_weight_decay)
total_loss = contrastive_loss_dict['total_loss']
all_losses.update({
'total_loss': total_loss
})
all_losses[self.loss] = total_loss
contrastive_metrics.update({
'contrast_acc': contrastive_loss_dict['contrast_acc'],
'contrast_entropy': contrastive_loss_dict['contrast_entropy'],
'reg_loss': contrastive_loss_dict['reg_loss']
})
return all_losses, contrastive_metrics
def build_metrics(self, training=True):
"""Gets streaming metrics for training/validation."""
metrics = [
tf.keras.metrics.Mean(name='contrast_acc'),
tf.keras.metrics.Mean(name='contrast_entropy'),
tf.keras.metrics.Mean(name='reg_loss')
]
return metrics
def process_metrics(self, metrics, contrastive_metrics):
"""Process and update metrics."""
contrastive_metric_values = contrastive_metrics.values()
for metric, contrastive_metric_value in zip(metrics,
contrastive_metric_values):
metric.update_state(contrastive_metric_value)
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, _ = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
if self.task_config.train_data.output_audio:
outputs = model(features, training=True)
else:
outputs = model(features['image'], training=True)
      # Casting the model outputs to float32 is necessary when mixed precision
      # (mixed_float16 or mixed_bfloat16) is used, so the loss is computed in
      # float32.
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
all_losses, contrastive_metrics = self.build_losses(
model_outputs=outputs, num_replicas=num_replicas,
model=model)
loss = all_losses[self.loss]
scaled_loss = loss
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = all_losses
if metrics:
self.process_metrics(metrics, contrastive_metrics)
logs.update({m.name: m.result() for m in metrics})
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
raise NotImplementedError
def inference_step(self, features, model):
"""Performs the forward step."""
raise NotImplementedError
| 6,748 | 35.090909 | 78 | py |
models | models-master/official/projects/video_ssl/tasks/linear_eval.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video ssl linear evaluation task definition."""
from typing import Any, Optional, List, Tuple
from absl import logging
import tensorflow as tf
# pylint: disable=unused-import
from official.core import task_factory
from official.projects.video_ssl.configs import video_ssl as exp_cfg
from official.projects.video_ssl.modeling import video_ssl_model
from official.vision.tasks import video_classification
@task_factory.register_task_cls(exp_cfg.VideoSSLEvalTask)
class VideoSSLEvalTask(video_classification.VideoClassificationTask):
"""A task for video ssl linear evaluation."""
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
ckpt.read(ckpt_dir_or_file)
else:
raise NotImplementedError
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
model.backbone.trainable = False
logging.info('Setting the backbone to non-trainable.')
return super().train_step(inputs, model, optimizer, metrics)
| 2,521 | 35.028571 | 74 | py |
models | models-master/official/projects/video_ssl/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks package definition."""
from official.projects.video_ssl.tasks import linear_eval
from official.projects.video_ssl.tasks import pretrain
| 755 | 38.789474 | 74 | py |
models | models-master/official/projects/video_ssl/tasks/pretrain_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import random
import orbit
import tensorflow as tf
# pylint: disable=unused-import
from official import vision
from official.core import exp_factory
from official.core import task_factory
from official.modeling import optimization
from official.projects.video_ssl.tasks import pretrain
from official.vision.dataloaders import tfexample_utils
# pylint: enable=unused-import
class VideoClassificationTaskTest(tf.test.TestCase):
def setUp(self):
super().setUp()
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
# pylint: disable=g-complex-comprehension
examples = [
tfexample_utils.make_video_test_example(
image_shape=(36, 36, 3),
audio_shape=(20, 128),
label=random.randint(0, 100)) for _ in range(2)
]
# pylint: enable=g-complex-comprehension
tfexample_utils.dump_to_tfrecord(self._data_path, tf_examples=examples)
def test_task(self):
config = exp_factory.get_exp_config('video_ssl_pretrain_kinetics600')
config.task.train_data.global_batch_size = 2
config.task.train_data.input_path = self._data_path
task = pretrain.VideoSSLPretrainTask(
config.task)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = orbit.utils.make_distributed_dataset(
strategy,
functools.partial(task.build_inputs),
config.task.train_data)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
self.assertIn('total_loss', logs)
self.assertIn('reg_loss', logs)
self.assertIn('contrast_acc', logs)
self.assertIn('contrast_entropy', logs)
def test_task_factory(self):
config = exp_factory.get_exp_config('video_ssl_pretrain_kinetics600')
task = task_factory.get_task(config.task)
self.assertIs(type(task), pretrain.VideoSSLPretrainTask)
if __name__ == '__main__':
tf.test.main()
| 2,843 | 33.26506 | 80 | py |
models | models-master/official/projects/video_ssl/ops/video_ssl_preprocess_ops_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from official.projects.video_ssl.ops import video_ssl_preprocess_ops
from official.vision.ops import preprocess_ops_3d
class VideoSslPreprocessOpsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._raw_frames = tf.random.uniform((250, 256, 256, 3), minval=0,
maxval=255, dtype=tf.dtypes.int32)
self._sampled_frames = self._raw_frames[:16]
self._frames = preprocess_ops_3d.normalize_image(
self._sampled_frames, False, tf.float32)
def test_sample_ssl_sequence(self):
sampled_seq = video_ssl_preprocess_ops.sample_ssl_sequence(
self._raw_frames, 16, True, 2)
self.assertAllEqual(sampled_seq.shape, (32, 256, 256, 3))
def test_random_color_jitter_3d(self):
jittered_clip = video_ssl_preprocess_ops.random_color_jitter_3d(
self._frames)
self.assertAllEqual(jittered_clip.shape, (16, 256, 256, 3))
def test_random_blur_3d(self):
blurred_clip = video_ssl_preprocess_ops.random_blur_3d(
self._frames, 256, 256)
self.assertAllEqual(blurred_clip.shape, (16, 256, 256, 3))
if __name__ == '__main__':
tf.test.main()
| 1,779 | 36.87234 | 75 | py |
models | models-master/official/projects/video_ssl/ops/video_ssl_preprocess_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for customed ops for video ssl."""
import functools
from typing import Optional
import tensorflow as tf
def random_apply(func, p, x):
"""Randomly apply function func to x with probability p."""
return tf.cond(
tf.less(tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32),
tf.cast(p, tf.float32)),
lambda: func(x),
lambda: x)
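# Tiny illustrative use of `random_apply` (a sketch, not called elsewhere):
# flip an image left-right with probability 0.5.
def _example_random_apply(image: tf.Tensor) -> tf.Tensor:
  return random_apply(tf.image.flip_left_right, p=0.5, x=image)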
def random_brightness(image, max_delta):
"""Distort brightness of image (SimCLRv2 style)."""
factor = tf.random.uniform(
[], tf.maximum(1.0 - max_delta, 0), 1.0 + max_delta)
image = image * factor
return image
def random_solarization(image, p=0.2):
"""Random solarize image."""
def _transform(image):
image = image * tf.cast(tf.less(image, 0.5), dtype=image.dtype) + (
1.0 - image) * tf.cast(tf.greater_equal(image, 0.5), dtype=image.dtype)
return image
return random_apply(_transform, p=p, x=image)
def to_grayscale(image, keep_channels=True):
"""Turn the input image to gray scale.
Args:
image: The input image tensor.
    keep_channels: Whether to keep the original number of channels. If true,
      the grayscale result is tiled to three channels; if false, the output
      has a single channel.
Returns:
The distorted image tensor.
"""
image = tf.image.rgb_to_grayscale(image)
if keep_channels:
image = tf.tile(image, [1, 1, 3])
return image
def color_jitter(image, strength, random_order=True):
"""Distorts the color of the image (SimCLRv2 style).
Args:
image: The input image tensor.
strength: The floating number for the strength of the color augmentation.
random_order: A bool, specifying whether to randomize the jittering order.
Returns:
The distorted image tensor.
"""
brightness = 0.8 * strength
contrast = 0.8 * strength
saturation = 0.8 * strength
hue = 0.2 * strength
if random_order:
return color_jitter_rand(
image, brightness, contrast, saturation, hue)
else:
return color_jitter_nonrand(
image, brightness, contrast, saturation, hue)
def color_jitter_nonrand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0):
"""Distorts the color of the image (jittering order is fixed, SimCLRv2 style).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x, brightness, contrast, saturation, hue):
"""Apply the i-th transformation."""
if brightness != 0 and i == 0:
x = random_brightness(x, max_delta=brightness)
elif contrast != 0 and i == 1:
x = tf.image.random_contrast(
x, lower=1-contrast, upper=1+contrast)
elif saturation != 0 and i == 2:
x = tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
elif hue != 0:
x = tf.image.random_hue(x, max_delta=hue)
return x
for i in range(4):
image = apply_transform(i, image, brightness, contrast, saturation, hue)
image = tf.clip_by_value(image, 0., 1.)
return image
def color_jitter_rand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0):
"""Distorts the color of the image (jittering order is random, SimCLRv2 style).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x):
"""Apply the i-th transformation."""
def brightness_transform():
if brightness == 0:
return x
else:
return random_brightness(x, max_delta=brightness)
def contrast_transform():
if contrast == 0:
return x
else:
return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
def saturation_transform():
if saturation == 0:
return x
else:
return tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
def hue_transform():
if hue == 0:
return x
else:
return tf.image.random_hue(x, max_delta=hue)
# pylint:disable=g-long-lambda
x = tf.cond(
tf.less(i, 2), lambda: tf.cond(
tf.less(i, 1), brightness_transform, contrast_transform),
lambda: tf.cond(tf.less(i, 3), saturation_transform, hue_transform))
      # pylint:enable=g-long-lambda
return x
perm = tf.random.shuffle(tf.range(4))
for i in range(4):
image = apply_transform(perm[i], image)
image = tf.clip_by_value(image, 0., 1.)
return image
def random_color_jitter_3d(frames):
"""Applies temporally consistent color jittering to one video clip.
Args:
frames: `Tensor` of shape [num_frames, height, width, channels].
Returns:
    A tensor of shape [num_frames, height, width, channels], color jittered
    with the same operations applied consistently across frames.
"""
def random_color_jitter(image, p=1.0):
def _transform(image):
color_jitter_t = functools.partial(
color_jitter, strength=1.0)
image = random_apply(color_jitter_t, p=0.8, x=image)
return random_apply(to_grayscale, p=0.2, x=image)
return random_apply(_transform, p=p, x=image)
  num_frames, height, width, channels = frames.shape.as_list()
  big_image = tf.reshape(frames, [num_frames * height, width, channels])
  big_image = random_color_jitter(big_image)
  return tf.reshape(big_image, [num_frames, height, width, channels])
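# Minimal sketch: the temporal consistency above comes from stacking all frames
# into one tall image before jittering, so every frame receives exactly the
# same randomly drawn color transform. Shapes below are arbitrary assumptions.
def _example_random_color_jitter_3d() -> tf.Tensor:
  clip = tf.random.uniform([8, 16, 16, 3])  # [num_frames, height, width, 3].
  return random_color_jitter_3d(clip)  # Same shape, consistently jittered.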
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
"""Blurs the given image with separable convolution.
Args:
image: Tensor of shape [height, width, channels] and dtype float to blur.
    kernel_size: Integer Tensor for the size of the blur kernel. This should
      be an odd number; if it is even, the actual kernel size will be
      kernel_size + 1.
sigma: Sigma value for gaussian operator.
padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
Returns:
A Tensor representing the blurred image.
"""
radius = tf.cast(kernel_size / 2, dtype=tf.int32)
kernel_size = radius * 2 + 1
x = tf.cast(tf.range(-radius, radius + 1), dtype=tf.float32)
blur_filter = tf.exp(
-tf.pow(x, 2.0) / (2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
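# Illustrative sketch of the separable blur above: a requested kernel_size of 6
# becomes an effective 7-tap kernel (radius 3), applied once vertically and
# once horizontally. The image size here is an arbitrary assumption.
def _example_gaussian_blur() -> tf.Tensor:
  image = tf.random.uniform([32, 32, 3])
  return gaussian_blur(image, kernel_size=6, sigma=1.0)  # Shape: [32, 32, 3].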
def random_blur(image, height, width, p=1.0):
"""Randomly blur an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
del width
def _transform(image):
sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
return gaussian_blur(
image, kernel_size=height//10, sigma=sigma, padding='SAME')
return random_apply(_transform, p=p, x=image)
def random_blur_3d(frames, height, width, blur_probability=0.5):
"""Apply efficient batch data transformations.
Args:
frames: `Tensor` of shape [timesteps, height, width, 3].
height: the height of image.
width: the width of image.
    blur_probability: the probability of applying the blur operator.
Returns:
Preprocessed feature list.
"""
def generate_selector(p, bsz):
shape = [bsz, 1, 1, 1]
selector = tf.cast(
tf.less(tf.random.uniform(shape, 0, 1, dtype=tf.float32), p),
tf.float32)
return selector
frames_new = random_blur(frames, height, width, p=1.)
selector = generate_selector(blur_probability, 1)
frames = frames_new * selector + frames * (1 - selector)
frames = tf.clip_by_value(frames, 0., 1.)
return frames
def _sample_or_pad_sequence_indices(sequence: tf.Tensor,
num_steps: int,
stride: int,
offset: tf.Tensor) -> tf.Tensor:
"""Returns indices to take for sampling or padding sequences to fixed size."""
sequence_length = tf.shape(sequence)[0]
sel_idx = tf.range(sequence_length)
# Repeats sequence until num_steps are available in total.
max_length = num_steps * stride + offset
num_repeats = tf.math.floordiv(
max_length + sequence_length - 1, sequence_length)
sel_idx = tf.tile(sel_idx, [num_repeats])
steps = tf.range(offset, offset + num_steps * stride, stride)
return tf.gather(sel_idx, steps)
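# Worked example of the index helper above (illustrative only): an 8-step
# sequence sampled with num_steps=4, stride=3 and offset=2 wraps around the
# sequence, yielding indices [2, 5, 0, 3].
def _example_sample_or_pad_sequence_indices() -> tf.Tensor:
  return _sample_or_pad_sequence_indices(
      tf.range(8), num_steps=4, stride=3, offset=tf.constant(2))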
def sample_ssl_sequence(sequence: tf.Tensor,
num_steps: int,
random: bool,
stride: int = 1,
num_windows: Optional[int] = 2) -> tf.Tensor:
"""Samples two segments of size num_steps randomly from a given sequence.
  Currently it only supports images, and is specifically designed for video
  self-supervised learning.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_steps: Number of steps (e.g. frames) to take.
    random: A boolean indicating whether to randomly sample the windows. If
      True, the offsets are randomized. Only True is supported.
stride: Distance to sample between timesteps.
    num_windows: Number of windows (clips) sampled.
Returns:
    A single Tensor whose first dimension is num_windows * num_steps,
    containing the sampled segments.
"""
sequence_length = tf.shape(sequence)[0]
sequence_length = tf.cast(sequence_length, tf.float32)
if random:
max_offset = tf.cond(
tf.greater(sequence_length, (num_steps - 1) * stride),
lambda: sequence_length - (num_steps - 1) * stride,
lambda: sequence_length)
max_offset = tf.cast(max_offset, dtype=tf.float32)
def cdf(k, power=1.0):
"""Cumulative distribution function for x^power."""
p = -tf.math.pow(k, power + 1) / (
power * tf.math.pow(max_offset, power + 1)) + k * (power + 1) / (
power * max_offset)
return p
u = tf.random.uniform(())
k_low = tf.constant(0, dtype=tf.float32)
k_up = max_offset
k = tf.math.floordiv(max_offset, 2.0)
c = lambda k_low, k_up, k: tf.greater(tf.math.abs(k_up - k_low), 1.0)
# pylint:disable=g-long-lambda
b = lambda k_low, k_up, k: tf.cond(
tf.greater(cdf(k), u),
lambda: [k_low, k, tf.math.floordiv(k + k_low, 2.0)],
lambda: [k, k_up, tf.math.floordiv(k_up + k, 2.0)])
_, _, k = tf.while_loop(c, b, [k_low, k_up, k])
delta = tf.cast(k, tf.int32)
max_offset = tf.cast(max_offset, tf.int32)
sequence_length = tf.cast(sequence_length, tf.int32)
choice_1 = tf.cond(
tf.equal(max_offset, sequence_length),
lambda: tf.random.uniform((),
maxval=tf.cast(max_offset, dtype=tf.int32),
dtype=tf.int32),
lambda: tf.random.uniform((),
maxval=tf.cast(max_offset - delta,
dtype=tf.int32),
dtype=tf.int32))
choice_2 = tf.cond(
tf.equal(max_offset, sequence_length),
lambda: tf.random.uniform((),
maxval=tf.cast(max_offset, dtype=tf.int32),
dtype=tf.int32),
lambda: choice_1 + delta)
    # pylint:enable=g-long-lambda
shuffle_choice = tf.random.shuffle((choice_1, choice_2))
offset_1 = shuffle_choice[0]
offset_2 = shuffle_choice[1]
else:
raise NotImplementedError
indices_1 = _sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
stride=stride,
offset=offset_1)
indices_2 = _sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
stride=stride,
offset=offset_2)
indices = tf.concat([indices_1, indices_2], axis=0)
indices.set_shape((num_windows * num_steps,))
output = tf.gather(sequence, indices)
return output
| 14,031 | 33.561576 | 81 | py |
models | models-master/official/projects/video_ssl/ops/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/video_ssl/losses/losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define losses."""
# Import libraries
import tensorflow as tf
from tensorflow.compiler.tf2xla.python import xla
def contrastive_loss(hidden,
num_replicas,
normalize_hidden,
temperature,
model,
weight_decay):
"""Computes contrastive loss.
Args:
hidden: embedding of video clips after projection head.
num_replicas: number of distributed replicas.
normalize_hidden: whether or not to l2 normalize the hidden vector.
temperature: temperature in the InfoNCE contrastive loss.
model: keras model for calculating weight decay.
weight_decay: weight decay parameter.
Returns:
    A dict containing the total loss, the contrastive loss, the regularization
    loss, and the contrastive accuracy and entropy metrics.
"""
large_num = 1e9
hidden1, hidden2 = tf.split(hidden, num_or_size_splits=2, axis=0)
if normalize_hidden:
hidden1 = tf.math.l2_normalize(hidden1, -1)
hidden2 = tf.math.l2_normalize(hidden2, -1)
batch_size = tf.shape(hidden1)[0]
if num_replicas == 1:
# This is the local version
hidden1_large = hidden1
hidden2_large = hidden2
labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
masks = tf.one_hot(tf.range(batch_size), batch_size)
else:
# This is the cross-tpu version.
hidden1_large = tpu_cross_replica_concat(hidden1, num_replicas)
hidden2_large = tpu_cross_replica_concat(hidden2, num_replicas)
enlarged_batch_size = tf.shape(hidden1_large)[0]
replica_id = tf.cast(tf.cast(xla.replica_id(), tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels = tf.one_hot(labels_idx, enlarged_batch_size * 2)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
logits_aa = logits_aa - tf.cast(masks, logits_aa.dtype) * large_num
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
logits_bb = logits_bb - tf.cast(masks, logits_bb.dtype) * large_num
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
loss_a = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ab, logits_aa], 1)))
loss_b = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ba, logits_bb], 1)))
loss = loss_a + loss_b
l2_loss = weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in model.trainable_variables
if 'kernel' in v.name
])
total_loss = loss + tf.cast(l2_loss, loss.dtype)
contrast_prob = tf.nn.softmax(logits_ab)
contrast_entropy = - tf.reduce_mean(
tf.reduce_sum(contrast_prob * tf.math.log(contrast_prob + 1e-8), -1))
contrast_acc = tf.equal(tf.argmax(labels, 1), tf.argmax(logits_ab, axis=1))
contrast_acc = tf.reduce_mean(tf.cast(contrast_acc, tf.float32))
return {
'total_loss': total_loss,
'contrastive_loss': loss,
'reg_loss': l2_loss,
'contrast_acc': contrast_acc,
'contrast_entropy': contrast_entropy,
}
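# Illustrative single-replica sketch (all sizes are assumptions): `hidden`
# stacks the two augmented views of the same clips on axis 0, so a batch of 4
# clips with a 128-d projection yields per-direction logits of shape [4, 8].
# The toy model below exists only to provide a kernel variable for the
# weight-decay term.
def _example_contrastive_loss():
  hidden = tf.random.normal([8, 128])  # 2 views * batch of 4 clips.
  toy_model = tf.keras.Sequential(
      [tf.keras.layers.Dense(4, input_shape=(4,))])
  return contrastive_loss(
      hidden,
      num_replicas=1,
      normalize_hidden=True,
      temperature=0.1,
      model=toy_model,
      weight_decay=1e-6)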
def tpu_cross_replica_concat(tensor, num_replicas):
"""Reduce a concatenation of the `tensor` across TPU cores.
Args:
tensor: tensor to concatenate.
num_replicas: number of TPU device replicas.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
with tf.name_scope('tpu_cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[xla.replica_id()]],
updates=[tensor],
shape=[num_replicas] + tensor.shape.as_list())
# As every value is only present on one replica and 0 in all others, adding
# them all together will result in the full tensor on all replicas.
replica_context = tf.distribute.get_replica_context()
ext_tensor = replica_context.all_reduce(tf.distribute.ReduceOp.SUM,
ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
| 5,173 | 37.044118 | 79 | py |
models | models-master/official/projects/video_ssl/losses/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/panoptic/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/panoptic/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic MaskRCNN trainer."""
from absl import app
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.projects.panoptic.configs import panoptic_deeplab
from official.projects.panoptic.configs import panoptic_maskrcnn
from official.projects.panoptic.tasks import panoptic_deeplab as panoptic_deeplab_task
from official.projects.panoptic.tasks import panoptic_maskrcnn as panoptic_maskrcnn_task
from official.projects.uvit import configs
from official.projects.uvit import tasks
from official.vision import train
# pylint: enable=unused-import
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,275 | 37.666667 | 88 | py |
models | models-master/official/projects/panoptic/serving/panoptic_deeplab.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Segmentation input and model functions for serving/inference."""
from typing import List
import tensorflow as tf
from official.core import config_definitions as cfg
from official.projects.panoptic.modeling import factory
from official.projects.panoptic.modeling import panoptic_deeplab_model
from official.vision.serving import semantic_segmentation
class PanopticSegmentationModule(
semantic_segmentation.SegmentationModule):
"""Panoptic Deeplab Segmentation Module."""
def __init__(self,
params: cfg.ExperimentConfig,
*,
model: tf.keras.Model,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3):
"""Initializes panoptic segmentation module for export."""
if batch_size is None:
raise ValueError('batch_size cannot be None for panoptic segmentation '
'model.')
if not isinstance(model, panoptic_deeplab_model.PanopticDeeplabModel):
raise ValueError('PanopticSegmentationModule module not '
'implemented for {} model.'.format(type(model)))
params.task.train_data.preserve_aspect_ratio = True
super(PanopticSegmentationModule, self).__init__(
params=params,
model=model,
batch_size=batch_size,
input_image_size=input_image_size,
num_channels=num_channels)
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size + [3])
return factory.build_panoptic_deeplab(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def serve(self, images: tf.Tensor):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
Tensor holding detection output logits.
"""
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images_spec = tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
outputs = self.model.call(
inputs=images, image_info=image_info, training=False)
masks = outputs['segmentation_outputs']
masks = tf.image.resize(masks, self._input_image_size, method='bilinear')
classes = tf.math.argmax(masks, axis=-1)
scores = tf.nn.softmax(masks, axis=-1)
final_outputs = {
'semantic_logits': masks,
'semantic_scores': scores,
'semantic_classes': classes,
'image_info': image_info,
'panoptic_category_mask': outputs['category_mask'],
'panoptic_instance_mask': outputs['instance_mask'],
}
return final_outputs
| 3,757 | 35.134615 | 77 | py |
models | models-master/official/projects/panoptic/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Panoptic MaskRCNN model export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
CONFIG_FILE_PATH = XX
export_saved_model --export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--config_file=${CONFIG_FILE_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
import tensorflow as tf
from official.core import exp_factory
from official.modeling import hyperparams
# pylint: disable=unused-import
from official.projects.panoptic.configs import panoptic_deeplab as panoptic_deeplab_cfg
from official.projects.panoptic.configs import panoptic_maskrcnn as panoptic_maskrcnn_cfg
# pylint: enable=unused-import
from official.projects.panoptic.modeling import factory
from official.projects.panoptic.serving import panoptic_deeplab
from official.projects.panoptic.serving import panoptic_maskrcnn
# pylint: disable=unused-import
from official.projects.panoptic.tasks import panoptic_deeplab as panoptic_deeplab_task
from official.projects.panoptic.tasks import panoptic_maskrcnn as panoptic_maskrcnn_task
# pylint: enable=unused-import
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string('model', 'panoptic_maskrcnn',
'model type, one of panoptic_maskrcnn and panoptic_deeplab')
flags.DEFINE_string('experiment', 'panoptic_fpn_coco',
'experiment type, e.g. panoptic_fpn_coco')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
    help='YAML/JSON files which specify overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
flags.DEFINE_integer('batch_size', None, 'The batch size.')
flags.DEFINE_string('input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example`.')
flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
def main(_):
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
input_image_size = [int(x) for x in FLAGS.input_image_size.split(',')]
input_specs = tf.keras.layers.InputSpec(
shape=[FLAGS.batch_size, *input_image_size, 3])
if FLAGS.model == 'panoptic_deeplab':
build_model = factory.build_panoptic_deeplab
panoptic_module = panoptic_deeplab.PanopticSegmentationModule
elif FLAGS.model == 'panoptic_maskrcnn':
build_model = factory.build_panoptic_maskrcnn
panoptic_module = panoptic_maskrcnn.PanopticSegmentationModule
else:
raise ValueError('Unsupported model type: %s' % FLAGS.model)
model = build_model(input_specs=input_specs, model_config=params.task.model)
export_module = panoptic_module(
params=params,
model=model,
batch_size=FLAGS.batch_size,
      input_image_size=input_image_size,
num_channels=3)
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=input_image_size,
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
export_module=export_module,
export_checkpoint_subdir='checkpoint',
export_saved_model_subdir='saved_model')
if __name__ == '__main__':
app.run(main)
| 5,183 | 38.572519 | 89 | py |
models | models-master/official/projects/panoptic/serving/panoptic_maskrcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Segmentation input and model functions for serving/inference."""
from typing import List
import tensorflow as tf
from official.core import config_definitions as cfg
from official.projects.panoptic.modeling import panoptic_maskrcnn_model
from official.vision.serving import detection
class PanopticSegmentationModule(detection.DetectionModule):
"""Panoptic Segmentation Module."""
def __init__(self,
params: cfg.ExperimentConfig,
*,
model: tf.keras.Model,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3):
"""Initializes panoptic segmentation module for export."""
if batch_size is None:
raise ValueError('batch_size cannot be None for panoptic segmentation '
'model.')
if not isinstance(model, panoptic_maskrcnn_model.PanopticMaskRCNNModel):
      raise ValueError('PanopticSegmentationModule is not implemented for '
                       '{} model.'.format(type(model)))
super().__init__(
params=params,
model=model,
batch_size=batch_size,
input_image_size=input_image_size,
num_channels=num_channels)
def serve(self, images: tf.Tensor):
"""Casts image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding detection outputs, semantic segmentation outputs,
      and (optionally) panoptic category/instance masks.
"""
model_params = self.params.task.model
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
# Tensor Specs for map_fn outputs (images, anchor_boxes, and image_info).
images_spec = tf.TensorSpec(shape=self._input_image_size + [3],
dtype=tf.float32)
num_anchors = model_params.anchor.num_scales * len(
model_params.anchor.aspect_ratios) * 4
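    # Note: `num_anchors` already folds in the 4 box coordinates per anchor
    # location, so each level's spec below has last dimension
    # anchors_per_location * 4.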
anchor_shapes = []
for level in range(model_params.min_level, model_params.max_level + 1):
anchor_level_spec = tf.TensorSpec(
shape=[
self._input_image_size[0] // 2**level,
self._input_image_size[1] // 2**level, num_anchors
],
dtype=tf.float32)
anchor_shapes.append((str(level), anchor_level_spec))
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, anchor_boxes, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, dict(anchor_shapes),
image_info_spec),
parallel_iterations=32))
    # To work around the keras.Model limitation when saving a model with layers
    # that take multiple inputs, we use `model.call` here to trigger the
    # forward pass. Note that this bypasses some of the Keras magic that
    # happens in `__call__`.
detections = self.model.call(
images=images,
image_info=image_info,
anchor_boxes=anchor_boxes,
training=False)
detections.pop('rpn_boxes')
detections.pop('rpn_scores')
detections.pop('cls_outputs')
detections.pop('box_outputs')
detections.pop('backbone_features')
detections.pop('decoder_features')
if model_params.detection_generator.apply_nms:
# Normalize detection boxes to [0, 1]. Here we first map them to the
# original image size, then normalize them to [0, 1].
detections['detection_boxes'] = (
detections['detection_boxes'] /
tf.tile(image_info[:, 2:3, :], [1, 1, 2]) /
tf.tile(image_info[:, 0:1, :], [1, 1, 2]))
final_outputs = {
'detection_boxes': detections['detection_boxes'],
'detection_scores': detections['detection_scores'],
'detection_classes': detections['detection_classes'],
'num_detections': detections['num_detections']
}
if 'detection_outer_boxes' in detections:
detections['detection_outer_boxes'] = (
detections['detection_outer_boxes'] /
tf.tile(image_info[:, 2:3, :], [1, 1, 2]) /
tf.tile(image_info[:, 0:1, :], [1, 1, 2]))
final_outputs['detection_outer_boxes'] = (
detections['detection_outer_boxes'])
else:
final_outputs = {
'decoded_boxes': detections['decoded_boxes'],
'decoded_box_scores': detections['decoded_box_scores']
}
masks = detections['segmentation_outputs']
masks = tf.image.resize(masks, self._input_image_size, method='bilinear')
classes = tf.math.argmax(masks, axis=-1)
if self.params.task.losses.semantic_segmentation_use_binary_cross_entropy:
scores = tf.nn.sigmoid(masks)
else:
scores = tf.nn.softmax(masks, axis=-1)
final_outputs.update({
'detection_masks': detections['detection_masks'],
'semantic_logits': masks,
'semantic_scores': scores,
'semantic_classes': classes,
'image_info': image_info
})
if model_params.generate_panoptic_masks:
final_outputs.update({
'panoptic_category_mask':
detections['panoptic_outputs']['category_mask'],
'panoptic_instance_mask':
detections['panoptic_outputs']['instance_mask'],
})
return final_outputs
| 5,945 | 37.115385 | 79 | py |
models | models-master/official/projects/panoptic/configs/panoptic_deeplab.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Deeplab configuration definition."""
import dataclasses
import os
from typing import List, Optional, Union
import numpy as np
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import common
from official.vision.configs import decoders
from official.vision.configs import backbones
_COCO_INPUT_PATH_BASE = 'coco/tfrecords'
_COCO_TRAIN_EXAMPLES = 118287
_COCO_VAL_EXAMPLES = 5000
@dataclasses.dataclass
class Parser(hyperparams.Config):
"""Panoptic deeplab parser."""
ignore_label: int = 0
# If resize_eval_groundtruth is set to False, original image sizes are used
# for eval. In that case, groundtruth_padded_size has to be specified too to
# allow for batching the variable input sizes of images.
resize_eval_groundtruth: bool = True
groundtruth_padded_size: List[int] = dataclasses.field(default_factory=list)
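  # For example, the COCO eval configs below pass
  # Parser(resize_eval_groundtruth=False, groundtruth_padded_size=[640, 640]).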
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
aug_rand_hflip: bool = True
aug_type: common.Augmentation = dataclasses.field(
default_factory=common.Augmentation
)
sigma: float = 8.0
small_instance_area_threshold: int = 4096
small_instance_weight: float = 3.0
dtype = 'float32'
@dataclasses.dataclass
class TfExampleDecoder(common.TfExampleDecoder):
"""A simple TF Example decoder config."""
panoptic_category_mask_key: str = 'image/panoptic/category_mask'
panoptic_instance_mask_key: str = 'image/panoptic/instance_mask'
@dataclasses.dataclass
class DataDecoder(common.DataDecoder):
"""Data decoder config."""
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
decoder: DataDecoder = dataclasses.field(default_factory=DataDecoder)
parser: Parser = dataclasses.field(default_factory=Parser)
input_path: str = ''
drop_remainder: bool = True
file_type: str = 'tfrecord'
is_training: bool = True
global_batch_size: int = 1
@dataclasses.dataclass
class PanopticDeeplabHead(hyperparams.Config):
"""Panoptic Deeplab head config."""
level: int = 3
num_convs: int = 2
num_filters: int = 256
kernel_size: int = 5
use_depthwise_convolution: bool = False
upsample_factor: int = 1
low_level: List[int] = dataclasses.field(default_factory=lambda: [3, 2])
low_level_num_filters: List[int] = dataclasses.field(
default_factory=lambda: [64, 32])
fusion_num_output_filters: int = 256
@dataclasses.dataclass
class SemanticHead(PanopticDeeplabHead):
"""Semantic head config."""
prediction_kernel_size: int = 1
@dataclasses.dataclass
class InstanceHead(PanopticDeeplabHead):
"""Instance head config."""
prediction_kernel_size: int = 1
@dataclasses.dataclass
class PanopticDeeplabPostProcessor(hyperparams.Config):
"""Panoptic Deeplab PostProcessing config."""
output_size: List[int] = dataclasses.field(
default_factory=list)
center_score_threshold: float = 0.1
thing_class_ids: List[int] = dataclasses.field(default_factory=list)
label_divisor: int = 256 * 256 * 256
stuff_area_limit: int = 4096
ignore_label: int = 0
nms_kernel: int = 7
keep_k_centers: int = 200
rescale_predictions: bool = True
@dataclasses.dataclass
class PanopticDeeplab(hyperparams.Config):
"""Panoptic Deeplab model config."""
num_classes: int = 2
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 6
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone(
type='resnet', resnet=backbones.ResNet()
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder(
type='aspp', aspp=decoders.ASPP(level=3)
)
)
semantic_head: SemanticHead = dataclasses.field(default_factory=SemanticHead)
instance_head: InstanceHead = dataclasses.field(default_factory=InstanceHead)
shared_decoder: bool = False
generate_panoptic_masks: bool = True
post_processor: PanopticDeeplabPostProcessor = dataclasses.field(
default_factory=PanopticDeeplabPostProcessor
)
@dataclasses.dataclass
class Losses(hyperparams.Config):
label_smoothing: float = 0.0
ignore_label: int = 0
class_weights: List[float] = dataclasses.field(default_factory=list)
l2_weight_decay: float = 1e-4
top_k_percent_pixels: float = 0.15
segmentation_loss_weight: float = 1.0
center_heatmap_loss_weight: float = 200
center_offset_loss_weight: float = 0.01
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
"""Evaluation config."""
ignored_label: int = 0
max_instances_per_category: int = 256
offset: int = 256 * 256 * 256
is_thing: List[float] = dataclasses.field(
default_factory=list)
rescale_predictions: bool = True
report_per_class_pq: bool = False
report_per_class_iou: bool = False
report_train_mean_iou: bool = True # Turning this off can speed up training.
@dataclasses.dataclass
class PanopticDeeplabTask(cfg.TaskConfig):
"""Panoptic deeplab task config."""
model: PanopticDeeplab = dataclasses.field(default_factory=PanopticDeeplab)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
is_training=False, drop_remainder=False
)
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
evaluation: Evaluation = dataclasses.field(default_factory=Evaluation)
@exp_factory.register_config_factory('panoptic_deeplab_resnet_coco')
def panoptic_deeplab_resnet_coco() -> cfg.ExperimentConfig:
"""COCO panoptic segmentation with Panoptic Deeplab."""
train_steps = 200000
train_batch_size = 64
eval_batch_size = 1
steps_per_epoch = _COCO_TRAIN_EXAMPLES // train_batch_size
validation_steps = _COCO_VAL_EXAMPLES // eval_batch_size
num_panoptic_categories = 201
num_thing_categories = 91
ignore_label = 0
is_thing = [False]
for idx in range(1, num_panoptic_categories):
is_thing.append(True if idx <= num_thing_categories else False)
input_size = [640, 640, 3]
output_stride = 16
aspp_dilation_rates = [6, 12, 18]
multigrid = [1, 2, 4]
stem_type = 'v1'
level = int(np.math.log2(output_stride))
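  # e.g. output_stride=16 -> level=4; this level is used below for the ASPP
  # decoder and the prediction heads.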
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype='bfloat16', enable_xla=True),
task=PanopticDeeplabTask(
init_checkpoint='gs://tf_model_garden/vision/panoptic/panoptic_deeplab/imagenet/resnet50_v1/ckpt-436800', # pylint: disable=line-too-long
init_checkpoint_modules=['backbone'],
model=PanopticDeeplab(
num_classes=num_panoptic_categories,
input_size=input_size,
backbone=backbones.Backbone(
type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
model_id=50,
stem_type=stem_type,
output_stride=output_stride,
multigrid=multigrid,
se_ratio=0.25,
last_stage_repeats=1,
stochastic_depth_drop_rate=0.2)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level,
num_filters=256,
pool_kernel_size=input_size[:2],
dilation_rates=aspp_dilation_rates,
use_depthwise_convolution=True,
dropout_rate=0.1)),
semantic_head=SemanticHead(
level=level,
num_convs=1,
num_filters=256,
kernel_size=5,
use_depthwise_convolution=True,
upsample_factor=1,
low_level=[3, 2],
low_level_num_filters=[64, 32],
fusion_num_output_filters=256,
prediction_kernel_size=1),
instance_head=InstanceHead(
level=level,
num_convs=1,
num_filters=32,
kernel_size=5,
use_depthwise_convolution=True,
upsample_factor=1,
low_level=[3, 2],
low_level_num_filters=[32, 16],
fusion_num_output_filters=128,
prediction_kernel_size=1),
shared_decoder=False,
generate_panoptic_masks=True,
post_processor=PanopticDeeplabPostProcessor(
output_size=input_size[:2],
center_score_threshold=0.1,
thing_class_ids=list(range(1, num_thing_categories)),
label_divisor=256,
stuff_area_limit=4096,
ignore_label=ignore_label,
nms_kernel=41,
keep_k_centers=200,
rescale_predictions=True)),
losses=Losses(
label_smoothing=0.0,
ignore_label=ignore_label,
l2_weight_decay=0.0,
top_k_percent_pixels=0.2,
segmentation_loss_weight=1.0,
center_heatmap_loss_weight=200,
center_offset_loss_weight=0.01),
train_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_scale_min=0.5,
aug_scale_max=1.5,
aug_rand_hflip=True,
aug_type=common.Augmentation(
type='autoaug',
autoaug=common.AutoAugment(
augmentation_name='panoptic_deeplab_policy')),
sigma=8.0,
small_instance_area_threshold=4096,
small_instance_weight=3.0)),
validation_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
parser=Parser(
resize_eval_groundtruth=False,
groundtruth_padded_size=[640, 640],
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_hflip=False,
aug_type=None,
sigma=8.0,
small_instance_area_threshold=4096,
small_instance_weight=3.0),
drop_remainder=False),
evaluation=Evaluation(
ignored_label=ignore_label,
max_instances_per_category=256,
offset=256*256*256,
is_thing=is_thing,
rescale_predictions=True,
report_per_class_pq=False,
report_per_class_iou=False,
report_train_mean_iou=False)),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=validation_steps,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.0005,
'decay_steps': train_steps,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('panoptic_deeplab_mobilenetv3_large_coco')
def panoptic_deeplab_mobilenetv3_large_coco() -> cfg.ExperimentConfig:
"""COCO panoptic segmentation with Panoptic Deeplab."""
train_steps = 200000
train_batch_size = 64
eval_batch_size = 1
steps_per_epoch = _COCO_TRAIN_EXAMPLES // train_batch_size
validation_steps = _COCO_VAL_EXAMPLES // eval_batch_size
num_panoptic_categories = 201
num_thing_categories = 91
ignore_label = 0
is_thing = [False]
for idx in range(1, num_panoptic_categories):
is_thing.append(True if idx <= num_thing_categories else False)
input_size = [640, 640, 3]
output_stride = 16
aspp_dilation_rates = [6, 12, 18]
level = int(np.math.log2(output_stride))
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype='float32', enable_xla=True),
task=PanopticDeeplabTask(
init_checkpoint='gs://tf_model_garden/vision/panoptic/panoptic_deeplab/imagenet/mobilenetv3_large/ckpt-156000',
init_checkpoint_modules=['backbone'],
model=PanopticDeeplab(
num_classes=num_panoptic_categories,
input_size=input_size,
backbone=backbones.Backbone(
type='mobilenet', mobilenet=backbones.MobileNet(
model_id='MobileNetV3Large',
filter_size_scale=1.0,
stochastic_depth_drop_rate=0.0,
output_stride=output_stride)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level,
num_filters=256,
pool_kernel_size=input_size[:2],
dilation_rates=aspp_dilation_rates,
use_depthwise_convolution=True,
dropout_rate=0.1)),
semantic_head=SemanticHead(
level=level,
num_convs=1,
num_filters=256,
kernel_size=5,
use_depthwise_convolution=True,
upsample_factor=1,
low_level=[3, 2],
low_level_num_filters=[64, 32],
fusion_num_output_filters=256,
prediction_kernel_size=1),
instance_head=InstanceHead(
level=level,
num_convs=1,
num_filters=32,
kernel_size=5,
use_depthwise_convolution=True,
upsample_factor=1,
low_level=[3, 2],
low_level_num_filters=[32, 16],
fusion_num_output_filters=128,
prediction_kernel_size=1),
shared_decoder=False,
generate_panoptic_masks=True,
post_processor=PanopticDeeplabPostProcessor(
output_size=input_size[:2],
center_score_threshold=0.1,
thing_class_ids=list(range(1, num_thing_categories)),
label_divisor=256,
stuff_area_limit=4096,
ignore_label=ignore_label,
nms_kernel=41,
keep_k_centers=200,
rescale_predictions=True)),
losses=Losses(
label_smoothing=0.0,
ignore_label=ignore_label,
l2_weight_decay=0.0,
top_k_percent_pixels=0.2,
segmentation_loss_weight=1.0,
center_heatmap_loss_weight=200,
center_offset_loss_weight=0.01),
train_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_scale_min=0.5,
aug_scale_max=2.0,
aug_rand_hflip=True,
aug_type=common.Augmentation(
type='autoaug',
autoaug=common.AutoAugment(
augmentation_name='panoptic_deeplab_policy')),
sigma=8.0,
small_instance_area_threshold=4096,
small_instance_weight=3.0)),
validation_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
parser=Parser(
resize_eval_groundtruth=False,
groundtruth_padded_size=[640, 640],
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_hflip=False,
aug_type=None,
sigma=8.0,
small_instance_area_threshold=4096,
small_instance_weight=3.0),
drop_remainder=False),
evaluation=Evaluation(
ignored_label=ignore_label,
max_instances_per_category=256,
offset=256*256*256,
is_thing=is_thing,
rescale_predictions=True,
report_per_class_pq=False,
report_per_class_iou=False,
report_train_mean_iou=False)),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=validation_steps,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.001,
'decay_steps': train_steps,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('panoptic_deeplab_mobilenetv3_small_coco')
def panoptic_deeplab_mobilenetv3_small_coco() -> cfg.ExperimentConfig:
"""COCO panoptic segmentation with Panoptic Deeplab."""
train_steps = 200000
train_batch_size = 64
eval_batch_size = 1
steps_per_epoch = _COCO_TRAIN_EXAMPLES // train_batch_size
validation_steps = _COCO_VAL_EXAMPLES // eval_batch_size
num_panoptic_categories = 201
num_thing_categories = 91
ignore_label = 0
is_thing = [False]
for idx in range(1, num_panoptic_categories):
is_thing.append(True if idx <= num_thing_categories else False)
input_size = [640, 640, 3]
output_stride = 16
aspp_dilation_rates = [6, 12, 18]
level = int(np.math.log2(output_stride))
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype='float32', enable_xla=True),
task=PanopticDeeplabTask(
init_checkpoint='gs://tf_model_garden/vision/panoptic/panoptic_deeplab/imagenet/mobilenetv3_small/ckpt-312000',
init_checkpoint_modules=['backbone'],
model=PanopticDeeplab(
num_classes=num_panoptic_categories,
input_size=input_size,
backbone=backbones.Backbone(
type='mobilenet', mobilenet=backbones.MobileNet(
model_id='MobileNetV3Small',
filter_size_scale=1.0,
stochastic_depth_drop_rate=0.0,
output_stride=output_stride)),
decoder=decoders.Decoder(
type='aspp',
aspp=decoders.ASPP(
level=level,
num_filters=256,
pool_kernel_size=input_size[:2],
dilation_rates=aspp_dilation_rates,
use_depthwise_convolution=True,
dropout_rate=0.1)),
semantic_head=SemanticHead(
level=level,
num_convs=1,
num_filters=256,
kernel_size=5,
use_depthwise_convolution=True,
upsample_factor=1,
low_level=[3, 2],
low_level_num_filters=[64, 32],
fusion_num_output_filters=256,
prediction_kernel_size=1),
instance_head=InstanceHead(
level=level,
num_convs=1,
num_filters=32,
kernel_size=5,
use_depthwise_convolution=True,
upsample_factor=1,
low_level=[3, 2],
low_level_num_filters=[32, 16],
fusion_num_output_filters=128,
prediction_kernel_size=1),
shared_decoder=False,
generate_panoptic_masks=True,
post_processor=PanopticDeeplabPostProcessor(
output_size=input_size[:2],
center_score_threshold=0.1,
thing_class_ids=list(range(1, num_thing_categories)),
label_divisor=256,
stuff_area_limit=4096,
ignore_label=ignore_label,
nms_kernel=41,
keep_k_centers=200,
rescale_predictions=True)),
losses=Losses(
label_smoothing=0.0,
ignore_label=ignore_label,
l2_weight_decay=0.0,
top_k_percent_pixels=0.2,
segmentation_loss_weight=1.0,
center_heatmap_loss_weight=200,
center_offset_loss_weight=0.01),
train_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_scale_min=0.5,
aug_scale_max=2.0,
aug_rand_hflip=True,
aug_type=common.Augmentation(
type='autoaug',
autoaug=common.AutoAugment(
augmentation_name='panoptic_deeplab_policy')),
sigma=8.0,
small_instance_area_threshold=4096,
small_instance_weight=3.0)),
validation_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
parser=Parser(
resize_eval_groundtruth=False,
groundtruth_padded_size=[640, 640],
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_hflip=False,
aug_type=None,
sigma=8.0,
small_instance_area_threshold=4096,
small_instance_weight=3.0),
drop_remainder=False),
evaluation=Evaluation(
ignored_label=ignore_label,
max_instances_per_category=256,
offset=256*256*256,
is_thing=is_thing,
rescale_predictions=True,
report_per_class_pq=False,
report_per_class_iou=False,
report_train_mean_iou=False)),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=validation_steps,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.001,
'decay_steps': train_steps,
'end_learning_rate': 0.0,
'power': 0.9
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| 26,059 | 36.768116 | 148 | py |
models | models-master/official/projects/panoptic/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/panoptic/configs/panoptic_maskrcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Mask R-CNN configuration definition."""
import dataclasses
import os
from typing import List, Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.projects.deepmac_maskrcnn.configs import deep_mask_head_rcnn as deepmac_maskrcnn
from official.projects.uvit.configs import backbones as uvit_backbones
from official.vision.configs import common
from official.vision.configs import maskrcnn
from official.vision.configs import semantic_segmentation
from official.vision.configs.google import backbones
SEGMENTATION_MODEL = semantic_segmentation.SemanticSegmentationModel
SEGMENTATION_HEAD = semantic_segmentation.SegmentationHead
_COCO_INPUT_PATH_BASE = 'coco/tfrecords'
_COCO_TRAIN_EXAMPLES = 118287
_COCO_VAL_EXAMPLES = 5000
# pytype: disable=wrong-keyword-args
# pylint: disable=unexpected-keyword-arg
@dataclasses.dataclass
class Parser(maskrcnn.Parser):
"""Panoptic Mask R-CNN parser config."""
# If segmentation_resize_eval_groundtruth is set to False, original image
# sizes are used for eval. In that case,
# segmentation_groundtruth_padded_size has to be specified too to allow for
# batching the variable input sizes of images.
segmentation_resize_eval_groundtruth: bool = True
segmentation_groundtruth_padded_size: List[int] = dataclasses.field(
default_factory=list)
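  # For example, the COCO eval config below sets
  # segmentation_resize_eval_groundtruth=False together with
  # segmentation_groundtruth_padded_size=[640, 640].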
segmentation_ignore_label: int = 255
panoptic_ignore_label: int = 0
# Setting this to true will enable parsing category_mask and instance_mask.
include_panoptic_masks: bool = True
@dataclasses.dataclass
class TfExampleDecoder(common.TfExampleDecoder):
"""A simple TF Example decoder config."""
# Setting this to true will enable decoding category_mask and instance_mask.
include_panoptic_masks: bool = True
panoptic_category_mask_key: str = 'image/panoptic/category_mask'
panoptic_instance_mask_key: str = 'image/panoptic/instance_mask'
@dataclasses.dataclass
class DataDecoder(common.DataDecoder):
"""Data decoder config."""
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
@dataclasses.dataclass
class DataConfig(maskrcnn.DataConfig):
"""Input config for training."""
decoder: DataDecoder = dataclasses.field(default_factory=DataDecoder)
parser: Parser = dataclasses.field(default_factory=Parser)
@dataclasses.dataclass
class PanopticSegmentationGenerator(hyperparams.Config):
"""Panoptic segmentation generator config."""
output_size: List[int] = dataclasses.field(
default_factory=list)
mask_binarize_threshold: float = 0.5
score_threshold: float = 0.5
things_overlap_threshold: float = 0.5
stuff_area_threshold: float = 4096.0
things_class_label: int = 1
void_class_label: int = 0
void_instance_id: int = 0
rescale_predictions: bool = False
@dataclasses.dataclass
class Backbone(backbones.Backbone):
"""Configuration for backbones.
Attributes:
type: "str", type of backbone be used, one the of fields below.
uvit: uvit backbone config.
"""
type: Optional[str] = None
uvit: uvit_backbones.VisionTransformer = dataclasses.field(
default_factory=uvit_backbones.VisionTransformer
)
@dataclasses.dataclass
class PanopticMaskRCNN(deepmac_maskrcnn.DeepMaskHeadRCNN):
"""Panoptic Mask R-CNN model config."""
backbone: Backbone = dataclasses.field(
default_factory=lambda: Backbone(type='resnet', resnet=backbones.ResNet())
)
segmentation_model: SEGMENTATION_MODEL = dataclasses.field(
default_factory=lambda: SEGMENTATION_MODEL(num_classes=2)
)
include_mask: bool = True
shared_backbone: bool = True
shared_decoder: bool = True
stuff_classes_offset: int = 0
generate_panoptic_masks: bool = True
panoptic_segmentation_generator: PanopticSegmentationGenerator = (
dataclasses.field(default_factory=PanopticSegmentationGenerator)
)
@dataclasses.dataclass
class Losses(maskrcnn.Losses):
"""Panoptic Mask R-CNN loss config."""
semantic_segmentation_label_smoothing: float = 0.0
semantic_segmentation_ignore_label: int = 255
semantic_segmentation_gt_is_matting_map: bool = False
semantic_segmentation_class_weights: List[float] = dataclasses.field(
default_factory=list)
semantic_segmentation_use_groundtruth_dimension: bool = True
# If true, use binary cross entropy (sigmoid) in loss, otherwise, use
# categorical cross entropy (softmax).
semantic_segmentation_use_binary_cross_entropy: bool = False
semantic_segmentation_top_k_percent_pixels: float = 1.0
instance_segmentation_weight: float = 1.0
semantic_segmentation_weight: float = 0.5
@dataclasses.dataclass
class PanopticQualityEvaluator(hyperparams.Config):
"""Panoptic Quality Evaluator config."""
num_categories: int = 2
ignored_label: int = 0
max_instances_per_category: int = 256
offset: int = 256 * 256 * 256
is_thing: List[float] = dataclasses.field(
default_factory=list)
rescale_predictions: bool = False
report_per_class_metrics: bool = False
@dataclasses.dataclass
class PanopticMaskRCNNTask(maskrcnn.MaskRCNNTask):
"""Panoptic Mask R-CNN task config."""
model: PanopticMaskRCNN = dataclasses.field(default_factory=PanopticMaskRCNN)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig( # pylint: disable=g-long-lambda
is_training=False, drop_remainder=False
)
)
segmentation_evaluation: semantic_segmentation.Evaluation = dataclasses.field(
default_factory=semantic_segmentation.Evaluation
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
segmentation_init_checkpoint: Optional[str] = None
# 'init_checkpoint_modules' controls the modules that need to be initialized
# from checkpoint paths given by 'init_checkpoint' and/or
# 'segmentation_init_checkpoint. Supports modules:
# 'backbone': Initialize MaskRCNN backbone
# 'segmentation_backbone': Initialize segmentation backbone
# 'segmentation_decoder': Initialize segmentation decoder
# 'all': Initialize all modules
init_checkpoint_modules: Optional[List[str]] = dataclasses.field(
default_factory=list)
panoptic_quality_evaluator: PanopticQualityEvaluator = dataclasses.field(
default_factory=PanopticQualityEvaluator
)
@exp_factory.register_config_factory('panoptic_fpn_coco')
def panoptic_fpn_coco() -> cfg.ExperimentConfig:
"""COCO panoptic segmentation with Panoptic Mask R-CNN."""
train_batch_size = 64
eval_batch_size = 8
steps_per_epoch = _COCO_TRAIN_EXAMPLES // train_batch_size
validation_steps = _COCO_VAL_EXAMPLES // eval_batch_size
# coco panoptic dataset has category ids ranging from [0-200] inclusive.
# 0 is not used and represents the background class
# ids 1-91 represent thing categories (91)
# ids 92-200 represent stuff categories (109)
# for the segmentation task, we continue using id=0 for the background
  # and map all thing categories to id=1; the remaining 109 stuff categories
  # are shifted down by an offset of 90 (num_thing_categories - 1). This
  # shifting makes all the stuff categories begin at id=2 and end at id=110.
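  # A concrete example of the mapping above: panoptic thing ids 1..91 all map
  # to semantic id 1, and panoptic stuff ids are shifted down by
  # stuff_classes_offset=90, e.g. 92 -> 2 and 200 -> 110.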
num_panoptic_categories = 201
num_thing_categories = 91
num_semantic_segmentation_classes = 111
is_thing = [False]
for idx in range(1, num_panoptic_categories):
is_thing.append(True if idx <= num_thing_categories else False)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype='float32', enable_xla=True),
task=PanopticMaskRCNNTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080', # pylint: disable=line-too-long
init_checkpoint_modules=['backbone'],
model=PanopticMaskRCNN(
num_classes=91, input_size=[1024, 1024, 3],
panoptic_segmentation_generator=PanopticSegmentationGenerator(
output_size=[640, 640], rescale_predictions=True),
stuff_classes_offset=90,
segmentation_model=SEGMENTATION_MODEL(
num_classes=num_semantic_segmentation_classes,
head=SEGMENTATION_HEAD(
level=2,
num_convs=0,
num_filters=128,
decoder_min_level=2,
decoder_max_level=6,
feature_fusion='panoptic_fpn_fusion'))),
losses=Losses(l2_weight_decay=0.00004),
train_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.8, aug_scale_max=1.25)),
validation_data=DataConfig(
input_path=os.path.join(_COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
parser=Parser(
segmentation_resize_eval_groundtruth=False,
segmentation_groundtruth_padded_size=[640, 640]),
drop_remainder=False),
annotation_file=os.path.join(_COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
segmentation_evaluation=semantic_segmentation.Evaluation(
report_per_class_iou=False, report_train_mean_iou=False),
panoptic_quality_evaluator=PanopticQualityEvaluator(
num_categories=num_panoptic_categories,
ignored_label=0,
is_thing=is_thing,
rescale_predictions=True)),
trainer=cfg.TrainerConfig(
train_steps=22500,
validation_steps=validation_steps,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [15000, 20000],
'values': [0.12, 0.012, 0.0012],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| 11,613 | 38.104377 | 128 | py |
models | models-master/official/projects/panoptic/dataloaders/panoptic_deeplab_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for Panoptic Deeplab."""
from typing import List, Optional
import numpy as np
import tensorflow as tf
from official.vision.configs import common
from official.vision.dataloaders import parser
from official.vision.dataloaders import tf_example_decoder
from official.vision.ops import augment
from official.vision.ops import preprocess_ops
def _compute_gaussian_from_std(sigma):
"""Computes the Gaussian and its size from a given standard deviation."""
size = int(6 * sigma + 3)
x = np.arange(size, dtype=float)
y = x[:, np.newaxis]
x0, y0 = 3 * sigma + 1, 3 * sigma + 1
gaussian = tf.constant(
np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2)),
dtype=tf.float32)
return gaussian, size
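# A quick sanity-check sketch of the helper above (values assumed, not part of
# the training pipeline): with the parser default sigma=8.0,
#   gaussian, size = _compute_gaussian_from_std(8.0)
#   size == 51                    # int(6 * 8 + 3)
#   gaussian.shape == (51, 51)    # peak of 1.0 at the center (25, 25)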
class TfExampleDecoder(tf_example_decoder.TfExampleDecoder):
"""Tensorflow Example proto decoder."""
def __init__(
self,
regenerate_source_id: bool,
panoptic_category_mask_key: str = 'image/panoptic/category_mask',
panoptic_instance_mask_key: str = 'image/panoptic/instance_mask'):
super(TfExampleDecoder,
self).__init__(
include_mask=True,
regenerate_source_id=regenerate_source_id)
self._panoptic_category_mask_key = panoptic_category_mask_key
self._panoptic_instance_mask_key = panoptic_instance_mask_key
self._panoptic_keys_to_features = {
panoptic_category_mask_key:
tf.io.FixedLenFeature((), tf.string, default_value=''),
panoptic_instance_mask_key:
tf.io.FixedLenFeature((), tf.string, default_value='')
}
def decode(self, serialized_example):
decoded_tensors = super(TfExampleDecoder,
self).decode(serialized_example)
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._panoptic_keys_to_features)
category_mask = tf.io.decode_image(
parsed_tensors[self._panoptic_category_mask_key], channels=1)
instance_mask = tf.io.decode_image(
parsed_tensors[self._panoptic_instance_mask_key], channels=1)
category_mask.set_shape([None, None, 1])
instance_mask.set_shape([None, None, 1])
decoded_tensors.update({
'groundtruth_panoptic_category_mask': category_mask,
'groundtruth_panoptic_instance_mask': instance_mask
})
return decoded_tensors
class Parser(parser.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(
self,
output_size: List[int],
resize_eval_groundtruth: bool = True,
groundtruth_padded_size: Optional[List[int]] = None,
ignore_label: int = 0,
aug_rand_hflip: bool = False,
aug_scale_min: float = 1.0,
aug_scale_max: float = 1.0,
aug_type: Optional[common.Augmentation] = None,
sigma: float = 8.0,
small_instance_area_threshold: int = 4096,
small_instance_weight: float = 3.0,
dtype: str = 'float32'):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divisible by the largest feature stride 2^max_level.
resize_eval_groundtruth: `bool`, if True, eval groundtruth masks are
resized to output_size.
groundtruth_padded_size: `Tensor` or `list` for [height, width]. When
resize_eval_groundtruth is set to False, the groundtruth masks are
padded to this size.
      ignore_label: `int`, pixels with the ignore label will not be used for
        training and evaluation.
aug_rand_hflip: `bool`, if True, augment training with random
horizontal flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
aug_type: An optional Augmentation object with params for AutoAugment.
sigma: `float`, standard deviation for generating 2D Gaussian to encode
centers.
small_instance_area_threshold: `int`, small instance area threshold.
small_instance_weight: `float`, small instance weight.
dtype: `str`, data type. One of {`bfloat16`, `float32`, `float16`}.
"""
self._output_size = output_size
self._resize_eval_groundtruth = resize_eval_groundtruth
if (not resize_eval_groundtruth) and (groundtruth_padded_size is None):
raise ValueError(
          'groundtruth_padded_size ([height, width]) needs to be '
          'specified when resize_eval_groundtruth is False.')
self._groundtruth_padded_size = groundtruth_padded_size
self._ignore_label = ignore_label
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
if aug_type and aug_type.type:
if aug_type.type == 'autoaug':
self._augmenter = augment.AutoAugment(
augmentation_name=aug_type.autoaug.augmentation_name,
cutout_const=aug_type.autoaug.cutout_const,
translate_const=aug_type.autoaug.translate_const)
else:
raise ValueError('Augmentation policy {} not supported.'.format(
aug_type.type))
else:
self._augmenter = None
self._dtype = dtype
self._sigma = sigma
self._gaussian, self._gaussian_size = _compute_gaussian_from_std(
self._sigma)
self._gaussian = tf.reshape(self._gaussian, shape=[-1])
self._small_instance_area_threshold = small_instance_area_threshold
self._small_instance_weight = small_instance_weight
def _resize_and_crop_mask(self, mask, image_info, is_training):
"""Resizes and crops mask using `image_info` dict."""
height = image_info[0][0]
width = image_info[0][1]
mask = tf.reshape(mask, shape=[1, height, width, 1])
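    # Shift all labels up by one so that, after the resize/pad below fills the
    # empty region with zeros, those padded pixels can be told apart from real
    # label 0 (they become -1 once the shift is undone and are then mapped to
    # the ignore label).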
mask += 1
if is_training or self._resize_eval_groundtruth:
image_scale = image_info[2, :]
offset = image_info[3, :]
mask = preprocess_ops.resize_and_crop_masks(
mask,
image_scale,
self._output_size,
offset)
else:
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._groundtruth_padded_size[0],
self._groundtruth_padded_size[1])
mask -= 1
# Assign ignore label to the padded region.
mask = tf.where(
tf.equal(mask, -1),
self._ignore_label * tf.ones_like(mask),
mask)
mask = tf.squeeze(mask, axis=0)
return mask
def _parse_data(self, data, is_training):
image = data['image']
if self._augmenter is not None and is_training:
image = self._augmenter.distort(image)
image = preprocess_ops.normalize_image(image)
category_mask = tf.cast(
data['groundtruth_panoptic_category_mask'][:, :, 0],
dtype=tf.float32)
instance_mask = tf.cast(
data['groundtruth_panoptic_instance_mask'][:, :, 0],
dtype=tf.float32)
# Flips image randomly during training.
if self._aug_rand_hflip and is_training:
masks = tf.stack([category_mask, instance_mask], axis=0)
image, _, masks = preprocess_ops.random_horizontal_flip(
image=image, masks=masks)
category_mask = masks[0]
instance_mask = masks[1]
# Resizes and crops image.
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._output_size,
self._output_size,
aug_scale_min=self._aug_scale_min if is_training else 1.0,
aug_scale_max=self._aug_scale_max if is_training else 1.0)
category_mask = self._resize_and_crop_mask(
category_mask,
image_info,
is_training=is_training)
instance_mask = self._resize_and_crop_mask(
instance_mask,
image_info,
is_training=is_training)
(instance_centers_heatmap,
instance_centers_offset,
semantic_weights) = self._encode_centers_and_offets(
instance_mask=instance_mask[:, :, 0])
# Cast image and labels as self._dtype
image = tf.cast(image, dtype=self._dtype)
category_mask = tf.cast(category_mask, dtype=self._dtype)
instance_mask = tf.cast(instance_mask, dtype=self._dtype)
instance_centers_heatmap = tf.cast(
instance_centers_heatmap, dtype=self._dtype)
instance_centers_offset = tf.cast(
instance_centers_offset, dtype=self._dtype)
valid_mask = tf.not_equal(
category_mask, self._ignore_label)
things_mask = tf.not_equal(
instance_mask, self._ignore_label)
labels = {
'category_mask': category_mask,
'instance_mask': instance_mask,
'instance_centers_heatmap': instance_centers_heatmap,
'instance_centers_offset': instance_centers_offset,
'semantic_weights': semantic_weights,
'valid_mask': valid_mask,
'things_mask': things_mask,
'image_info': image_info
}
return image, labels
def _parse_train_data(self, data):
"""Parses data for training."""
return self._parse_data(data=data, is_training=True)
def _parse_eval_data(self, data):
"""Parses data for evaluation."""
return self._parse_data(data=data, is_training=False)
def _encode_centers_and_offets(self, instance_mask):
"""Generates center heatmaps and offets from instance id mask.
Args:
instance_mask: `tf.Tensor` of shape [height, width] representing
groundtruth instance id mask.
    Returns:
      instance_centers_heatmap: `tf.Tensor` of shape [height, width, 1].
      instance_centers_offset: `tf.Tensor` of shape [height, width, 2].
      semantic_weights: `tf.Tensor` of shape [height, width] holding per-pixel
        loss weights, with `small_instance_weight` over small instances.
    """
shape = tf.shape(instance_mask)
height, width = shape[0], shape[1]
padding_start = int(3 * self._sigma + 1)
padding_end = int(3 * self._sigma + 2)
# padding should be equal to self._gaussian_size which is calculated
# as size = int(6 * sigma + 3)
padding = padding_start + padding_end
instance_centers_heatmap = tf.zeros(
shape=[height + padding, width + padding],
dtype=tf.float32)
centers_offset_y = tf.zeros(
shape=[height, width],
dtype=tf.float32)
centers_offset_x = tf.zeros(
shape=[height, width],
dtype=tf.float32)
semantic_weights = tf.ones(
shape=[height, width],
dtype=tf.float32)
unique_instance_ids, _ = tf.unique(tf.reshape(instance_mask, [-1]))
    # The following method for encoding center heatmaps and offsets is inspired
# by the reference implementation available at
# https://github.com/google-research/deeplab2/blob/main/data/sample_generator.py # pylint: disable=line-too-long
for instance_id in unique_instance_ids:
if instance_id == self._ignore_label:
continue
mask = tf.equal(instance_mask, instance_id)
mask_area = tf.reduce_sum(tf.cast(mask, dtype=tf.float32))
mask_indices = tf.cast(tf.where(mask), dtype=tf.float32)
mask_center = tf.reduce_mean(mask_indices, axis=0)
mask_center_y = tf.cast(tf.round(mask_center[0]), dtype=tf.int32)
mask_center_x = tf.cast(tf.round(mask_center[1]), dtype=tf.int32)
if mask_area < self._small_instance_area_threshold:
semantic_weights = tf.where(
mask,
self._small_instance_weight,
semantic_weights)
gaussian_size = self._gaussian_size
indices_y = tf.range(mask_center_y, mask_center_y + gaussian_size)
indices_x = tf.range(mask_center_x, mask_center_x + gaussian_size)
indices = tf.stack(tf.meshgrid(indices_y, indices_x))
indices = tf.reshape(
indices, shape=[2, gaussian_size * gaussian_size])
indices = tf.transpose(indices)
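      # Splat a Gaussian around this instance center; where Gaussians from
      # nearby centers overlap, the per-pixel maximum is kept.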
instance_centers_heatmap = tf.tensor_scatter_nd_max(
tensor=instance_centers_heatmap,
indices=indices,
updates=self._gaussian)
centers_offset_y = tf.tensor_scatter_nd_update(
tensor=centers_offset_y,
indices=tf.cast(mask_indices, dtype=tf.int32),
updates=tf.cast(mask_center_y, dtype=tf.float32) - mask_indices[:, 0])
centers_offset_x = tf.tensor_scatter_nd_update(
tensor=centers_offset_x,
indices=tf.cast(mask_indices, dtype=tf.int32),
updates=tf.cast(mask_center_x, dtype=tf.float32) - mask_indices[:, 1])
instance_centers_heatmap = instance_centers_heatmap[
padding_start:padding_start + height,
padding_start:padding_start + width]
instance_centers_heatmap = tf.expand_dims(instance_centers_heatmap, axis=-1)
instance_centers_offset = tf.stack(
[centers_offset_y, centers_offset_x],
axis=-1)
return (instance_centers_heatmap,
instance_centers_offset,
semantic_weights)
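# A minimal usage sketch (hypothetical wiring; the real pipeline builds the
# decoder/parser from the panoptic deeplab task and dataloader configs):
#   decoder = TfExampleDecoder(regenerate_source_id=False)
#   parser = Parser(output_size=[640, 640], sigma=8.0)
#   image, labels = parser.parse_fn(is_training=True)(
#       decoder.decode(serialized_tf_example))
#   labels['instance_centers_heatmap']  # [640, 640, 1] Gaussian center heatmap
#   labels['instance_centers_offset']   # [640, 640, 2] (dy, dx) to the center
#   labels['semantic_weights']          # [640, 640] per-pixel loss weights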
| 13,411 | 36.255556 | 117 | py |
models | models-master/official/projects/panoptic/dataloaders/panoptic_maskrcnn_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parser and processing for Panoptic Mask R-CNN."""
import tensorflow as tf
from official.vision.dataloaders import maskrcnn_input
from official.vision.dataloaders import tf_example_decoder
from official.vision.ops import preprocess_ops
class TfExampleDecoder(tf_example_decoder.TfExampleDecoder):
"""Tensorflow Example proto decoder."""
def __init__(
self,
regenerate_source_id: bool,
mask_binarize_threshold: float,
include_panoptic_masks: bool,
panoptic_category_mask_key: str = 'image/panoptic/category_mask',
panoptic_instance_mask_key: str = 'image/panoptic/instance_mask'):
super().__init__(
include_mask=True,
regenerate_source_id=regenerate_source_id,
mask_binarize_threshold=None)
self._include_panoptic_masks = include_panoptic_masks
self._panoptic_category_mask_key = panoptic_category_mask_key
self._panoptic_instance_mask_key = panoptic_instance_mask_key
keys_to_features = {
'image/segmentation/class/encoded':
tf.io.FixedLenFeature((), tf.string, default_value='')}
if include_panoptic_masks:
keys_to_features.update({
panoptic_category_mask_key:
tf.io.FixedLenFeature((), tf.string, default_value=''),
panoptic_instance_mask_key:
tf.io.FixedLenFeature((), tf.string, default_value='')
})
self._segmentation_keys_to_features = keys_to_features
def decode_segmentation_mask(self, parsed_tensors):
segmentation_mask = tf.io.decode_image(
parsed_tensors['image/segmentation/class/encoded'], channels=1)
segmentation_mask.set_shape([None, None, 1])
return segmentation_mask
def decode(self, serialized_example):
decoded_tensors = super().decode(serialized_example)
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._segmentation_keys_to_features)
decoded_tensors.update({
'groundtruth_segmentation_mask':
self.decode_segmentation_mask(parsed_tensors)
})
if self._include_panoptic_masks:
category_mask = tf.io.decode_image(
parsed_tensors[self._panoptic_category_mask_key],
channels=1)
instance_mask = tf.io.decode_image(
parsed_tensors[self._panoptic_instance_mask_key],
channels=1)
category_mask.set_shape([None, None, 1])
instance_mask.set_shape([None, None, 1])
decoded_tensors.update({
'groundtruth_panoptic_category_mask':
category_mask,
'groundtruth_panoptic_instance_mask':
instance_mask})
return decoded_tensors
class Parser(maskrcnn_input.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
rpn_match_threshold=0.7,
rpn_unmatched_threshold=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5,
aug_rand_hflip=False,
aug_rand_vflip=False,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_type=None,
skip_crowd_during_training=True,
max_num_instances=100,
outer_boxes_scale=1.0,
mask_crop_size=112,
segmentation_resize_eval_groundtruth=True,
segmentation_groundtruth_padded_size=None,
segmentation_ignore_label=255,
panoptic_ignore_label=0,
include_panoptic_masks=True,
dtype='float32'):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divisible by the largest feature stride 2^max_level.
min_level: `int` number of minimum level of the output feature pyramid.
max_level: `int` number of maximum level of the output feature pyramid.
num_scales: `int` number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
        intermediate anchor scale, yielding scales [2^0, 2^0.5] on each level.
      aspect_ratios: `list` of float numbers representing the aspect ratio
anchors added on each level. The number indicates the ratio of width to
height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
on each scale level.
anchor_size: `float` number representing the scale of size of the base
anchor to the feature stride 2^level.
rpn_match_threshold: `float`, match threshold for anchors in RPN.
rpn_unmatched_threshold: `float`, unmatched threshold for anchors in RPN.
rpn_batch_size_per_im: `int` for batch size per image in RPN.
      rpn_fg_fraction: `float` for foreground fraction per batch in RPN.
aug_rand_hflip: `bool`, if True, augment training with random horizontal
flip.
aug_rand_vflip: `bool`, if True, augment training with random vertical
flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
aug_type: An optional Augmentation object with params for AutoAugment.
skip_crowd_during_training: `bool`, if True, skip annotations labeled with
        `is_crowd` equal to 1.
max_num_instances: `int` number of maximum number of instances in an
image. The groundtruth data will be padded to `max_num_instances`.
outer_boxes_scale: a float to scale up the bounding boxes to generate
more inclusive masks. The scale is expected to be >=1.0.
mask_crop_size: the size which groundtruth mask is cropped to.
segmentation_resize_eval_groundtruth: `bool`, if True, eval groundtruth
masks are resized to output_size.
segmentation_groundtruth_padded_size: `Tensor` or `list` for [height,
width]. When resize_eval_groundtruth is set to False, the groundtruth
masks are padded to this size.
segmentation_ignore_label: `int` the pixels with ignore label will not be
used for training and evaluation.
panoptic_ignore_label: `int` the pixels with ignore label will not be used
by the PQ evaluator.
include_panoptic_masks: `bool`, if True, category_mask and instance_mask
will be parsed. Set this to true if PQ evaluator is enabled.
dtype: `str`, data type. One of {`bfloat16`, `float32`, `float16`}.
"""
super().__init__(
output_size=output_size,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size,
rpn_match_threshold=rpn_match_threshold,
rpn_unmatched_threshold=rpn_unmatched_threshold,
rpn_batch_size_per_im=rpn_batch_size_per_im,
rpn_fg_fraction=rpn_fg_fraction,
        # Flipping is intentionally disabled in the parent parser: this parser
        # applies random flips jointly to the image and the segmentation mask
        # in `_parse_train_data` below.
        aug_rand_hflip=False,
        aug_rand_vflip=False,
aug_scale_min=aug_scale_min,
aug_scale_max=aug_scale_max,
aug_type=aug_type,
skip_crowd_during_training=skip_crowd_during_training,
max_num_instances=max_num_instances,
include_mask=True,
outer_boxes_scale=outer_boxes_scale,
mask_crop_size=mask_crop_size,
dtype=dtype,
)
self.aug_rand_hflip = aug_rand_hflip
self.aug_rand_vflip = aug_rand_vflip
self._segmentation_resize_eval_groundtruth = (
segmentation_resize_eval_groundtruth
)
if (not segmentation_resize_eval_groundtruth) and (
segmentation_groundtruth_padded_size is None
):
raise ValueError(
          'segmentation_groundtruth_padded_size ([height, width]) needs to be '
'specified when segmentation_resize_eval_groundtruth is False.'
)
self._segmentation_groundtruth_padded_size = (
segmentation_groundtruth_padded_size
)
self._segmentation_ignore_label = segmentation_ignore_label
self._panoptic_ignore_label = panoptic_ignore_label
self._include_panoptic_masks = include_panoptic_masks
def _parse_train_data(self, data):
"""Parses data for training.
Args:
data: the decoded tensor dictionary from TfExampleDecoder.
Returns:
      image: image tensor that is preprocessed to have normalized values and
dimension [output_size[0], output_size[1], 3]
labels: a dictionary of tensors used for training. The following describes
{key: value} pairs in the dictionary.
image_info: a 2D `Tensor` that encodes the information of the image and
the applied preprocessing. It is in the format of
[[original_height, original_width], [scaled_height, scaled_width]],
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each level.
rpn_score_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
rpn_box_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
gt_boxes: Groundtruth bounding box annotations. The box is represented
in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled
        image that is fed to the network. The tensor is padded with -1 to
the fixed dimension [self._max_num_instances, 4].
      gt_classes: Groundtruth class annotations. The tensor is padded
with -1 to the fixed dimension [self._max_num_instances].
gt_masks: Groundtruth masks cropped by the bounding box and
resized to a fixed size determined by mask_crop_size.
gt_segmentation_mask: Groundtruth mask for segmentation head, this is
resized to a fixed size determined by output_size.
gt_segmentation_valid_mask: Binary mask that marks the pixels that
are supposed to be used in computing the segmentation loss while
training.
"""
# (height, width, num_channels = 1)
# All the operations below support num_channels >= 1.
segmentation_mask = data['groundtruth_segmentation_mask']
# Flips image randomly during training.
image_mask = tf.concat([data['image'], segmentation_mask], axis=2)
boxes = data['groundtruth_boxes']
masks = data['groundtruth_instance_masks']
image_mask, boxes, masks = preprocess_ops.random_horizontal_flip(
image_mask,
boxes,
masks,
prob=tf.where(self.aug_rand_hflip, 0.5, 0.0),
)
image_mask, boxes, masks = preprocess_ops.random_vertical_flip(
image_mask,
boxes,
masks,
prob=tf.where(self.aug_rand_vflip, 0.5, 0.0),
)
num_image_channels = data['image'].shape.as_list()[-1]
image = image_mask[:, :, :num_image_channels]
segmentation_mask = image_mask[:, :, num_image_channels:]
data['image'] = image
data['groundtruth_boxes'] = boxes
data['groundtruth_instance_masks'] = masks
image, labels = super()._parse_train_data(data)
image_info = labels['image_info']
image_scale = image_info[2, :]
offset = image_info[3, :]
# (height, width, num_channels = 1)
segmentation_mask = tf.cast(segmentation_mask, tf.int32)
# Pad label and make sure the padded region assigned to the ignore label.
# The label is first offset by +1 and then padded with 0.
segmentation_mask += 1
# (1, height, width, num_channels = 1)
segmentation_mask = tf.expand_dims(segmentation_mask, axis=0)
segmentation_mask = preprocess_ops.resize_and_crop_masks(
segmentation_mask, image_scale, self._output_size, offset)
segmentation_mask -= 1
segmentation_mask = tf.where(
tf.equal(segmentation_mask, -1),
self._segmentation_ignore_label * tf.ones_like(segmentation_mask),
segmentation_mask)
# (height, width, num_channels = 1)
segmentation_mask = tf.squeeze(segmentation_mask, axis=0)
segmentation_valid_mask = tf.not_equal(
segmentation_mask, self._segmentation_ignore_label)
labels.update({
'gt_segmentation_mask': segmentation_mask,
'gt_segmentation_valid_mask': segmentation_valid_mask})
return image, labels
def _parse_eval_data(self, data):
"""Parses data for evaluation.
Args:
data: the decoded tensor dictionary from TfExampleDecoder.
Returns:
A dictionary of {'images': image, 'labels': labels} where
        image: image tensor that is preprocessed to have normalized values and
dimension [output_size[0], output_size[1], 3]
        labels: a dictionary of tensors used for evaluation. The following
describes {key: value} pairs in the dictionary.
source_ids: Source image id. Default value -1 if the source id is
empty in the groundtruth annotation.
image_info: a 2D `Tensor` that encodes the information of the image
and the applied preprocessing. It is in the format of
[[original_height, original_width], [scaled_height, scaled_width]],
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each
level.
"""
def _process_mask(mask, ignore_label, image_info):
# (height, width, num_channels = 1)
# All the operations below support num_channels >= 1.
mask = tf.cast(mask, dtype=tf.int32)
# (1, height, width, num_channels = 1)
mask = tf.expand_dims(mask, axis=0)
mask += 1
if self._segmentation_resize_eval_groundtruth:
# Resizes eval masks to match input image sizes. In that case, mean IoU
# is computed on output_size not the original size of the images.
image_scale = image_info[2, :]
offset = image_info[3, :]
mask = preprocess_ops.resize_and_crop_masks(
mask, image_scale, self._output_size, offset)
else:
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._segmentation_groundtruth_padded_size[0],
self._segmentation_groundtruth_padded_size[1])
mask -= 1
# Assign ignore label to the padded region.
mask = tf.where(
tf.equal(mask, -1),
ignore_label * tf.ones_like(mask),
mask)
# (height, width, num_channels = 1)
mask = tf.squeeze(mask, axis=0)
return mask
image, labels = super()._parse_eval_data(data)
image_info = labels['image_info']
# (height, width, num_channels = 1)
segmentation_mask = _process_mask(
data['groundtruth_segmentation_mask'],
self._segmentation_ignore_label, image_info)
segmentation_valid_mask = tf.not_equal(
segmentation_mask, self._segmentation_ignore_label)
labels['groundtruths'].update({
'gt_segmentation_mask': segmentation_mask,
'gt_segmentation_valid_mask': segmentation_valid_mask})
if self._include_panoptic_masks:
panoptic_category_mask = _process_mask(
data['groundtruth_panoptic_category_mask'],
self._panoptic_ignore_label, image_info)
panoptic_instance_mask = _process_mask(
data['groundtruth_panoptic_instance_mask'],
self._panoptic_ignore_label, image_info)
panoptic_category_mask = panoptic_category_mask[:, :, 0]
panoptic_instance_mask = panoptic_instance_mask[:, :, 0]
labels['groundtruths'].update({
'gt_panoptic_category_mask':
tf.cast(panoptic_category_mask, dtype=tf.int32),
'gt_panoptic_instance_mask':
tf.cast(panoptic_instance_mask, dtype=tf.int32)})
return image, labels
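def _example_pad_with_ignore_label():
  # Illustrative sketch, not part of the original parser: demonstrates the
  # padding trick used in `_parse_train_data` above. Labels are shifted by +1
  # so that the zero padding introduced while resizing/cropping can later be
  # mapped unambiguously to the ignore label. `pad_to_bounding_box` stands in
  # for `preprocess_ops.resize_and_crop_masks`, and the 4x4 canvas and ignore
  # label of 255 are assumptions chosen only for the example.
  ignore_label = 255
  mask = tf.constant([[0, 1], [2, 3]], dtype=tf.int32)[None, :, :, None]
  mask += 1
  padded = tf.image.pad_to_bounding_box(mask, 0, 0, 4, 4)
  padded -= 1
  padded = tf.where(
      tf.equal(padded, -1), ignore_label * tf.ones_like(padded), padded)
  # The original labels 0..3 are unchanged; every padded pixel is now 255.
  return padded[0, :, :, 0]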
| 17,018 | 42.195431 | 82 | py |
models | models-master/official/projects/panoptic/modeling/panoptic_deeplab_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build Panoptic Deeplab model."""
from typing import Any, Mapping, Optional, Union
import tensorflow as tf
from official.projects.panoptic.modeling.layers import panoptic_deeplab_merge
@tf.keras.utils.register_keras_serializable(package='Vision')
class PanopticDeeplabModel(tf.keras.Model):
"""Panoptic Deeplab model."""
def __init__(
self,
backbone: tf.keras.Model,
semantic_decoder: tf.keras.Model,
semantic_head: tf.keras.layers.Layer,
instance_head: tf.keras.layers.Layer,
instance_decoder: Optional[tf.keras.Model] = None,
post_processor: Optional[panoptic_deeplab_merge.PostProcessor] = None,
**kwargs):
"""Panoptic deeplab model initializer.
Args:
backbone: a backbone network.
semantic_decoder: a decoder network. E.g. FPN.
semantic_head: segmentation head.
instance_head: instance center head.
instance_decoder: Optional decoder network for instance predictions.
post_processor: Optional post processor layer.
**kwargs: keyword arguments to be passed.
"""
super(PanopticDeeplabModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'semantic_decoder': semantic_decoder,
'instance_decoder': instance_decoder,
'semantic_head': semantic_head,
'instance_head': instance_head,
'post_processor': post_processor
}
self.backbone = backbone
self.semantic_decoder = semantic_decoder
self.instance_decoder = instance_decoder
self.semantic_head = semantic_head
self.instance_head = instance_head
self.post_processor = post_processor
def call( # pytype: disable=signature-mismatch # overriding-parameter-count-checks
self, inputs: tf.Tensor,
image_info: tf.Tensor,
training: bool = None):
if training is None:
training = tf.keras.backend.learning_phase()
backbone_features = self.backbone(inputs, training=training)
semantic_features = self.semantic_decoder(
backbone_features, training=training)
if self.instance_decoder is None:
instance_features = semantic_features
else:
instance_features = self.instance_decoder(
backbone_features, training=training)
segmentation_outputs = self.semantic_head(
(backbone_features, semantic_features),
training=training)
instance_outputs = self.instance_head(
(backbone_features, instance_features),
training=training)
outputs = {
'segmentation_outputs': segmentation_outputs,
'instance_centers_heatmap':
instance_outputs['instance_centers_heatmap'],
'instance_centers_offset':
instance_outputs['instance_centers_offset'],
}
if training:
return outputs
if self.post_processor is not None:
panoptic_masks = self.post_processor(outputs, image_info)
outputs.update(panoptic_masks)
return outputs
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(
backbone=self.backbone,
semantic_decoder=self.semantic_decoder,
semantic_head=self.semantic_head,
instance_head=self.instance_head)
if self.instance_decoder is not None:
items.update(instance_decoder=self.instance_decoder)
return items
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
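def _example_run_inference(model: PanopticDeeplabModel,
                           images: tf.Tensor,
                           image_info: tf.Tensor) -> Mapping[str, tf.Tensor]:
  # Illustrative sketch, not part of the original library: `images` is a batch
  # of preprocessed images and `image_info` holds the batched [4, 2] tensors
  # produced by the input parser ([[original size], [desired size], [scale],
  # [offset]] per image).
  outputs = model(images, image_info, training=False)
  # The raw head outputs ('segmentation_outputs', 'instance_centers_heatmap',
  # 'instance_centers_offset') are always present; the decoded panoptic masks
  # are added only when a post-processor was configured.
  return outputs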
| 4,220 | 33.317073 | 86 | py |
models | models-master/official/projects/panoptic/modeling/panoptic_maskrcnn_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Segmentation model."""
from typing import List, Mapping, Optional, Union
import tensorflow as tf
from official.projects.deepmac_maskrcnn.modeling import maskrcnn_model
class PanopticMaskRCNNModel(maskrcnn_model.DeepMaskRCNNModel):
"""The Panoptic Segmentation model."""
def __init__(
self,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
rpn_head: tf.keras.layers.Layer,
detection_head: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_generator: tf.keras.layers.Layer,
roi_sampler: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_aligner: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
panoptic_segmentation_generator: Optional[tf.keras.layers.Layer] = None,
mask_head: Optional[tf.keras.layers.Layer] = None,
mask_sampler: Optional[tf.keras.layers.Layer] = None,
mask_roi_aligner: Optional[tf.keras.layers.Layer] = None,
segmentation_backbone: Optional[tf.keras.Model] = None,
segmentation_decoder: Optional[tf.keras.Model] = None,
segmentation_head: tf.keras.layers.Layer = None,
class_agnostic_bbox_pred: bool = False,
cascade_class_ensemble: bool = False,
min_level: Optional[int] = None,
max_level: Optional[int] = None,
num_scales: Optional[int] = None,
aspect_ratios: Optional[List[float]] = None,
anchor_size: Optional[float] = None,
outer_boxes_scale: float = 1.0,
use_gt_boxes_for_masks: bool = False, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initializes the Panoptic Mask R-CNN model.
Args:
backbone: `tf.keras.Model`, the backbone network.
decoder: `tf.keras.Model`, the decoder network.
rpn_head: the RPN head.
detection_head: the detection head or a list of heads.
roi_generator: the ROI generator.
roi_sampler: a single ROI sampler or a list of ROI samplers for cascade
detection heads.
roi_aligner: the ROI aligner.
detection_generator: the detection generator.
panoptic_segmentation_generator: the panoptic segmentation generator that
is used to merge instance and semantic segmentation masks.
mask_head: the mask head.
mask_sampler: the mask sampler.
      mask_roi_aligner: the ROI aligner for mask prediction.
segmentation_backbone: `tf.keras.Model`, the backbone network for the
segmentation head for panoptic task. Providing `segmentation_backbone`
        will allow the segmentation head to use a standalone backbone. Setting
`segmentation_backbone=None` would enable backbone sharing between the
MaskRCNN model and segmentation head.
segmentation_decoder: `tf.keras.Model`, the decoder network for the
segmentation head for panoptic task. Providing `segmentation_decoder`
        will allow the segmentation head to use a standalone decoder. Setting
`segmentation_decoder=None` would enable decoder sharing between the
MaskRCNN model and segmentation head. Decoders can only be shared when
`segmentation_backbone` is shared as well.
      segmentation_head: segmentation head for the panoptic task.
class_agnostic_bbox_pred: if True, perform class agnostic bounding box
prediction. Needs to be `True` for Cascade RCNN models.
cascade_class_ensemble: if True, ensemble classification scores over all
detection heads.
min_level: Minimum level in output feature maps.
max_level: Maximum level in output feature maps.
num_scales: A number representing intermediate scales added on each level.
        For instance, num_scales=2 adds one additional intermediate anchor
        scale, resulting in anchor scales [2^0, 2^0.5] on each level.
      aspect_ratios: A list representing the aspect ratio anchors added on each
        level. The number indicates the ratio of width to height. For instance,
aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each scale level.
anchor_size: A number representing the scale of size of the base anchor to
the feature stride 2^level.
outer_boxes_scale: a float to scale up the bounding boxes to generate
more inclusive masks. The scale is expected to be >=1.0.
use_gt_boxes_for_masks: `bool`, whether to use only gt boxes for masks.
**kwargs: keyword arguments to be passed.
"""
super().__init__(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=detection_head,
roi_generator=roi_generator,
roi_sampler=roi_sampler,
roi_aligner=roi_aligner,
detection_generator=detection_generator,
mask_head=mask_head,
mask_sampler=mask_sampler,
mask_roi_aligner=mask_roi_aligner,
class_agnostic_bbox_pred=class_agnostic_bbox_pred,
cascade_class_ensemble=cascade_class_ensemble,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size,
outer_boxes_scale=outer_boxes_scale,
use_gt_boxes_for_masks=use_gt_boxes_for_masks,
**kwargs)
self._config_dict.update({
'segmentation_backbone': segmentation_backbone,
'segmentation_decoder': segmentation_decoder,
'segmentation_head': segmentation_head
})
if panoptic_segmentation_generator is not None:
self._config_dict.update(
{'panoptic_segmentation_generator': panoptic_segmentation_generator})
if not self._include_mask:
raise ValueError(
'`mask_head` needs to be provided for Panoptic Mask R-CNN.')
if segmentation_backbone is not None and segmentation_decoder is None:
raise ValueError(
          '`segmentation_decoder` needs to be provided for Panoptic Mask R-CNN '
'if `backbone` is not shared.')
self.segmentation_backbone = segmentation_backbone
self.segmentation_decoder = segmentation_decoder
self.segmentation_head = segmentation_head
self.panoptic_segmentation_generator = panoptic_segmentation_generator
def call(self,
images: tf.Tensor,
image_info: tf.Tensor,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
gt_boxes: Optional[tf.Tensor] = None,
gt_classes: Optional[tf.Tensor] = None,
gt_masks: Optional[tf.Tensor] = None,
gt_outer_boxes: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
image_shape = image_info[:, 1, :]
model_kwargs = {
'images': images,
'image_shape': image_shape,
'anchor_boxes': anchor_boxes,
'gt_boxes': gt_boxes,
'gt_classes': gt_classes,
'gt_masks': gt_masks,
'training': training,
}
if self.outer_boxes_scale > 1.0:
model_kwargs['gt_outer_boxes'] = gt_outer_boxes
model_outputs = super().call(**model_kwargs)
if self.segmentation_backbone is not None:
backbone_features = self.segmentation_backbone(images, training=training)
else:
backbone_features = model_outputs['backbone_features']
if self.segmentation_decoder is not None:
decoder_features = self.segmentation_decoder(
backbone_features, training=training)
else:
decoder_features = model_outputs['decoder_features']
segmentation_outputs = self.segmentation_head(
(backbone_features, decoder_features), training=training)
model_outputs.update({
'segmentation_outputs': segmentation_outputs,
})
if not training and self.panoptic_segmentation_generator is not None:
panoptic_outputs = self.panoptic_segmentation_generator(
model_outputs, image_info=image_info)
model_outputs.update({'panoptic_outputs': panoptic_outputs})
return model_outputs
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = super().checkpoint_items
if self.segmentation_backbone is not None:
items.update(segmentation_backbone=self.segmentation_backbone)
if self.segmentation_decoder is not None:
items.update(segmentation_decoder=self.segmentation_decoder)
items.update(segmentation_head=self.segmentation_head)
return items
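def _example_restore_pretrained_components(model: PanopticMaskRCNNModel,
                                           ckpt_path: str):
  # Illustrative sketch, not part of the original library: `checkpoint_items`
  # exposes the sub-networks, so a detection checkpoint that lacks the panoptic
  # pieces can still be partially restored. `ckpt_path` is a hypothetical
  # checkpoint location supplied by the caller.
  ckpt = tf.train.Checkpoint(**model.checkpoint_items)
  return ckpt.restore(ckpt_path).expect_partial()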
| 9,102 | 42.555024 | 102 | py |
models | models-master/official/projects/panoptic/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory method to build panoptic segmentation model."""
from typing import Optional
import tensorflow as tf
from official.projects.deepmac_maskrcnn.tasks import deep_mask_head_rcnn
from official.projects.panoptic.configs import panoptic_deeplab as panoptic_deeplab_cfg
from official.projects.panoptic.configs import panoptic_maskrcnn as panoptic_maskrcnn_cfg
from official.projects.panoptic.modeling import panoptic_deeplab_model
from official.projects.panoptic.modeling import panoptic_maskrcnn_model
from official.projects.panoptic.modeling.heads import panoptic_deeplab_heads
from official.projects.panoptic.modeling.layers import panoptic_deeplab_merge
from official.projects.panoptic.modeling.layers import panoptic_segmentation_generator
from official.vision.modeling import backbones
from official.vision.modeling.decoders import factory as decoder_factory
from official.vision.modeling.heads import segmentation_heads
def build_panoptic_maskrcnn(
input_specs: tf.keras.layers.InputSpec,
model_config: panoptic_maskrcnn_cfg.PanopticMaskRCNN,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds Panoptic Mask R-CNN model.
This factory function builds the mask rcnn first, builds the non-shared
semantic segmentation layers, and finally combines the two models to form
the panoptic segmentation model.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
model_config: Config instance for the panoptic maskrcnn model.
l2_regularizer: Optional `tf.keras.regularizers.Regularizer`, if specified,
the model is built with the provided regularization layer.
Returns:
tf.keras.Model for the panoptic segmentation model.
"""
norm_activation_config = model_config.norm_activation
segmentation_config = model_config.segmentation_model
# Builds the maskrcnn model.
maskrcnn_model = deep_mask_head_rcnn.build_maskrcnn(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
# Builds the semantic segmentation branch.
if not model_config.shared_backbone:
segmentation_backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=segmentation_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
segmentation_decoder_input_specs = segmentation_backbone.output_specs
else:
segmentation_backbone = None
segmentation_decoder_input_specs = maskrcnn_model.backbone.output_specs
if not model_config.shared_decoder:
segmentation_decoder = decoder_factory.build_decoder(
input_specs=segmentation_decoder_input_specs,
model_config=segmentation_config,
l2_regularizer=l2_regularizer)
decoder_config = segmentation_decoder.get_config()
else:
segmentation_decoder = None
decoder_config = maskrcnn_model.decoder.get_config()
segmentation_head_config = segmentation_config.head
detection_head_config = model_config.detection_head
postprocessing_config = model_config.panoptic_segmentation_generator
segmentation_head = segmentation_heads.SegmentationHead(
num_classes=segmentation_config.num_classes,
level=segmentation_head_config.level,
num_convs=segmentation_head_config.num_convs,
prediction_kernel_size=segmentation_head_config.prediction_kernel_size,
num_filters=segmentation_head_config.num_filters,
upsample_factor=segmentation_head_config.upsample_factor,
feature_fusion=segmentation_head_config.feature_fusion,
decoder_min_level=segmentation_head_config.decoder_min_level,
decoder_max_level=segmentation_head_config.decoder_max_level,
low_level=segmentation_head_config.low_level,
low_level_num_filters=segmentation_head_config.low_level_num_filters,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
num_decoder_filters=decoder_config['num_filters'],
kernel_regularizer=l2_regularizer)
if model_config.generate_panoptic_masks:
max_num_detections = model_config.detection_generator.max_num_detections
mask_binarize_threshold = postprocessing_config.mask_binarize_threshold
panoptic_segmentation_generator_obj = (
panoptic_segmentation_generator.PanopticSegmentationGeneratorV2(
output_size=postprocessing_config.output_size,
max_num_detections=max_num_detections,
stuff_classes_offset=model_config.stuff_classes_offset,
mask_binarize_threshold=mask_binarize_threshold,
score_threshold=postprocessing_config.score_threshold,
things_overlap_threshold=postprocessing_config
.things_overlap_threshold,
things_class_label=postprocessing_config.things_class_label,
stuff_area_threshold=postprocessing_config.stuff_area_threshold,
void_class_label=postprocessing_config.void_class_label,
void_instance_id=postprocessing_config.void_instance_id,
rescale_predictions=postprocessing_config.rescale_predictions))
else:
panoptic_segmentation_generator_obj = None
# Combines maskrcnn, and segmentation models to build panoptic segmentation
# model.
model = panoptic_maskrcnn_model.PanopticMaskRCNNModel(
backbone=maskrcnn_model.backbone,
decoder=maskrcnn_model.decoder,
rpn_head=maskrcnn_model.rpn_head,
detection_head=maskrcnn_model.detection_head,
roi_generator=maskrcnn_model.roi_generator,
roi_sampler=maskrcnn_model.roi_sampler,
roi_aligner=maskrcnn_model.roi_aligner,
detection_generator=maskrcnn_model.detection_generator,
panoptic_segmentation_generator=panoptic_segmentation_generator_obj,
mask_head=maskrcnn_model.mask_head,
mask_sampler=maskrcnn_model.mask_sampler,
mask_roi_aligner=maskrcnn_model.mask_roi_aligner,
segmentation_backbone=segmentation_backbone,
segmentation_decoder=segmentation_decoder,
segmentation_head=segmentation_head,
class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred,
cascade_class_ensemble=detection_head_config.cascade_class_ensemble,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_scales=model_config.anchor.num_scales,
aspect_ratios=model_config.anchor.aspect_ratios,
anchor_size=model_config.anchor.anchor_size,
outer_boxes_scale=maskrcnn_model.outer_boxes_scale)
return model
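def _example_build_panoptic_maskrcnn(
    model_config: panoptic_maskrcnn_cfg.PanopticMaskRCNN,
    image_height: int = 640,
    image_width: int = 640,
    l2_weight_decay: float = 0.00004) -> tf.keras.Model:
  # Illustrative sketch, not part of the original library, of how a task would
  # typically call the factory above. The image size and weight decay defaults
  # are assumptions, not values mandated by the library; the division by 2.0
  # mirrors the convention used by the official vision tasks.
  input_specs = tf.keras.layers.InputSpec(
      shape=[None, image_height, image_width, 3])
  return build_panoptic_maskrcnn(
      input_specs=input_specs,
      model_config=model_config,
      l2_regularizer=tf.keras.regularizers.l2(l2_weight_decay / 2.0))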
def build_panoptic_deeplab(
input_specs: tf.keras.layers.InputSpec,
model_config: panoptic_deeplab_cfg.PanopticDeeplab,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds Panoptic Deeplab model.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
model_config: Config instance for the panoptic deeplab model.
l2_regularizer: Optional `tf.keras.regularizers.Regularizer`, if specified,
the model is built with the provided regularization layer.
Returns:
tf.keras.Model for the panoptic segmentation model.
"""
norm_activation_config = model_config.norm_activation
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
semantic_decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
if model_config.shared_decoder:
instance_decoder = None
else:
# semantic and instance share the same decoder type
instance_decoder = decoder_factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
semantic_head_config = model_config.semantic_head
instance_head_config = model_config.instance_head
semantic_head = panoptic_deeplab_heads.SemanticHead(
num_classes=model_config.num_classes,
level=semantic_head_config.level,
num_convs=semantic_head_config.num_convs,
kernel_size=semantic_head_config.kernel_size,
prediction_kernel_size=semantic_head_config.prediction_kernel_size,
num_filters=semantic_head_config.num_filters,
use_depthwise_convolution=semantic_head_config.use_depthwise_convolution,
upsample_factor=semantic_head_config.upsample_factor,
low_level=semantic_head_config.low_level,
low_level_num_filters=semantic_head_config.low_level_num_filters,
fusion_num_output_filters=semantic_head_config.fusion_num_output_filters,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
instance_head = panoptic_deeplab_heads.InstanceHead(
level=instance_head_config.level,
num_convs=instance_head_config.num_convs,
kernel_size=instance_head_config.kernel_size,
prediction_kernel_size=instance_head_config.prediction_kernel_size,
num_filters=instance_head_config.num_filters,
use_depthwise_convolution=instance_head_config.use_depthwise_convolution,
upsample_factor=instance_head_config.upsample_factor,
low_level=instance_head_config.low_level,
low_level_num_filters=instance_head_config.low_level_num_filters,
fusion_num_output_filters=instance_head_config.fusion_num_output_filters,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
if model_config.generate_panoptic_masks:
post_processing_config = model_config.post_processor
post_processor = panoptic_deeplab_merge.PostProcessor(
output_size=post_processing_config.output_size,
center_score_threshold=post_processing_config.center_score_threshold,
thing_class_ids=post_processing_config.thing_class_ids,
label_divisor=post_processing_config.label_divisor,
stuff_area_limit=post_processing_config.stuff_area_limit,
ignore_label=post_processing_config.ignore_label,
nms_kernel=post_processing_config.nms_kernel,
keep_k_centers=post_processing_config.keep_k_centers,
rescale_predictions=post_processing_config.rescale_predictions)
else:
post_processor = None
model = panoptic_deeplab_model.PanopticDeeplabModel(
backbone=backbone,
semantic_decoder=semantic_decoder,
instance_decoder=instance_decoder,
semantic_head=semantic_head,
instance_head=instance_head,
post_processor=post_processor)
return model
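def _example_build_with_separate_decoders(
    input_specs: tf.keras.layers.InputSpec,
    model_config: panoptic_deeplab_cfg.PanopticDeeplab) -> tf.keras.Model:
  # Illustrative sketch, not part of the original library: toggling
  # `shared_decoder` on a caller-supplied config controls whether the factory
  # above builds a single decoder shared by both heads or two independent
  # decoders of the same type.
  model_config.shared_decoder = False
  return build_panoptic_deeplab(
      input_specs=input_specs,
      model_config=model_config,
      l2_regularizer=None)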
| 11,721 | 45.149606 | 139 | py |
models | models-master/official/projects/panoptic/modeling/layers/panoptic_deeplab_merge.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains functions to post-process Panoptic-DeepLab results.
Note that the postprocessing class and the supporting functions are branched
from:
https://github.com/google-research/deeplab2/blob/main/model/post_processor/panoptic_deeplab.py
with minor changes.
"""
import functools
from typing import Dict, List, Text, Tuple
import tensorflow as tf
from official.projects.panoptic.ops import mask_ops
def _add_zero_padding(input_tensor: tf.Tensor, kernel_size: int,
rank: int) -> tf.Tensor:
"""Adds zero-padding to the input_tensor."""
pad_total = kernel_size - 1
pad_begin = pad_total // 2
pad_end = pad_total - pad_begin
if rank == 3:
return tf.pad(
input_tensor,
paddings=[[pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
else:
return tf.pad(
input_tensor,
paddings=[[0, 0], [pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
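def _example_add_zero_padding():
  # Illustrative sketch, not part of the original library: pad a 2x2 single
  # channel heatmap for a 3x3 max-pooling window, yielding a 4x4 map with a
  # one-pixel zero border.
  heatmap = tf.ones([2, 2, 1], dtype=tf.float32)
  return _add_zero_padding(heatmap, kernel_size=3, rank=3)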
def _get_semantic_predictions(semantic_logits: tf.Tensor) -> tf.Tensor:
"""Computes the semantic classes from the predictions.
Args:
semantic_logits: A tf.tensor of shape [batch, height, width, classes].
Returns:
A tf.Tensor containing the semantic class prediction of shape
[batch, height, width].
"""
return tf.argmax(semantic_logits, axis=-1, output_type=tf.int32)
def _get_instance_centers_from_heatmap(
center_heatmap: tf.Tensor,
center_threshold: float,
nms_kernel_size: int,
keep_k_centers: int) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes a list of instance centers.
Args:
center_heatmap: A tf.Tensor of shape [height, width, 1].
center_threshold: A float setting the threshold for the center heatmap.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep (K).
Non-positive values will keep all centers.
Returns:
A tuple of
- tf.Tensor of shape [N, 2] containing N center coordinates (after
non-maximum suppression) in (y, x) order.
- tf.Tensor of shape [height, width] containing the center heatmap after
non-maximum suppression.
"""
# Threshold center map.
center_heatmap = tf.where(
tf.greater(center_heatmap, center_threshold), center_heatmap, 0.0)
# Non-maximum suppression.
padded_map = _add_zero_padding(center_heatmap, nms_kernel_size, rank=3)
pooled_center_heatmap = tf.keras.backend.pool2d(
tf.expand_dims(padded_map, 0),
pool_size=(nms_kernel_size, nms_kernel_size),
strides=(1, 1),
padding='valid',
pool_mode='max')
center_heatmap = tf.where(
tf.equal(pooled_center_heatmap, center_heatmap), center_heatmap, 0.0)
center_heatmap = tf.squeeze(center_heatmap, axis=[0, 3])
# `centers` is of shape (N, 2) with (y, x) order of the second dimension.
centers = tf.where(tf.greater(center_heatmap, 0.0))
if keep_k_centers > 0 and tf.shape(centers)[0] > keep_k_centers:
topk_scores, _ = tf.math.top_k(
tf.reshape(center_heatmap, [-1]), keep_k_centers, sorted=False)
centers = tf.where(tf.greater(center_heatmap, topk_scores[-1]))
return centers, center_heatmap
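def _example_get_instance_centers():
  # Illustrative sketch, not part of the original library: a 5x5 heatmap with
  # two peaks keeps exactly those two centers after thresholding at 0.5 and
  # 3x3 non-maximum suppression; `centers` comes back as [[1, 1], [3, 4]].
  heatmap = tf.scatter_nd(
      indices=[[1, 1, 0], [3, 4, 0]], updates=[0.9, 0.8], shape=[5, 5, 1])
  return _get_instance_centers_from_heatmap(
      heatmap, center_threshold=0.5, nms_kernel_size=3, keep_k_centers=-1)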
def _find_closest_center_per_pixel(centers: tf.Tensor,
center_offsets: tf.Tensor) -> tf.Tensor:
"""Assigns all pixels to their closest center.
Args:
centers: A tf.Tensor of shape [N, 2] containing N centers with coordinate
order (y, x).
center_offsets: A tf.Tensor of shape [height, width, 2].
Returns:
A tf.Tensor of shape [height, width] containing the index of the closest
center, per pixel.
"""
height = tf.shape(center_offsets)[0]
width = tf.shape(center_offsets)[1]
x_coord, y_coord = tf.meshgrid(tf.range(width), tf.range(height))
coord = tf.stack([y_coord, x_coord], axis=-1)
center_per_pixel = tf.cast(coord, tf.float32) + center_offsets
# centers: [N, 2] -> [N, 1, 2].
# center_per_pixel: [H, W, 2] -> [1, H*W, 2].
centers = tf.cast(tf.expand_dims(centers, 1), tf.float32)
center_per_pixel = tf.reshape(center_per_pixel, [height*width, 2])
center_per_pixel = tf.expand_dims(center_per_pixel, 0)
# distances: [N, H*W].
distances = tf.norm(centers - center_per_pixel, axis=-1)
return tf.reshape(tf.argmin(distances, axis=0), [height, width])
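def _example_find_closest_center():
  # Illustrative sketch, not part of the original library: with all-zero
  # offsets every pixel is assigned to the spatially nearest center, so the
  # top-left of the 4x4 grid maps to index 0 and the bottom-right to index 1.
  centers = tf.constant([[0, 0], [3, 3]])
  offsets = tf.zeros([4, 4, 2], dtype=tf.float32)
  return _find_closest_center_per_pixel(centers, offsets)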
def _get_instances_from_heatmap_and_offset(
semantic_segmentation: tf.Tensor, center_heatmap: tf.Tensor,
center_offsets: tf.Tensor, center_threshold: float,
thing_class_ids: tf.Tensor, nms_kernel_size: int,
keep_k_centers: int) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes the instance assignment per pixel.
Args:
semantic_segmentation: A tf.Tensor containing the semantic labels of shape
[height, width].
center_heatmap: A tf.Tensor of shape [height, width, 1].
center_offsets: A tf.Tensor of shape [height, width, 2].
center_threshold: A float setting the threshold for the center heatmap.
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
Returns:
A tuple of:
- tf.Tensor containing the instance segmentation (filtered with the `thing`
segmentation from the semantic segmentation output) with shape
[height, width].
- tf.Tensor containing the processed centermap with shape [height, width].
- tf.Tensor containing instance scores (where higher "score" is a reasonable
signal of a higher confidence detection.) Will be of shape [height, width]
with the score for a pixel being the score of the instance it belongs to.
The scores will be zero for pixels in background/"stuff" regions.
"""
thing_segmentation = tf.zeros_like(semantic_segmentation)
for thing_id in thing_class_ids:
thing_segmentation = tf.where(tf.equal(semantic_segmentation, thing_id),
1,
thing_segmentation)
centers, processed_center_heatmap = _get_instance_centers_from_heatmap(
center_heatmap, center_threshold, nms_kernel_size, keep_k_centers)
if tf.shape(centers)[0] == 0:
return (tf.zeros_like(semantic_segmentation), processed_center_heatmap,
tf.zeros_like(processed_center_heatmap))
instance_center_index = _find_closest_center_per_pixel(
centers, center_offsets)
# Instance IDs should start with 1. So we use the index into the centers, but
# shifted by 1.
instance_segmentation = tf.cast(instance_center_index, tf.int32) + 1
# The value of the heatmap at an instance's center is used as the score
# for that instance.
instance_scores = tf.gather_nd(processed_center_heatmap, centers)
# This will map the instance scores back to the image space: where each pixel
# has a value equal to the score of its instance.
flat_center_index = tf.reshape(instance_center_index, [-1])
instance_score_map = tf.gather(instance_scores, flat_center_index)
instance_score_map = tf.reshape(instance_score_map,
tf.shape(instance_segmentation))
instance_score_map *= tf.cast(thing_segmentation, tf.float32)
return (thing_segmentation * instance_segmentation, processed_center_heatmap,
instance_score_map)
@tf.function
def _get_panoptic_predictions(
semantic_logits: tf.Tensor, center_heatmap: tf.Tensor,
center_offsets: tf.Tensor, center_threshold: float,
thing_class_ids: tf.Tensor, label_divisor: int, stuff_area_limit: int,
void_label: int, nms_kernel_size: int, keep_k_centers: int
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes the semantic class and instance ID per pixel.
Args:
semantic_logits: A tf.Tensor of shape [batch, height, width, classes].
center_heatmap: A tf.Tensor of shape [batch, height, width, 1].
center_offsets: A tf.Tensor of shape [batch, height, width, 2].
center_threshold: A float setting the threshold for the center heatmap.
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit; otherwise,
it will be re-assigned as void_label.
void_label: An integer specifying the void label.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
Returns:
A tuple of:
- the panoptic prediction as tf.Tensor with shape [batch, height, width].
- the centermap prediction as tf.Tensor with shape [batch, height, width].
- the instance score maps as tf.Tensor with shape [batch, height, width].
- the instance prediction as tf.Tensor with shape [batch, height, width].
"""
semantic_prediction = _get_semantic_predictions(semantic_logits)
batch_size = tf.shape(semantic_logits)[0]
instance_map_lists = tf.TensorArray(
tf.int32, size=batch_size, dynamic_size=False)
center_map_lists = tf.TensorArray(
tf.float32, size=batch_size, dynamic_size=False)
instance_score_map_lists = tf.TensorArray(
tf.float32, size=batch_size, dynamic_size=False)
for i in tf.range(batch_size):
(instance_map, center_map,
instance_score_map) = _get_instances_from_heatmap_and_offset(
semantic_prediction[i, ...], center_heatmap[i, ...],
center_offsets[i, ...], center_threshold, thing_class_ids,
nms_kernel_size, keep_k_centers)
instance_map_lists = instance_map_lists.write(i, instance_map)
center_map_lists = center_map_lists.write(i, center_map)
instance_score_map_lists = instance_score_map_lists.write(
i, instance_score_map)
# This does not work with unknown shapes.
instance_maps = instance_map_lists.stack()
center_maps = center_map_lists.stack()
instance_score_maps = instance_score_map_lists.stack()
panoptic_prediction = _merge_semantic_and_instance_maps(
semantic_prediction, instance_maps, thing_class_ids, label_divisor,
stuff_area_limit, void_label)
return (panoptic_prediction, center_maps, instance_score_maps, instance_maps)
@tf.function
def _merge_semantic_and_instance_maps(
semantic_prediction: tf.Tensor,
instance_maps: tf.Tensor,
thing_class_ids: tf.Tensor,
label_divisor: int,
stuff_area_limit: int,
void_label: int) -> tf.Tensor:
"""Merges semantic and instance maps to obtain panoptic segmentation.
This function merges the semantic segmentation and class-agnostic
instance segmentation to form the panoptic segmentation. In particular,
the class label of each instance mask is inferred from the majority
votes from the corresponding pixels in the semantic segmentation. This
operation is first proposed in the DeeperLab paper and adopted by the
Panoptic-DeepLab.
- DeeperLab: Single-Shot Image Parser, T-J Yang, et al. arXiv:1902.05093.
- Panoptic-DeepLab, B. Cheng, et al. In CVPR, 2020.
Note that this function only supports batch = 1 for simplicity. Additionally,
this function has a slightly different implementation from the provided
TensorFlow implementation `merge_ops` but with a similar performance. This
function is mainly used as a backup solution when you could not successfully
compile the provided TensorFlow implementation. To reproduce our results,
please use the provided TensorFlow implementation (i.e., not use this
function, but the `merge_ops.merge_semantic_and_instance_maps`).
Args:
semantic_prediction: A tf.Tensor of shape [batch, height, width].
instance_maps: A tf.Tensor of shape [batch, height, width].
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit; otherwise,
it will be re-assigned as void_label.
void_label: An integer specifying the void label.
Returns:
panoptic_prediction: A tf.Tensor with shape [batch, height, width].
"""
prediction_shape = semantic_prediction.get_shape().as_list()
# This implementation only supports batch size of 1. Since model construction
# might lose batch size information (and leave it to None), override it here.
prediction_shape[0] = 1
semantic_prediction = tf.ensure_shape(semantic_prediction, prediction_shape)
instance_maps = tf.ensure_shape(instance_maps, prediction_shape)
# Default panoptic_prediction to have semantic label = void_label.
panoptic_prediction = tf.ones_like(
semantic_prediction) * void_label * label_divisor
# Start to paste predicted `thing` regions to panoptic_prediction.
# Infer `thing` segmentation regions from semantic prediction.
semantic_thing_segmentation = tf.zeros_like(semantic_prediction,
dtype=tf.bool)
for thing_class in thing_class_ids:
semantic_thing_segmentation = tf.math.logical_or(
semantic_thing_segmentation,
semantic_prediction == thing_class)
# Keep track of how many instances for each semantic label.
num_instance_per_semantic_label = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, clear_after_read=False)
instance_ids, _ = tf.unique(tf.reshape(instance_maps, [-1]))
for instance_id in instance_ids:
# Instance ID 0 is reserved for crowd region.
if instance_id == 0:
continue
thing_mask = tf.math.logical_and(instance_maps == instance_id,
semantic_thing_segmentation)
if tf.reduce_sum(tf.cast(thing_mask, tf.int32)) == 0:
continue
semantic_bin_counts = tf.math.bincount(
tf.boolean_mask(semantic_prediction, thing_mask))
semantic_majority = tf.cast(
tf.math.argmax(semantic_bin_counts), tf.int32)
while num_instance_per_semantic_label.size() <= semantic_majority:
num_instance_per_semantic_label = num_instance_per_semantic_label.write(
num_instance_per_semantic_label.size(), 0)
new_instance_id = (
num_instance_per_semantic_label.read(semantic_majority) + 1)
num_instance_per_semantic_label = num_instance_per_semantic_label.write(
semantic_majority, new_instance_id)
panoptic_prediction = tf.where(
thing_mask,
tf.ones_like(panoptic_prediction) * semantic_majority * label_divisor
+ new_instance_id,
panoptic_prediction)
# Done with `num_instance_per_semantic_label` tensor array.
num_instance_per_semantic_label.close()
# Start to paste predicted `stuff` regions to panoptic prediction.
instance_stuff_regions = instance_maps == 0
semantic_ids, _ = tf.unique(tf.reshape(semantic_prediction, [-1]))
for semantic_id in semantic_ids:
if tf.reduce_sum(tf.cast(thing_class_ids == semantic_id, tf.int32)) > 0:
continue
# Check stuff area.
stuff_mask = tf.math.logical_and(semantic_prediction == semantic_id,
instance_stuff_regions)
stuff_area = tf.reduce_sum(tf.cast(stuff_mask, tf.int32))
if stuff_area >= stuff_area_limit:
panoptic_prediction = tf.where(
stuff_mask,
tf.ones_like(panoptic_prediction) * semantic_id * label_divisor,
panoptic_prediction)
return panoptic_prediction
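# Worked example of the panoptic label encoding produced above (illustrative):
# with label_divisor=256, the second instance assigned to semantic class 17 is
# encoded as 17 * 256 + 2 = 4354, a kept stuff region of class 23 is encoded as
# 23 * 256 = 5888, and pixels that remain unassigned keep the initial value
# void_label * 256.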
class PostProcessor(tf.keras.layers.Layer):
"""This class contains code of a Panoptic-Deeplab post-processor."""
def __init__(
self,
output_size: List[int],
center_score_threshold: float,
thing_class_ids: List[int],
label_divisor: int,
stuff_area_limit: int,
ignore_label: int,
nms_kernel: int,
keep_k_centers: int,
rescale_predictions: bool,
**kwargs):
"""Initializes a Panoptic-Deeplab post-processor.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
center_score_threshold: A float setting the threshold for the center
heatmap.
thing_class_ids: An integer list shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit;
otherwise, it will be re-assigned as void_label.
ignore_label: An integer specifying the void label.
nms_kernel: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, image_info is used to rescale predictions.
**kwargs: additional kwargs arguments.
"""
super(PostProcessor, self).__init__(**kwargs)
self._config_dict = {
'output_size': output_size,
'center_score_threshold': center_score_threshold,
'thing_class_ids': thing_class_ids,
'label_divisor': label_divisor,
'stuff_area_limit': stuff_area_limit,
'ignore_label': ignore_label,
'nms_kernel': nms_kernel,
'keep_k_centers': keep_k_centers,
'rescale_predictions': rescale_predictions
}
self._post_processor = functools.partial(
_get_panoptic_predictions,
center_threshold=center_score_threshold,
thing_class_ids=tf.convert_to_tensor(thing_class_ids),
label_divisor=label_divisor,
stuff_area_limit=stuff_area_limit,
void_label=ignore_label,
nms_kernel_size=nms_kernel,
keep_k_centers=keep_k_centers)
def _resize_and_pad_masks(self, mask, image_info):
"""Resizes masks to match the original image shape and pads to`output_size`.
Args:
mask: a padded mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = tf.image.resize(
mask,
rescale_size,
method='bilinear')
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._config_dict['output_size'][0],
self._config_dict['output_size'][1])
return mask
def _resize_and_pad_offset_mask(self, mask, image_info):
"""Rescales and resizes offset masks and pads to`output_size`.
Args:
mask: a padded offset mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
rescaled, resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = mask_ops.resize_and_rescale_offsets(
tf.expand_dims(mask, axis=0),
rescale_size)[0]
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._config_dict['output_size'][0],
self._config_dict['output_size'][1])
return mask
def call(
self,
result_dict: Dict[Text, tf.Tensor],
image_info: tf.Tensor) -> Dict[Text, tf.Tensor]:
"""Performs the post-processing given model predicted results.
Args:
result_dict: A dictionary of tf.Tensor containing model results. The dict
has to contain
- segmentation_outputs
- instance_centers_heatmap
- instance_centers_offset
image_info: A tf.Tensor of image infos.
Returns:
The post-processed dict of tf.Tensor, containing the following keys:
- panoptic_outputs
- category_mask
- instance_mask
- instance_centers
- instance_score
"""
if self._config_dict['rescale_predictions']:
segmentation_outputs = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(result_dict['segmentation_outputs'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
instance_centers_heatmap = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(result_dict['instance_centers_heatmap'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
instance_centers_offset = tf.map_fn(
fn=lambda x: self._resize_and_pad_offset_mask(x[0], x[1]),
elems=(result_dict['instance_centers_offset'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
else:
segmentation_outputs = tf.image.resize(
result_dict['segmentation_outputs'],
size=self._config_dict['output_size'],
method='bilinear')
instance_centers_heatmap = tf.image.resize(
result_dict['instance_centers_heatmap'],
size=self._config_dict['output_size'],
method='bilinear')
instance_centers_offset = mask_ops.resize_and_rescale_offsets(
result_dict['instance_centers_offset'],
target_size=self._config_dict['output_size'])
processed_dict = {}
(processed_dict['panoptic_outputs'],
processed_dict['instance_centers'],
processed_dict['instance_scores'],
_) = self._post_processor(
tf.nn.softmax(segmentation_outputs, axis=-1),
instance_centers_heatmap,
instance_centers_offset)
label_divisor = self._config_dict['label_divisor']
processed_dict['category_mask'] = (
processed_dict['panoptic_outputs'] // label_divisor)
processed_dict['instance_mask'] = (
processed_dict['panoptic_outputs'] % label_divisor)
processed_dict.update({
'segmentation_outputs': result_dict['segmentation_outputs']})
return processed_dict
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
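def _example_post_process(semantic_logits: tf.Tensor,
                          center_heatmap: tf.Tensor,
                          center_offsets: tf.Tensor,
                          image_info: tf.Tensor) -> Dict[Text, tf.Tensor]:
  # Illustrative sketch, not part of the original library: the hyper-parameter
  # values below (COCO-style 80 thing classes, label divisor 256, 641x641
  # output, etc.) are assumptions chosen only to make the example concrete.
  post_processor = PostProcessor(
      output_size=[641, 641],
      center_score_threshold=0.1,
      thing_class_ids=list(range(1, 81)),
      label_divisor=256,
      stuff_area_limit=4096,
      ignore_label=0,
      nms_kernel=7,
      keep_k_centers=200,
      rescale_predictions=False)
  results = post_processor(
      {'segmentation_outputs': semantic_logits,
       'instance_centers_heatmap': center_heatmap,
       'instance_centers_offset': center_offsets},
      image_info)
  # 'category_mask' and 'instance_mask' jointly encode the panoptic prediction.
  return results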
| 23,569 | 40.42355 | 94 | py |
models | models-master/official/projects/panoptic/modeling/layers/paste_masks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definition for bilinear grid sampling and mask pasting layers."""
from typing import List
import tensorflow as tf
class BilinearGridSampler(tf.keras.layers.Layer):
"""Bilinear Grid Sampling layer."""
def __init__(self, align_corners: bool = False, **kwargs):
"""Generates panoptic segmentation masks.
Args:
      align_corners: A `bool`; if True, the centers of the 4 corner
pixels of the input and output tensors are aligned, preserving the
values at the corner pixels.
**kwargs: Additional kwargs arguments.
"""
super(BilinearGridSampler, self).__init__(**kwargs)
self.align_corners = align_corners
self._config = {
'align_corners': align_corners
}
def build(self, input_shape):
features_shape, _, _ = input_shape
_, height, width, channels = features_shape.as_list()
self._height = height
self._width = width
self._channels = channels
def _valid_coordinates(self, x_coord, y_coord):
return tf.logical_and(
tf.logical_and(
tf.greater_equal(x_coord, 0),
tf.greater_equal(y_coord, 0)),
tf.logical_and(
tf.less(x_coord, self._width),
tf.less(y_coord, self._height)))
def _get_pixel(self, features, x_coord, y_coord):
x_coord = tf.cast(x_coord, dtype=tf.int32)
y_coord = tf.cast(y_coord, dtype=tf.int32)
clipped_x = tf.clip_by_value(x_coord, 0, self._width - 1)
clipped_y = tf.clip_by_value(y_coord, 0, self._height - 1)
batch_size, _, _, _ = features.shape.as_list()
if batch_size is None:
batch_size = tf.shape(features)[0]
batch_indices = tf.reshape(
tf.range(batch_size, dtype=tf.int32),
shape=[batch_size, 1, 1])
batch_indices = tf.tile(
batch_indices,
multiples=[1, x_coord.shape[1], x_coord.shape[2]])
indices = tf.cast(
tf.stack([batch_indices, clipped_y, clipped_x], axis=-1),
dtype=tf.int32)
gathered_pixels = tf.gather_nd(features, indices)
return tf.where(
tf.expand_dims(self._valid_coordinates(x_coord, y_coord), axis=-1),
gathered_pixels,
tf.zeros_like(gathered_pixels))
def call(self, inputs):
features, x_coord, y_coord = inputs
x_coord += 1
y_coord += 1
if self.align_corners:
x_coord = (x_coord * 0.5) * (self._width - 1)
y_coord = (y_coord * 0.5) * (self._height - 1)
else:
x_coord = (x_coord * self._width - 1) * 0.5
y_coord = (y_coord * self._height - 1) * 0.5
left = tf.floor(x_coord)
top = tf.floor(y_coord)
right = left + 1
bottom = top + 1
top_left = (right - x_coord) * (bottom - y_coord)
top_right = (x_coord - left) * (bottom - y_coord)
bottom_left = (right - x_coord) * (y_coord - top)
bottom_right = (x_coord - left) * (y_coord - top)
i_top_left = self._get_pixel(features, left, top)
i_top_right = self._get_pixel(features, right, top)
i_bottom_left = self._get_pixel(features, left, bottom)
i_bottom_right = self._get_pixel(features, right, bottom)
i_top_left *= tf.expand_dims(top_left, axis=-1)
i_top_right *= tf.expand_dims(top_right, axis=-1)
i_bottom_left *= tf.expand_dims(bottom_left, axis=-1)
i_bottom_right *= tf.expand_dims(bottom_right, axis=-1)
interpolated_features = tf.math.add_n(
[i_top_left, i_top_right, i_bottom_left, i_bottom_right])
return interpolated_features
def get_config(self):
    return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
class PasteMasks(tf.keras.layers.Layer):
"""Layer to paste instance masks."""
def __init__(self, output_size: List[int],
grid_sampler, **kwargs):
"""Resizes and pastes instance masks to match image size.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
grid_sampler: A grid sampling layer. Currently only `BilinearGridSampler`
is supported.
**kwargs: Additional kwargs arguments.
"""
super(PasteMasks, self).__init__(**kwargs)
self._output_size = output_size
self._grid_sampler = grid_sampler
self._config = {
'output_size': output_size,
'grid_sampler': grid_sampler
}
def build(self, input_shape):
self._x_coords = tf.range(0, self._output_size[1], dtype=tf.float32)
self._y_coords = tf.range(0, self._output_size[0], dtype=tf.float32)
def call(self, inputs):
masks, boxes = inputs
y0, x0, y1, x1 = tf.split(boxes, 4, axis=1)
x_coords = tf.cast(self._x_coords, dtype=boxes.dtype)
y_coords = tf.cast(self._y_coords, dtype=boxes.dtype)
x_coords = (x_coords - x0) / (x1 - x0) * 2 - 1
y_coords = (y_coords - y0) / (y1 - y0) * 2 - 1
x_coords = tf.tile(
tf.expand_dims(x_coords, axis=1),
multiples=[1, self._output_size[0], 1])
y_coords = tf.tile(
tf.expand_dims(y_coords, axis=2),
multiples=[1, 1, self._output_size[1]])
pasted_masks = self._grid_sampler((masks, x_coords, y_coords))
return pasted_masks
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
| 5,862 | 31.392265 | 79 | py |
models | models-master/official/projects/panoptic/modeling/layers/panoptic_segmentation_generator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definition for postprocessing layer to genrate panoptic segmentations."""
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from official.projects.panoptic.modeling.layers import paste_masks
from official.vision.ops import spatial_transform_ops
def _batch_count_ones(masks: tf.Tensor,
dtype: tf.dtypes.DType = tf.int32) -> tf.Tensor:
"""Counts the ones/trues for each mask in the batch.
Args:
masks: A tensor in shape (..., height, width) with arbitrary numbers of
batch dimensions.
dtype: DType of the resulting tensor. Default is tf.int32.
Returns:
A tensor which contains the count of non-zero elements for each mask in the
batch. The rank of the resulting tensor is equal to rank(masks) - 2.
"""
masks_shape = masks.get_shape().as_list()
if len(masks_shape) < 2:
raise ValueError(
        'Expected the input masks (..., height, width) to have rank >= 2, was: %s' %
masks_shape)
return tf.reduce_sum(tf.cast(masks, dtype), axis=[-2, -1])
class PanopticSegmentationGenerator(tf.keras.layers.Layer):
"""Panoptic segmentation generator layer."""
def __init__(
self,
output_size: List[int],
max_num_detections: int,
stuff_classes_offset: int,
mask_binarize_threshold: float = 0.5,
score_threshold: float = 0.5,
things_overlap_threshold: float = 0.5,
stuff_area_threshold: float = 4096,
things_class_label: int = 1,
void_class_label: int = 0,
void_instance_id: int = -1,
rescale_predictions: bool = False,
**kwargs):
"""Generates panoptic segmentation masks.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
max_num_detections: `int` for maximum number of detections.
stuff_classes_offset: An `int` that is added to the output of the
semantic segmentation mask to make sure that the stuff class ids do not
        overlap with the thing class ids of the MaskRCNN outputs.
      mask_binarize_threshold: A `float` threshold for binarizing the predicted
        instance masks.
score_threshold: A `float` representing the threshold for deciding
when to remove objects based on score.
things_overlap_threshold: A `float` representing a threshold for deciding
to ignore a thing if overlap is above the threshold.
      stuff_area_threshold: A `float` representing a threshold for ignoring a
        stuff class if its area is below the threshold.
things_class_label: An `int` that represents a single merged category of
all thing classes in the semantic segmentation output.
      void_class_label: An `int` that is used to represent empty or unlabelled
        regions of the mask.
      void_instance_id: An `int` that is used to denote regions that are not
        assigned to any thing class. That is, void_instance_id is assigned to
        both stuff regions and empty regions.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, image_info is used to rescale predictions.
      **kwargs: Additional keyword arguments.
"""
self._output_size = output_size
self._max_num_detections = max_num_detections
self._stuff_classes_offset = stuff_classes_offset
self._mask_binarize_threshold = mask_binarize_threshold
self._score_threshold = score_threshold
self._things_overlap_threshold = things_overlap_threshold
self._stuff_area_threshold = stuff_area_threshold
self._things_class_label = things_class_label
self._void_class_label = void_class_label
self._void_instance_id = void_instance_id
self._rescale_predictions = rescale_predictions
self._config_dict = {
'output_size': output_size,
'max_num_detections': max_num_detections,
'stuff_classes_offset': stuff_classes_offset,
'mask_binarize_threshold': mask_binarize_threshold,
        'score_threshold': score_threshold,
        'things_overlap_threshold': things_overlap_threshold,
        'stuff_area_threshold': stuff_area_threshold,
'things_class_label': things_class_label,
'void_class_label': void_class_label,
'void_instance_id': void_instance_id,
'rescale_predictions': rescale_predictions
}
super().__init__(**kwargs)
def build(self, input_shape: tf.TensorShape):
grid_sampler = paste_masks.BilinearGridSampler(align_corners=False)
self._paste_masks_fn = paste_masks.PasteMasks(
output_size=self._output_size, grid_sampler=grid_sampler)
super().build(input_shape)
def _generate_panoptic_masks(
self, boxes: tf.Tensor, scores: tf.Tensor, classes: tf.Tensor,
detections_masks: tf.Tensor,
segmentation_mask: tf.Tensor) -> Dict[str, tf.Tensor]:
"""Generates panoptic masks for a single image.
This function implements the following steps to merge instance and semantic
segmentation masks described in https://arxiv.org/pdf/1901.02446.pdf
Steps:
1. resolving overlaps between different instances based on their
confidence scores
2. resolving overlaps between instance and semantic segmentation
outputs in favor of instances
3. removing any stuff regions labeled other or under a given area
threshold.
Args:
boxes: A `tf.Tensor` of shape [num_rois, 4], representing the bounding
boxes for detected objects.
scores: A `tf.Tensor` of shape [num_rois], representing the
confidence scores for each object.
classes: A `tf.Tensor` of shape [num_rois], representing the class
for each object.
detections_masks: A `tf.Tensor` of shape
[num_rois, mask_height, mask_width, 1], representing the cropped mask
for each object.
segmentation_mask: A `tf.Tensor` of shape [height, width], representing
the semantic segmentation output.
Returns:
Dict with the following keys:
- category_mask: A `tf.Tensor` for category masks.
        - instance_mask: A `tf.Tensor` for instance masks.
"""
# Offset stuff class predictions
segmentation_mask = tf.where(
tf.logical_or(
tf.equal(segmentation_mask, self._things_class_label),
tf.equal(segmentation_mask, self._void_class_label)),
segmentation_mask,
segmentation_mask + self._stuff_classes_offset
)
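    # e.g. with stuff_classes_offset=90, a predicted stuff class 3 becomes 93,
    # while things_class_label and void_class_label are left unchanged so they
    # never collide with thing class ids from the detection branch.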
# sort instances by their scores
sorted_indices = tf.argsort(scores, direction='DESCENDING')
mask_shape = self._output_size + [1]
category_mask = tf.ones(mask_shape,
dtype=tf.float32) * self._void_class_label
instance_mask = tf.ones(
mask_shape, dtype=tf.float32) * self._void_instance_id
# filter instances with low confidence
sorted_scores = tf.sort(scores, direction='DESCENDING')
valid_indices = tf.where(sorted_scores > self._score_threshold)
# if no instance has sufficient confidence score, skip merging
# instance segmentation masks
if tf.shape(valid_indices)[0] > 0:
loop_end_idx = valid_indices[-1, 0] + 1
loop_end_idx = tf.minimum(
tf.cast(loop_end_idx, dtype=tf.int32),
self._max_num_detections)
pasted_masks = self._paste_masks_fn((
detections_masks[:loop_end_idx],
boxes[:loop_end_idx]))
# add things segmentation to panoptic masks
for i in range(loop_end_idx):
        # we process instances in descending order, which will make sure
# the overlaps are resolved based on confidence score
instance_idx = sorted_indices[i]
pasted_mask = pasted_masks[instance_idx]
class_id = tf.cast(classes[instance_idx], dtype=tf.float32)
# convert sigmoid scores to binary values
binary_mask = tf.greater(
pasted_mask, self._mask_binarize_threshold)
# filter empty instance masks
if not tf.reduce_sum(tf.cast(binary_mask, tf.float32)) > 0:
continue
overlap = tf.logical_and(
binary_mask,
tf.not_equal(category_mask, self._void_class_label))
binary_mask_area = tf.reduce_sum(
tf.cast(binary_mask, dtype=tf.float32))
overlap_area = tf.reduce_sum(
tf.cast(overlap, dtype=tf.float32))
        # skip instances that have a big enough overlap with instances with
        # higher scores
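        # e.g. with things_overlap_threshold=0.5, an instance whose mask covers
        # 100 pixels but overlaps 60 already-assigned pixels is skipped.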
if overlap_area / binary_mask_area > self._things_overlap_threshold:
continue
# fill empty regions in category_mask represented by
# void_class_label with class_id of the instance.
category_mask = tf.where(
tf.logical_and(
binary_mask, tf.equal(category_mask, self._void_class_label)),
tf.ones_like(category_mask) * class_id, category_mask)
# fill empty regions in the instance_mask represented by
# void_instance_id with the id of the instance, starting from 1
instance_mask = tf.where(
tf.logical_and(
binary_mask,
tf.equal(instance_mask, self._void_instance_id)),
tf.ones_like(instance_mask) *
tf.cast(instance_idx + 1, tf.float32), instance_mask)
stuff_class_ids = tf.unique(tf.reshape(segmentation_mask, [-1])).y
for stuff_class_id in stuff_class_ids:
if stuff_class_id == self._things_class_label:
continue
stuff_mask = tf.logical_and(
tf.equal(segmentation_mask, stuff_class_id),
tf.equal(category_mask, self._void_class_label))
stuff_mask_area = tf.reduce_sum(
tf.cast(stuff_mask, dtype=tf.float32))
if stuff_mask_area < self._stuff_area_threshold:
continue
category_mask = tf.where(
stuff_mask,
tf.ones_like(category_mask) * stuff_class_id,
category_mask)
results = {
'category_mask': category_mask[:, :, 0],
'instance_mask': instance_mask[:, :, 0]
}
return results
def _resize_and_pad_masks(self, mask, image_info):
"""Resizes masks to match the original image shape and pads to`output_size`.
Args:
mask: a padded mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = tf.image.resize(
mask,
rescale_size,
method='bilinear')
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0, self._output_size[0], self._output_size[1])
return mask
def call(self,
inputs: tf.Tensor,
image_info: Optional[tf.Tensor] = None) -> Dict[str, tf.Tensor]:
detections = inputs
batched_scores = detections['detection_scores']
batched_classes = detections['detection_classes']
batched_detections_masks = tf.expand_dims(
detections['detection_masks'], axis=-1)
batched_boxes = detections['detection_boxes']
batched_segmentation_masks = tf.cast(
detections['segmentation_outputs'], dtype=tf.float32)
if self._rescale_predictions:
scale = tf.tile(
tf.cast(image_info[:, 2:3, :], dtype=batched_boxes.dtype),
multiples=[1, 1, 2])
batched_boxes /= scale
batched_segmentation_masks = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(
batched_segmentation_masks,
image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
else:
batched_segmentation_masks = tf.image.resize(
batched_segmentation_masks,
size=self._output_size,
method='bilinear')
batched_segmentation_masks = tf.expand_dims(tf.cast(
tf.argmax(batched_segmentation_masks, axis=-1),
dtype=tf.float32), axis=-1)
panoptic_masks = tf.map_fn(
fn=lambda x: self._generate_panoptic_masks( # pylint:disable=g-long-lambda
x[0], x[1], x[2], x[3], x[4]),
elems=(
batched_boxes,
batched_scores,
batched_classes,
batched_detections_masks,
batched_segmentation_masks),
fn_output_signature={
'category_mask': tf.float32,
'instance_mask': tf.float32
}, parallel_iterations=32)
for k, v in panoptic_masks.items():
panoptic_masks[k] = tf.cast(v, dtype=tf.int32)
return panoptic_masks
def get_config(self) -> Dict[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Dict[str,
Any]) -> 'PanopticSegmentationGenerator':
return cls(**config)
class PanopticSegmentationGeneratorV2(tf.keras.layers.Layer):
"""Panoptic segmentation generator layer V2."""
def __init__(self,
output_size: List[int],
max_num_detections: int,
stuff_classes_offset: int,
mask_binarize_threshold: float = 0.5,
score_threshold: float = 0.5,
things_overlap_threshold: float = 0.5,
stuff_area_threshold: float = 4096,
things_class_label: int = 1,
void_class_label: int = 0,
void_instance_id: int = -1,
rescale_predictions: bool = False,
**kwargs):
"""Generates panoptic segmentation masks.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
max_num_detections: `int` for maximum number of detections.
stuff_classes_offset: An `int` that is added to the output of the semantic
        segmentation mask to make sure that the stuff class ids do not overlap
with the thing class ids of the MaskRCNN outputs.
      mask_binarize_threshold: A `float` threshold for binarizing the predicted
        instance masks.
score_threshold: A `float` representing the threshold for deciding when to
remove objects based on score.
things_overlap_threshold: A `float` representing a threshold for deciding
to ignore a thing if overlap is above the threshold.
      stuff_area_threshold: A `float` representing a threshold for ignoring a
        stuff class if its area is below the threshold.
things_class_label: An `int` that represents a single merged category of
all thing classes in the semantic segmentation output.
      void_class_label: An `int` that is used to represent empty or unlabelled
        regions of the mask.
      void_instance_id: An `int` that is used to denote regions that are not
        assigned to any thing class. That is, void_instance_id is assigned to
        both stuff regions and empty regions.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, image_info is used to rescale predictions.
      **kwargs: Additional keyword arguments.
"""
self._output_size = output_size
self._max_num_detections = max_num_detections
self._stuff_classes_offset = stuff_classes_offset
self._mask_binarize_threshold = mask_binarize_threshold
self._score_threshold = score_threshold
self._things_overlap_threshold = things_overlap_threshold
self._stuff_area_threshold = stuff_area_threshold
self._things_class_label = things_class_label
self._void_class_label = void_class_label
self._void_instance_id = void_instance_id
self._rescale_predictions = rescale_predictions
self._config_dict = {
'output_size': output_size,
'max_num_detections': max_num_detections,
'stuff_classes_offset': stuff_classes_offset,
'mask_binarize_threshold': mask_binarize_threshold,
        'score_threshold': score_threshold,
        'things_overlap_threshold': things_overlap_threshold,
        'stuff_area_threshold': stuff_area_threshold,
'things_class_label': things_class_label,
'void_class_label': void_class_label,
'void_instance_id': void_instance_id,
'rescale_predictions': rescale_predictions
}
super().__init__(**kwargs)
def call(self,
inputs: tf.Tensor,
image_info: Optional[tf.Tensor] = None) -> Dict[str, tf.Tensor]:
"""Generates panoptic segmentation masks."""
# (batch_size, num_rois, 4) in absolute coordinates.
detection_boxes = tf.cast(inputs['detection_boxes'], tf.float32)
# (batch_size, num_rois)
detection_classes = tf.cast(inputs['detection_classes'], tf.int32)
# (batch_size, num_rois)
detection_scores = inputs['detection_scores']
# (batch_size, num_rois, mask_height, mask_width)
detections_masks = inputs['detection_masks']
# (batch_size, height, width, num_semantic_classes)
segmentation_outputs = inputs['segmentation_outputs']
if self._rescale_predictions:
# (batch_size, 2)
original_size = tf.cast(image_info[:, 0, :], tf.float32)
desired_size = tf.cast(image_info[:, 1, :], tf.float32)
image_scale = tf.cast(image_info[:, 2, :], tf.float32)
offset = tf.cast(image_info[:, 3, :], tf.float32)
rescale_size = tf.math.ceil(desired_size / image_scale)
# (batch_size, output_height, output_width, num_semantic_classes)
segmentation_outputs = (
spatial_transform_ops.bilinear_resize_with_crop_and_pad(
segmentation_outputs,
rescale_size,
crop_offset=offset,
crop_size=original_size,
output_size=self._output_size))
# (batch_size, 1, 4)
image_scale = tf.tile(image_scale, multiples=[1, 2])[:, tf.newaxis]
detection_boxes /= image_scale
else:
# (batch_size, output_height, output_width, num_semantic_classes)
segmentation_outputs = tf.image.resize(
segmentation_outputs, size=self._output_size, method='bilinear')
# (batch_size, output_height, output_width)
instance_mask, instance_category_mask = self._generate_instances(
detection_boxes, detection_classes, detection_scores, detections_masks)
# (batch_size, output_height, output_width)
stuff_category_mask = self._generate_stuffs(segmentation_outputs)
# (batch_size, output_height, output_width)
category_mask = tf.where((stuff_category_mask != self._void_class_label) &
(instance_category_mask == self._void_class_label),
stuff_category_mask + self._stuff_classes_offset,
instance_category_mask)
return {'instance_mask': instance_mask, 'category_mask': category_mask}
def _generate_instances(
self, detection_boxes: tf.Tensor, detection_classes: tf.Tensor,
detection_scores: tf.Tensor,
detections_masks: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates instance & category masks from instance segmentation outputs."""
batch_size = tf.shape(detections_masks)[0]
num_rois = tf.shape(detections_masks)[1]
mask_height = tf.shape(detections_masks)[2]
mask_width = tf.shape(detections_masks)[3]
output_height = self._output_size[0]
output_width = self._output_size[1]
# (batch_size, num_rois, mask_height, mask_width)
detections_masks = detections_masks * (
tf.cast((detection_scores > self._score_threshold) &
(detection_classes != self._void_class_label),
detections_masks.dtype)[:, :, tf.newaxis, tf.newaxis])
# Resizes and copies the detections_masks to the bounding boxes in the
# output canvas.
# (batch_size, num_rois, output_height, output_width)
pasted_detection_masks = tf.reshape(
spatial_transform_ops.bilinear_resize_to_bbox(
tf.reshape(detections_masks, [-1, mask_height, mask_width]),
tf.reshape(detection_boxes, [-1, 4]), self._output_size),
shape=[-1, num_rois, output_height, output_width])
# (batch_size, num_rois, output_height, output_width)
instance_binary_masks = (
pasted_detection_masks > self._mask_binarize_threshold)
# Sorts detection related tensors by scores.
# (batch_size, num_rois)
sorted_detection_indices = tf.argsort(
detection_scores, axis=1, direction='DESCENDING')
# (batch_size, num_rois)
sorted_detection_classes = tf.gather(
detection_classes, sorted_detection_indices, batch_dims=1)
# (batch_size, num_rois, output_height, output_width)
sorted_instance_binary_masks = tf.gather(
instance_binary_masks, sorted_detection_indices, batch_dims=1)
# (batch_size, num_rois)
instance_areas = _batch_count_ones(
sorted_instance_binary_masks, dtype=tf.float32)
init_loop_vars = (
0, # i: the loop counter
tf.ones([batch_size, output_height, output_width], dtype=tf.int32) *
self._void_instance_id, # combined_instance_mask
tf.ones([batch_size, output_height, output_width], dtype=tf.int32) *
self._void_class_label # combined_category_mask
)
def _copy_instances_loop_body(
i: int, combined_instance_mask: tf.Tensor,
combined_category_mask: tf.Tensor) -> Tuple[int, tf.Tensor, tf.Tensor]:
"""Iterates the sorted detections and copies the instances."""
# (batch_size, output_height, output_width)
instance_binary_mask = sorted_instance_binary_masks[:, i]
# Masks out the instances that have a big enough overlap with the other
# instances with higher scores.
# (batch_size, )
overlap_areas = _batch_count_ones(
(combined_instance_mask != self._void_instance_id)
& instance_binary_mask,
dtype=tf.float32)
# (batch_size, )
instance_overlap_threshold_mask = tf.math.divide_no_nan(
overlap_areas, instance_areas[:, i]) < self._things_overlap_threshold
# (batch_size, output_height, output_width)
instance_binary_mask &= (
instance_overlap_threshold_mask[:, tf.newaxis, tf.newaxis]
& (combined_instance_mask == self._void_instance_id))
# Updates combined_instance_mask.
# (batch_size, )
instance_id = tf.cast(
sorted_detection_indices[:, i] + 1, # starting from 1
dtype=combined_instance_mask.dtype)
# (batch_size, output_height, output_width)
combined_instance_mask = tf.where(instance_binary_mask,
instance_id[:, tf.newaxis, tf.newaxis],
combined_instance_mask)
# Updates combined_category_mask.
# (batch_size, )
class_id = tf.cast(
sorted_detection_classes[:, i], dtype=combined_category_mask.dtype)
# (batch_size, output_height, output_width)
combined_category_mask = tf.where(instance_binary_mask,
class_id[:, tf.newaxis, tf.newaxis],
combined_category_mask)
# Returns the updated loop vars.
return (
i + 1, # Increment the loop counter i
combined_instance_mask,
combined_category_mask)
# (batch_size, output_height, output_width)
_, instance_mask, category_mask = tf.while_loop(
cond=lambda i, *_: i < num_rois,
body=_copy_instances_loop_body,
loop_vars=init_loop_vars,
parallel_iterations=32,
maximum_iterations=num_rois)
return instance_mask, category_mask
def _generate_stuffs(self, segmentation_outputs: tf.Tensor) -> tf.Tensor:
"""Generates category mask from semantic segmentation outputs."""
num_semantic_classes = tf.shape(segmentation_outputs)[3]
# (batch_size, output_height, output_width)
segmentation_masks = tf.argmax(
segmentation_outputs, axis=-1, output_type=tf.int32)
stuff_binary_masks = (segmentation_masks != self._things_class_label) & (
segmentation_masks != self._void_class_label)
# (batch_size, num_semantic_classes, output_height, output_width)
stuff_class_binary_masks = ((tf.one_hot(
segmentation_masks, num_semantic_classes, axis=1, dtype=tf.int32) == 1)
& tf.expand_dims(stuff_binary_masks, axis=1))
# Masks out the stuff class whose area is below the given threshold.
# (batch_size, num_semantic_classes)
stuff_class_areas = _batch_count_ones(
stuff_class_binary_masks, dtype=tf.float32)
# (batch_size, num_semantic_classes, output_height, output_width)
stuff_class_binary_masks &= tf.greater(
stuff_class_areas, self._stuff_area_threshold)[:, :, tf.newaxis,
tf.newaxis]
# (batch_size, output_height, output_width)
stuff_binary_masks = tf.reduce_any(stuff_class_binary_masks, axis=1)
# (batch_size, output_height, output_width)
return tf.where(stuff_binary_masks, segmentation_masks,
tf.ones_like(segmentation_masks) * self._void_class_label)
def get_config(self) -> Dict[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config: Dict[str,
Any]) -> 'PanopticSegmentationGeneratorV2':
return cls(**config)
| 25,846 | 40.823625 | 85 | py |
models | models-master/official/projects/panoptic/modeling/layers/fusion_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains feature fusion blocks for panoptic segmentation models."""
from typing import Any, Callable, Dict, List, Mapping, Optional, Union
import tensorflow as tf
from official.modeling import tf_utils
# Type annotations.
States = Dict[str, tf.Tensor]
Activation = Union[str, Callable]
class PanopticDeepLabFusion(tf.keras.layers.Layer):
"""Creates a Panoptic DeepLab feature Fusion layer.
This implements the feature fusion introduced in the paper:
Cheng et al. Panoptic-DeepLab
(https://arxiv.org/pdf/1911.10194.pdf)
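  A minimal usage sketch (levels, filter counts and shapes are illustrative):
    fusion = PanopticDeepLabFusion(
        level=3, low_level=[2], num_projection_filters=[64])
    fused = fusion([backbone_endpoints, decoder_endpoints])
  Both endpoint dicts map level strings (e.g. '2', '3') to feature tensors of
  shape [batch, height, width, channels].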
"""
def __init__(
self,
level: int,
low_level: List[int],
num_projection_filters: List[int],
num_output_filters: int = 256,
use_depthwise_convolution: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
**kwargs):
"""Initializes panoptic FPN feature fusion layer.
Args:
      level: An `int` level at which the decoder was applied.
low_level: A list of `int` of minimum level to use in feature fusion.
num_projection_filters: A list of `int` with number of filters for
projection conv2d layers.
num_output_filters: An `int` number of filters in output conv2d layers.
use_depthwise_convolution: A bool to specify if use depthwise separable
convolutions.
activation: A `str` name of the activation function.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
interpolation: A `str` interpolation method for upsampling. Defaults to
`bilinear`.
**kwargs: Additional keyword arguments to be passed.
    Calling the layer returns a `float` `tf.Tensor` of shape
    [batch_size, feature_height, feature_width, feature_channel].
"""
super(PanopticDeepLabFusion, self).__init__(**kwargs)
self._config_dict = {
'level': level,
'low_level': low_level,
'num_projection_filters': num_projection_filters,
'num_output_filters': num_output_filters,
'use_depthwise_convolution': use_depthwise_convolution,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'interpolation': interpolation
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._channel_axis = -1
else:
self._channel_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: List[tf.TensorShape]):
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'padding': 'same',
'use_bias': True,
'kernel_initializer': tf.initializers.VarianceScaling(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._channel_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
self._projection_convs = []
self._projection_norms = []
self._fusion_convs = []
self._fusion_norms = []
for i in range(len(self._config_dict['low_level'])):
self._projection_convs.append(
conv_op(
filters=self._config_dict['num_projection_filters'][i],
kernel_size=1,
**conv_kwargs))
if self._config_dict['use_depthwise_convolution']:
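        # Depthwise separable fusion: a 5x5 depthwise convolution followed by a
        # 1x1 pointwise convolution approximates the dense 5x5 convolution in
        # the else-branch with far fewer parameters.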
depthwise_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
fusion_conv = tf.keras.Sequential([
tf.keras.layers.DepthwiseConv2D(
kernel_size=5,
padding='same',
use_bias=True,
depthwise_initializer=depthwise_initializer,
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1),
bn_op(**bn_kwargs),
conv_op(
filters=self._config_dict['num_output_filters'],
kernel_size=1,
**conv_kwargs)])
else:
fusion_conv = conv_op(
filters=self._config_dict['num_output_filters'],
kernel_size=5,
**conv_kwargs)
self._fusion_convs.append(fusion_conv)
self._projection_norms.append(bn_op(**bn_kwargs))
self._fusion_norms.append(bn_op(**bn_kwargs))
def call(self, inputs, training=None):
if training is None:
training = tf.keras.backend.learning_phase()
backbone_output = inputs[0]
decoder_output = inputs[1][str(self._config_dict['level'])]
x = decoder_output
for i in range(len(self._config_dict['low_level'])):
feature = backbone_output[str(self._config_dict['low_level'][i])]
feature = self._projection_convs[i](feature)
feature = self._projection_norms[i](feature, training=training)
feature = self._activation(feature)
shape = tf.shape(feature)
x = tf.image.resize(
x, size=[shape[1], shape[2]],
method=self._config_dict['interpolation'])
x = tf.cast(x, dtype=feature.dtype)
x = tf.concat([x, feature], axis=self._channel_axis)
x = self._fusion_convs[i](x)
x = self._fusion_norms[i](x, training=training)
x = self._activation(x)
return x
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 6,881 | 37.022099 | 80 | py |
models | models-master/official/projects/panoptic/modeling/heads/panoptic_deeplab_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for Panoptic Deeplab heads."""
from typing import List, Mapping, Optional, Tuple, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.panoptic.modeling.layers import fusion_layers
from official.vision.ops import spatial_transform_ops
class PanopticDeeplabHead(tf.keras.layers.Layer):
"""Creates a panoptic deeplab head."""
def __init__(
self,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
kernel_size: int = 3,
use_depthwise_convolution: bool = False,
upsample_factor: int = 1,
low_level: Optional[List[int]] = None,
low_level_num_filters: Optional[List[int]] = None,
fusion_num_output_filters: int = 256,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a panoptic deeplab head.
Args:
level: An `int` or `str`, level to use to build head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
kernel_size: An `int` number to specify the kernel size of the
stacked convolutions before the last prediction layer.
use_depthwise_convolution: A bool to specify if use depthwise separable
convolutions.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
      low_level: A list of `int` backbone levels to be used for feature fusion.
      low_level_num_filters: A list of `int` of reduced numbers of filters for
        the low level features before fusing them with higher level features.
fusion_num_output_filters: An `int` number to specify the number of
filters used by output layer of fusion module. Default is 256.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(PanopticDeeplabHead, self).__init__(**kwargs)
self._config_dict = {
'level': level,
'num_convs': num_convs,
'num_filters': num_filters,
'kernel_size': kernel_size,
'use_depthwise_convolution': use_depthwise_convolution,
'upsample_factor': upsample_factor,
'low_level': low_level,
'low_level_num_filters': low_level_num_filters,
'fusion_num_output_filters': fusion_num_output_filters,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the head."""
kernel_size = self._config_dict['kernel_size']
use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
random_initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'kernel_size': kernel_size if not use_depthwise_convolution else 1,
'padding': 'same',
'use_bias': True,
'kernel_initializer': random_initializer,
'kernel_regularizer': self._config_dict['kernel_regularizer'],
}
bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
self._panoptic_deeplab_fusion = fusion_layers.PanopticDeepLabFusion(
level=self._config_dict['level'],
low_level=self._config_dict['low_level'],
num_projection_filters=self._config_dict['low_level_num_filters'],
num_output_filters=self._config_dict['fusion_num_output_filters'],
use_depthwise_convolution=self
._config_dict['use_depthwise_convolution'],
activation=self._config_dict['activation'],
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
# Stacked convolutions layers.
self._convs = []
self._norms = []
for i in range(self._config_dict['num_convs']):
if use_depthwise_convolution:
self._convs.append(
tf.keras.layers.DepthwiseConv2D(
name='panoptic_deeplab_head_depthwise_conv_{}'.format(i),
kernel_size=kernel_size,
padding='same',
use_bias=True,
depthwise_initializer=random_initializer,
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1))
norm_name = 'panoptic_deeplab_head_depthwise_norm_{}'.format(i)
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
conv_name = 'panoptic_deeplab_head_conv_{}'.format(i)
self._convs.append(
conv_op(
name=conv_name,
filters=self._config_dict['num_filters'],
**conv_kwargs))
norm_name = 'panoptic_deeplab_head_norm_{}'.format(i)
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
super().build(input_shape)
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]],
training=None):
"""Forward pass of the head.
It supports both a tuple of 2 tensors or 2 dictionaries. The first is
backbone endpoints, and the second is decoder endpoints. When inputs are
tensors, they are from a single level of feature maps. When inputs are
dictionaries, they contain multiple levels of feature maps, where the key
is the index of feature map.
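    For example, a typical call is head((backbone_endpoints, decoder_endpoints)),
    where both endpoints are dicts keyed by level strings such as '2' or '3'.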
Args:
inputs: A tuple of 2 feature map tensors of shape
[batch, height_l, width_l, channels] or 2 dictionaries of tensors:
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
training: A bool, runs the model in training/eval mode.
Returns:
A `tf.Tensor` of the fused backbone and decoder features.
"""
if training is None:
training = tf.keras.backend.learning_phase()
x = self._panoptic_deeplab_fusion(inputs, training=training)
for conv, norm in zip(self._convs, self._norms):
x = conv(x)
x = norm(x, training=training)
x = self._activation(x)
if self._config_dict['upsample_factor'] > 1:
x = spatial_transform_ops.nearest_upsampling(
x, scale=self._config_dict['upsample_factor'])
return x
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()) + list(self._config_dict.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Vision')
class SemanticHead(PanopticDeeplabHead):
"""Creates a semantic head."""
def __init__(
self,
num_classes: int,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
kernel_size: int = 3,
prediction_kernel_size: int = 3,
use_depthwise_convolution: bool = False,
upsample_factor: int = 1,
low_level: Optional[List[int]] = None,
low_level_num_filters: Optional[List[int]] = None,
fusion_num_output_filters: int = 256,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a instance center head.
Args:
num_classes: An `int` number of mask classification categories. The number
of classes does not include background class.
level: An `int` or `str`, level to use to build head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
kernel_size: An `int` number to specify the kernel size of the
stacked convolutions before the last prediction layer.
prediction_kernel_size: An `int` number to specify the kernel size of the
prediction layer.
use_depthwise_convolution: A bool to specify if use depthwise separable
convolutions.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
      low_level: A list of `int` backbone levels to be used for feature fusion.
      low_level_num_filters: A list of `int` of reduced numbers of filters for
        the low level features before fusing them with higher level features.
fusion_num_output_filters: An `int` number to specify the number of
filters used by output layer of fusion module. Default is 256.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(SemanticHead, self).__init__(
level=level,
num_convs=num_convs,
num_filters=num_filters,
use_depthwise_convolution=use_depthwise_convolution,
kernel_size=kernel_size,
upsample_factor=upsample_factor,
low_level=low_level,
low_level_num_filters=low_level_num_filters,
fusion_num_output_filters=fusion_num_output_filters,
activation=activation,
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
**kwargs)
self._config_dict.update({
'num_classes': num_classes,
'prediction_kernel_size': prediction_kernel_size})
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the semantic head."""
super(SemanticHead, self).build(input_shape)
self._classifier = tf.keras.layers.Conv2D(
name='semantic_output',
filters=self._config_dict['num_classes'],
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]],
training=None):
"""Forward pass of the head."""
if training is None:
training = tf.keras.backend.learning_phase()
x = super(SemanticHead, self).call(inputs, training=training)
outputs = self._classifier(x)
return outputs
@tf.keras.utils.register_keras_serializable(package='Vision')
class InstanceHead(PanopticDeeplabHead):
"""Creates a instance head."""
def __init__(
self,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
kernel_size: int = 3,
prediction_kernel_size: int = 3,
use_depthwise_convolution: bool = False,
upsample_factor: int = 1,
low_level: Optional[List[int]] = None,
low_level_num_filters: Optional[List[int]] = None,
fusion_num_output_filters: int = 256,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a instance center head.
Args:
level: An `int` or `str`, level to use to build head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
kernel_size: An `int` number to specify the kernel size of the
stacked convolutions before the last prediction layer.
prediction_kernel_size: An `int` number to specify the kernel size of the
prediction layer.
use_depthwise_convolution: A bool to specify if use depthwise separable
convolutions.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
      low_level: A list of `int` backbone levels to be used for feature fusion.
      low_level_num_filters: A list of `int` of reduced numbers of filters for
        the low level features before fusing them with higher level features.
fusion_num_output_filters: An `int` number to specify the number of
filters used by output layer of fusion module. Default is 256.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(InstanceHead, self).__init__(
level=level,
num_convs=num_convs,
num_filters=num_filters,
use_depthwise_convolution=use_depthwise_convolution,
kernel_size=kernel_size,
upsample_factor=upsample_factor,
low_level=low_level,
low_level_num_filters=low_level_num_filters,
fusion_num_output_filters=fusion_num_output_filters,
activation=activation,
use_sync_bn=use_sync_bn,
norm_momentum=norm_momentum,
norm_epsilon=norm_epsilon,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
**kwargs)
self._config_dict.update({
'prediction_kernel_size': prediction_kernel_size})
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the instance head."""
super(InstanceHead, self).build(input_shape)
self._instance_center_prediction_conv = tf.keras.layers.Conv2D(
name='instance_centers_heatmap',
filters=1,
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
self._instance_center_regression_conv = tf.keras.layers.Conv2D(
name='instance_centers_offset',
filters=2,
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]],
training=None):
"""Forward pass of the head."""
if training is None:
training = tf.keras.backend.learning_phase()
x = super(InstanceHead, self).call(inputs, training=training)
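    # The prediction convs below emit a single-channel instance-center heatmap
    # and a two-channel center-offset map, following the Panoptic-DeepLab
    # instance representation.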
instance_centers_heatmap = self._instance_center_prediction_conv(x)
instance_centers_offset = self._instance_center_regression_conv(x)
outputs = {
'instance_centers_heatmap': instance_centers_heatmap,
'instance_centers_offset': instance_centers_offset
}
return outputs
| 19,039 | 42.770115 | 80 | py |
models | models-master/official/projects/panoptic/tasks/panoptic_deeplab.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic Deeplab task definition."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.projects.panoptic.configs import panoptic_deeplab as exp_cfg
from official.projects.panoptic.dataloaders import panoptic_deeplab_input
from official.projects.panoptic.losses import panoptic_deeplab_losses
from official.projects.panoptic.modeling import factory
from official.vision.dataloaders import input_reader_factory
from official.vision.evaluation import panoptic_quality_evaluator
from official.vision.evaluation import segmentation_metrics
@task_factory.register_task_cls(exp_cfg.PanopticDeeplabTask)
class PanopticDeeplabTask(base_task.Task):
"""A task for Panoptic Deeplab."""
def build_model(self):
"""Builds panoptic deeplab model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_panoptic_deeplab(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
# Builds the model through warm-up call.
dummy_images = tf.keras.Input(self.task_config.model.input_size)
# Note that image_info is always in the shape of [4, 2].
dummy_image_info = tf.keras.layers.Input([4, 2])
_ = model(dummy_images, dummy_image_info, training=False)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(semantic_decoder=model.semantic_decoder)
if not self.task_config.model.shared_decoder:
ckpt_items.update(instance_decoder=model.instance_decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds panoptic deeplab input."""
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = panoptic_deeplab_input.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id,
panoptic_category_mask_key=decoder_cfg.panoptic_category_mask_key,
panoptic_instance_mask_key=decoder_cfg.panoptic_instance_mask_key)
else:
raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type))
parser = panoptic_deeplab_input.Parser(
output_size=self.task_config.model.input_size[:2],
ignore_label=params.parser.ignore_label,
resize_eval_groundtruth=params.parser.resize_eval_groundtruth,
groundtruth_padded_size=params.parser.groundtruth_padded_size,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_type=params.parser.aug_type,
sigma=params.parser.sigma,
dtype=params.parser.dtype)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: Mapping[str, tf.Tensor],
model_outputs: Mapping[str, tf.Tensor],
aux_losses: Optional[Any] = None):
"""Panoptic deeplab losses.
Args:
labels: labels.
model_outputs: Output logits from panoptic deeplab.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
loss_config = self._task_config.losses
segmentation_loss_fn = (
panoptic_deeplab_losses.WeightedBootstrappedCrossEntropyLoss(
loss_config.label_smoothing,
loss_config.class_weights,
loss_config.ignore_label,
top_k_percent_pixels=loss_config.top_k_percent_pixels))
instance_center_heatmap_loss_fn = panoptic_deeplab_losses.CenterHeatmapLoss(
)
instance_center_offset_loss_fn = panoptic_deeplab_losses.CenterOffsetLoss()
semantic_weights = tf.cast(
labels['semantic_weights'],
dtype=model_outputs['instance_centers_heatmap'].dtype)
things_mask = tf.cast(
tf.squeeze(labels['things_mask'], axis=3),
dtype=model_outputs['instance_centers_heatmap'].dtype)
valid_mask = tf.cast(
tf.squeeze(labels['valid_mask'], axis=3),
dtype=model_outputs['instance_centers_heatmap'].dtype)
segmentation_loss = segmentation_loss_fn(
model_outputs['segmentation_outputs'],
labels['category_mask'],
sample_weight=semantic_weights)
instance_center_heatmap_loss = instance_center_heatmap_loss_fn(
model_outputs['instance_centers_heatmap'],
labels['instance_centers_heatmap'],
sample_weight=valid_mask)
instance_center_offset_loss = instance_center_offset_loss_fn(
model_outputs['instance_centers_offset'],
labels['instance_centers_offset'],
sample_weight=things_mask)
model_loss = (
loss_config.segmentation_loss_weight * segmentation_loss +
loss_config.center_heatmap_loss_weight * instance_center_heatmap_loss +
loss_config.center_offset_loss_weight * instance_center_offset_loss)
total_loss = model_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
losses = {
'total_loss': total_loss,
'model_loss': model_loss,
'segmentation_loss': segmentation_loss,
'instance_center_heatmap_loss': instance_center_heatmap_loss,
'instance_center_offset_loss': instance_center_offset_loss
}
return losses
def build_metrics(self, training: bool = True) -> List[
tf.keras.metrics.Metric]:
"""Build metrics."""
eval_config = self.task_config.evaluation
metrics = []
if training:
metric_names = [
'total_loss',
'segmentation_loss',
'instance_center_heatmap_loss',
'instance_center_offset_loss',
'model_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if eval_config.report_train_mean_iou:
self.train_mean_iou = segmentation_metrics.MeanIoU(
name='train_mean_iou',
num_classes=self.task_config.model.num_classes,
rescale_predictions=False,
dtype=tf.float32)
else:
rescale_predictions = (not self.task_config.validation_data.parser
.resize_eval_groundtruth)
self.perclass_iou_metric = segmentation_metrics.PerClassIoU(
name='per_class_iou',
num_classes=self.task_config.model.num_classes,
rescale_predictions=rescale_predictions,
dtype=tf.float32)
if self.task_config.model.generate_panoptic_masks:
self.panoptic_quality_metric = (
panoptic_quality_evaluator.PanopticQualityEvaluator(
num_categories=self.task_config.model.num_classes,
ignored_label=eval_config.ignored_label,
max_instances_per_category=eval_config
.max_instances_per_category,
offset=eval_config.offset,
is_thing=eval_config.is_thing,
rescale_predictions=eval_config.rescale_predictions))
return metrics
def train_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None) -> Dict[str, Any]:
"""Does forward and backward.
Args:
      inputs: a pair of (images, labels) tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(
inputs=images,
image_info=labels['image_info'],
training=True)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
labels=labels,
model_outputs=outputs,
aux_losses=model.losses)
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
if metrics:
for m in metrics:
m.update_state(losses[m.name])
if self.task_config.evaluation.report_train_mean_iou:
segmentation_labels = {
'masks': labels['category_mask'],
'valid_masks': labels['valid_mask'],
'image_info': labels['image_info']
}
self.process_metrics(
metrics=[self.train_mean_iou],
labels=segmentation_labels,
model_outputs=outputs['segmentation_outputs'])
logs.update({
self.train_mean_iou.name:
self.train_mean_iou.result()
})
return logs
def validation_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None) -> Dict[str, Any]:
"""Validatation step.
Args:
      inputs: a pair of (images, labels) tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
outputs = model(
inputs=images,
image_info=labels['image_info'],
training=False)
logs = {self.loss: 0}
segmentation_labels = {
'masks': labels['category_mask'],
'valid_masks': labels['valid_mask'],
'image_info': labels['image_info']
}
self.perclass_iou_metric.update_state(segmentation_labels,
outputs['segmentation_outputs'])
if self.task_config.model.generate_panoptic_masks:
pq_metric_labels = {
'category_mask': tf.squeeze(labels['category_mask'], axis=3),
'instance_mask': tf.squeeze(labels['instance_mask'], axis=3),
'image_info': labels['image_info']
}
panoptic_outputs = {
'category_mask':
outputs['category_mask'],
'instance_mask':
outputs['instance_mask'],
}
logs.update({
self.panoptic_quality_metric.name:
(pq_metric_labels, panoptic_outputs)})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.perclass_iou_metric.reset_states()
state = [self.perclass_iou_metric]
if self.task_config.model.generate_panoptic_masks:
state += [self.panoptic_quality_metric]
if self.task_config.model.generate_panoptic_masks:
self.panoptic_quality_metric.update_state(
step_outputs[self.panoptic_quality_metric.name][0],
step_outputs[self.panoptic_quality_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
result = {}
ious = self.perclass_iou_metric.result()
if self.task_config.evaluation.report_per_class_iou:
for i, value in enumerate(ious.numpy()):
result.update({'segmentation_iou/class_{}'.format(i): value})
# Computes mean IoU
result.update({'segmentation_mean_iou': tf.reduce_mean(ious).numpy()})
if self.task_config.model.generate_panoptic_masks:
panoptic_quality_results = self.panoptic_quality_metric.result()
for k, value in panoptic_quality_results.items():
if k.endswith('per_class'):
if self.task_config.evaluation.report_per_class_pq:
for i, per_class_value in enumerate(value):
metric_key = 'panoptic_quality/{}/class_{}'.format(k, i)
result[metric_key] = per_class_value
else:
continue
else:
result['panoptic_quality/{}'.format(k)] = value
return result
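# The mixed-precision handling in train_step above follows the standard
# LossScaleOptimizer recipe: scale the loss before computing gradients, then
# unscale the gradients before applying them. The sketch below restates that
# pattern in isolation; the toy model and data are illustrative assumptions,
# not part of this task.
def _example_loss_scaling_step():
  """Illustrative only; not used by the task."""
  optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
      tf.keras.optimizers.SGD(learning_rate=0.1))
  model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  features = tf.random.normal([8, 4])
  targets = tf.random.normal([8, 1])
  with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(model(features) - targets))
    scaled_loss = optimizer.get_scaled_loss(loss)
  grads = tape.gradient(scaled_loss, model.trainable_variables)
  grads = optimizer.get_unscaled_gradients(grads)
  optimizer.apply_gradients(zip(grads, model.trainable_variables))
  return loss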
| 14,713 | 36.345178 | 80 | py |
models | models-master/official/projects/panoptic/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/panoptic/tasks/panoptic_maskrcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Panoptic MaskRCNN task definition."""
from typing import Any, Dict, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import task_factory
from official.projects.panoptic.configs import panoptic_maskrcnn as exp_cfg
from official.projects.panoptic.dataloaders import panoptic_maskrcnn_input
from official.projects.panoptic.modeling import factory
from official.vision.dataloaders import input_reader
from official.vision.dataloaders import input_reader_factory
from official.vision.evaluation import panoptic_quality
from official.vision.evaluation import segmentation_metrics
from official.vision.losses import segmentation_losses
from official.vision.tasks import maskrcnn
@task_factory.register_task_cls(exp_cfg.PanopticMaskRCNNTask)
class PanopticMaskRCNNTask(maskrcnn.MaskRCNNTask):
"""A single-replica view of training procedure.
Panoptic Mask R-CNN task provides artifacts for training/evalution procedures,
including loading/iterating over Datasets, initializing the model, calculating
the loss, post-processing, and customized metrics with reduction.
"""
def __init__(self,
params,
logging_dir: Optional[str] = None,
name: Optional[str] = None):
super().__init__(params, logging_dir=logging_dir, name=name)
self.segmentation_train_mean_iou = None
self.segmentation_perclass_iou_metric = None
self.panoptic_quality_metric = None
def build_model(self) -> tf.keras.Model:
"""Builds Panoptic Mask R-CNN model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_panoptic_maskrcnn(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
# Builds the model through warm-up call.
dummy_images = tf.keras.Input(self.task_config.model.input_size)
# Note that image_info is always in the shape of [4, 2].
dummy_image_info = tf.keras.layers.Input([4, 2])
_ = model(dummy_images, image_info=dummy_image_info, training=False)
return model
def initialize(self, model: tf.keras.Model) -> None:
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
def _get_checkpoint_path(checkpoint_dir_or_file):
checkpoint_path = checkpoint_dir_or_file
if tf.io.gfile.isdir(checkpoint_dir_or_file):
checkpoint_path = tf.train.latest_checkpoint(
checkpoint_dir_or_file)
return checkpoint_path
for init_module in self.task_config.init_checkpoint_modules:
# Restoring checkpoint.
if init_module == 'all':
checkpoint_path = _get_checkpoint_path(
self.task_config.init_checkpoint)
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'backbone':
checkpoint_path = _get_checkpoint_path(
self.task_config.init_checkpoint)
if self.task_config.model.backbone.type == 'uvit':
model.backbone.load_checkpoint(ckpt_filepath=checkpoint_path)
else:
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'decoder':
checkpoint_path = _get_checkpoint_path(
self.task_config.init_checkpoint)
ckpt = tf.train.Checkpoint(decoder=model.decoder)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'segmentation_backbone':
checkpoint_path = _get_checkpoint_path(
self.task_config.segmentation_init_checkpoint)
ckpt = tf.train.Checkpoint(
segmentation_backbone=model.segmentation_backbone)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
elif init_module == 'segmentation_decoder':
checkpoint_path = _get_checkpoint_path(
self.task_config.segmentation_init_checkpoint)
ckpt = tf.train.Checkpoint(
segmentation_decoder=model.segmentation_decoder)
status = ckpt.read(checkpoint_path)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all', 'backbone', 'decoder', 'segmentation_backbone' and/or "
"'segmentation_decoder' can be used to initialize the model, but "
"got {}".format(init_module))
logging.info('Finished loading pretrained checkpoint from %s for %s',
checkpoint_path, init_module)
def build_inputs(
self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Builds input dataset."""
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = panoptic_maskrcnn_input.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id,
mask_binarize_threshold=decoder_cfg.mask_binarize_threshold,
include_panoptic_masks=decoder_cfg.include_panoptic_masks,
panoptic_category_mask_key=decoder_cfg.panoptic_category_mask_key,
panoptic_instance_mask_key=decoder_cfg.panoptic_instance_mask_key)
else:
raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type))
parser = panoptic_maskrcnn_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
rpn_match_threshold=params.parser.rpn_match_threshold,
rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold,
rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im,
rpn_fg_fraction=params.parser.rpn_fg_fraction,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_rand_vflip=params.parser.aug_rand_vflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_type=params.parser.aug_type,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances,
outer_boxes_scale=self.task_config.model.outer_boxes_scale,
mask_crop_size=params.parser.mask_crop_size,
segmentation_resize_eval_groundtruth=params.parser
.segmentation_resize_eval_groundtruth,
segmentation_groundtruth_padded_size=params.parser
.segmentation_groundtruth_padded_size,
segmentation_ignore_label=params.parser.segmentation_ignore_label,
panoptic_ignore_label=params.parser.panoptic_ignore_label,
include_panoptic_masks=params.parser.include_panoptic_masks,
dtype=params.dtype,
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
combine_fn=input_reader.create_combine_fn(params),
parser_fn=parser.parse_fn(params.is_training),
)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None) -> Dict[str, tf.Tensor]:
"""Builds Panoptic Mask R-CNN losses."""
params = self.task_config.losses
use_groundtruth_dimension = (
params.semantic_segmentation_use_groundtruth_dimension)
segmentation_loss_fn = segmentation_losses.SegmentationLoss(
label_smoothing=params.semantic_segmentation_label_smoothing,
class_weights=params.semantic_segmentation_class_weights,
ignore_label=params.semantic_segmentation_ignore_label,
gt_is_matting_map=params.semantic_segmentation_gt_is_matting_map,
use_groundtruth_dimension=use_groundtruth_dimension,
use_binary_cross_entropy=params
.semantic_segmentation_use_binary_cross_entropy,
top_k_percent_pixels=params.semantic_segmentation_top_k_percent_pixels)
instance_segmentation_weight = params.instance_segmentation_weight
semantic_segmentation_weight = params.semantic_segmentation_weight
losses = super().build_losses(
outputs=outputs,
labels=labels,
aux_losses=None)
maskrcnn_loss = losses['model_loss']
segmentation_loss = segmentation_loss_fn(
outputs['segmentation_outputs'],
labels['gt_segmentation_mask'])
model_loss = (
instance_segmentation_weight * maskrcnn_loss +
semantic_segmentation_weight * segmentation_loss)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
losses.update({
'total_loss': total_loss,
'maskrcnn_loss': maskrcnn_loss,
'segmentation_loss': segmentation_loss,
'model_loss': model_loss,
})
return losses
def build_metrics(
self, training: bool = True
  ) -> List[tf.keras.metrics.Metric]:
    """Builds detection and segmentation metrics."""
metrics = super().build_metrics(training)
if training:
metric_names = ['maskrcnn_loss', 'segmentation_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if self.task_config.segmentation_evaluation.report_train_mean_iou:
self.segmentation_train_mean_iou = segmentation_metrics.MeanIoU(
name='train_mean_iou',
num_classes=self.task_config.model.segmentation_model.num_classes,
rescale_predictions=False,
dtype=tf.float32,
)
else:
rescale_predictions = (
not self.task_config.validation_data.parser.segmentation_resize_eval_groundtruth
)
self.segmentation_perclass_iou_metric = segmentation_metrics.PerClassIoU(
name='per_class_iou',
num_classes=self.task_config.model.segmentation_model.num_classes,
rescale_predictions=rescale_predictions,
dtype=tf.float32,
)
if (
self.task_config.model.generate_panoptic_masks
and self.task_config.panoptic_quality_evaluator is not None
):
if not self.task_config.validation_data.parser.include_panoptic_masks:
raise ValueError(
'`include_panoptic_masks` should be set to True when'
' computing panoptic quality.'
)
pq_config = self.task_config.panoptic_quality_evaluator
self.panoptic_quality_metric = panoptic_quality.PanopticQualityV2(
num_categories=pq_config.num_categories,
is_thing=pq_config.is_thing,
ignored_label=pq_config.ignored_label,
rescale_predictions=pq_config.rescale_predictions,
)
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None) -> Dict[str, Any]:
"""Does forward and backward.
Args:
      inputs: a tuple of (images, labels) input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
model_kwargs = {
'image_info': labels['image_info'],
'anchor_boxes': labels['anchor_boxes'],
'gt_boxes': labels['gt_boxes'],
'gt_classes': labels['gt_classes'],
'training': True,
}
if self.task_config.model.include_mask:
model_kwargs['gt_masks'] = labels['gt_masks']
if self.task_config.model.outer_boxes_scale > 1.0:
model_kwargs['gt_outer_boxes'] = labels['gt_outer_boxes']
outputs = model(images, **model_kwargs)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
if metrics:
for m in metrics:
m.update_state(losses[m.name])
if (self.task_config.segmentation_evaluation.report_train_mean_iou and
self.segmentation_train_mean_iou is not None):
segmentation_labels = {
'masks': labels['gt_segmentation_mask'],
'valid_masks': labels['gt_segmentation_valid_mask'],
'image_info': labels['image_info']
}
self.process_metrics(
metrics=[self.segmentation_train_mean_iou],
labels=segmentation_labels,
model_outputs=outputs['segmentation_outputs'])
logs.update({
self.segmentation_train_mean_iou.name:
self.segmentation_train_mean_iou.result()
})
return logs
def _update_metrics(self, labels, outputs, logs):
super()._update_metrics(labels, outputs, logs)
if self.segmentation_perclass_iou_metric is not None:
segmentation_labels = {
'masks': labels['groundtruths']['gt_segmentation_mask'],
'valid_masks': labels['groundtruths']['gt_segmentation_valid_mask'],
'image_info': labels['image_info'],
}
self.segmentation_perclass_iou_metric.update_state(
segmentation_labels, outputs['segmentation_outputs']
)
if self.panoptic_quality_metric is not None:
pq_metric_labels = {
'category_mask': labels['groundtruths']['gt_panoptic_category_mask'],
'instance_mask': labels['groundtruths']['gt_panoptic_instance_mask'],
'image_info': labels['image_info'],
}
self.panoptic_quality_metric.update_state(
pq_metric_labels, outputs['panoptic_outputs']
)
def validation_step(
self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None,
  ) -> Dict[str, Any]:
    """Validation step.
Args:
      inputs: a tuple of (images, labels) input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
outputs = model(
images,
anchor_boxes=labels['anchor_boxes'],
image_info=labels['image_info'],
training=False,
)
logs = {self.loss: 0}
self._update_metrics(labels, outputs, logs)
return logs
def aggregate_logs(self, state=None, step_outputs=None):
is_first_step = not state
super().aggregate_logs(state, step_outputs)
if is_first_step:
if not isinstance(state, list):
state = []
if self.segmentation_perclass_iou_metric is not None:
state.append(self.segmentation_perclass_iou_metric)
if self.panoptic_quality_metric is not None:
state.append(self.panoptic_quality_metric)
if not state:
# Create an arbitrary state to indicate it's not the first step in the
# following calls to this function.
state = True
return state
def _reduce_semantic_metrics(self, logs: Dict[str, Any]):
"""Updates the per class and mean semantic metrics in the logs."""
ious = self.segmentation_perclass_iou_metric.result()
if self.task_config.segmentation_evaluation.report_per_class_iou:
for i, value in enumerate(ious.numpy()):
logs.update({'segmentation_iou/class_{}'.format(i): value})
logs.update({'segmentation_mean_iou': tf.reduce_mean(ious)})
def _reduce_panoptic_metrics(self, logs: Dict[str, Any]):
"""Updates the per class and mean panoptic metrics in the logs."""
result = self.panoptic_quality_metric.result()
valid_thing_classes = result['valid_thing_classes']
valid_stuff_classes = result['valid_stuff_classes']
valid_classes = valid_stuff_classes | valid_thing_classes
num_categories = tf.math.count_nonzero(valid_classes, dtype=tf.float32)
num_thing_categories = tf.math.count_nonzero(
valid_thing_classes, dtype=tf.float32
)
num_stuff_categories = tf.math.count_nonzero(
valid_stuff_classes, dtype=tf.float32
)
valid_thing_classes = tf.cast(valid_thing_classes, dtype=tf.float32)
valid_stuff_classes = tf.cast(valid_stuff_classes, dtype=tf.float32)
logs['panoptic_quality/All_num_categories'] = num_categories
logs['panoptic_quality/Things_num_categories'] = num_thing_categories
logs['panoptic_quality/Stuff_num_categories'] = num_stuff_categories
for metric in ['pq', 'sq', 'rq']:
metric_per_class = result[f'{metric}_per_class']
logs[f'panoptic_quality/All_{metric}'] = tf.math.divide_no_nan(
tf.reduce_sum(metric_per_class), num_categories
)
logs[f'panoptic_quality/Things_{metric}'] = tf.math.divide_no_nan(
tf.reduce_sum(metric_per_class * valid_thing_classes),
num_thing_categories,
)
logs[f'panoptic_quality/Stuff_{metric}'] = tf.math.divide_no_nan(
tf.reduce_sum(metric_per_class * valid_stuff_classes),
num_stuff_categories,
)
if self.task_config.panoptic_quality_evaluator.report_per_class_metrics:
for i, is_valid in enumerate(valid_classes.numpy()):
if is_valid:
logs[f'panoptic_quality/{metric}/class_{i}'] = metric_per_class[i]
def reduce_aggregated_logs(
self,
aggregated_logs: Dict[str, Any],
global_step: Optional[tf.Tensor] = None,
) -> Dict[str, tf.Tensor]:
"""Optional reduce of aggregated logs over validation steps."""
logs = super().reduce_aggregated_logs(aggregated_logs, global_step)
if self.segmentation_perclass_iou_metric is not None:
self._reduce_semantic_metrics(logs)
self.segmentation_perclass_iou_metric.reset_state()
if self.panoptic_quality_metric is not None:
self._reduce_panoptic_metrics(logs)
self.panoptic_quality_metric.reset_state()
return logs
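# _reduce_panoptic_metrics above averages the per-class metrics only over
# categories that are actually present, and relies on tf.math.divide_no_nan so
# that an empty category set yields 0 rather than NaN. A tiny numeric sketch of
# that reduction (the values below are made up for illustration):
def _example_panoptic_quality_reduction():
  """Illustrative only; not used by the task."""
  pq_per_class = tf.constant([0.8, 0.0, 0.6, 0.4])
  valid_classes = tf.constant([True, False, True, True])
  num_valid = tf.math.count_nonzero(valid_classes, dtype=tf.float32)
  mean_pq = tf.math.divide_no_nan(
      tf.reduce_sum(pq_per_class * tf.cast(valid_classes, tf.float32)),
      num_valid)
  return mean_pq  # (0.8 + 0.6 + 0.4) / 3 = 0.6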
| 20,323 | 39.007874 | 90 | py |
models | models-master/official/projects/panoptic/ops/mask_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for masks."""
import tensorflow as tf
def resize_and_rescale_offsets(input_tensor: tf.Tensor, target_size):
"""Bilinearly resizes and rescales the offsets.
Reference:
https://github.com/google-research/deeplab2/blob/main/model/utils.py#L157
Args:
input_tensor: A tf.Tensor of shape [batch, height, width, 2].
target_size: A list or tuple or 1D tf.Tensor that specifies the height and
width after resizing.
Returns:
The input_tensor resized to shape `[batch, target_height, target_width, 2]`.
Moreover, the offsets along the y-axis are rescaled by a factor equal to
(target_height - 1) / (reference_height - 1) and the offsets along the
x-axis are rescaled by a factor equal to
(target_width - 1) / (reference_width - 1).
"""
input_size_y = tf.shape(input_tensor)[1]
input_size_x = tf.shape(input_tensor)[2]
dtype = input_tensor.dtype
scale_y = tf.cast(target_size[0] - 1, dtype=dtype) / tf.cast(
input_size_y - 1, dtype=dtype)
scale_x = tf.cast(target_size[1] - 1, dtype=dtype) / tf.cast(
input_size_x - 1, dtype=dtype)
target_y, target_x = tf.split(
value=input_tensor, num_or_size_splits=2, axis=3)
target_y *= scale_y
target_x *= scale_x
  # Reassemble the rescaled offsets so the resize below operates on them,
  # matching the rescaling behaviour documented in the docstring.
  input_tensor = tf.concat([target_y, target_x], 3)
return tf.image.resize(
input_tensor,
size=target_size,
method=tf.image.ResizeMethod.BILINEAR)
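# A small usage sketch of resize_and_rescale_offsets (illustrative only): an
# offset map predicted at a coarse resolution is upsampled to the label
# resolution, and the offset values are rescaled so that they stay expressed
# in pixels of the new resolution.
def _example_resize_offsets():
  """Illustrative only; not part of the library API."""
  # [batch, height, width, (y_offset, x_offset)]
  offsets = tf.random.uniform([2, 64, 64, 2])
  resized = resize_and_rescale_offsets(offsets, target_size=[256, 256])
  # Shape [2, 256, 256, 2]; offsets scaled by (256 - 1) / (64 - 1).
  return resized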
| 2,027 | 35.214286 | 80 | py |
models | models-master/official/projects/panoptic/losses/panoptic_deeplab_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for panoptic deeplab model."""
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.panoptic.ops import mask_ops
EPSILON = 1e-5
class WeightedBootstrappedCrossEntropyLoss:
"""Weighted semantic segmentation loss."""
def __init__(self, label_smoothing, class_weights, ignore_label,
top_k_percent_pixels=1.0):
self._top_k_percent_pixels = top_k_percent_pixels
self._class_weights = class_weights
self._ignore_label = ignore_label
self._label_smoothing = label_smoothing
def __call__(self, logits, labels, sample_weight=None):
_, _, _, num_classes = logits.get_shape().as_list()
logits = tf.image.resize(
logits, tf.shape(labels)[1:3],
method=tf.image.ResizeMethod.BILINEAR)
valid_mask = tf.not_equal(labels, self._ignore_label)
normalizer = tf.reduce_sum(tf.cast(valid_mask, tf.float32)) + EPSILON
# Assign pixel with ignore label to class 0 (background). The loss on the
# pixel will later be masked out.
labels = tf.where(valid_mask, labels, tf.zeros_like(labels))
labels = tf.squeeze(tf.cast(labels, tf.int32), axis=3)
valid_mask = tf.squeeze(tf.cast(valid_mask, tf.float32), axis=3)
onehot_labels = tf.one_hot(labels, num_classes)
onehot_labels = onehot_labels * (
1 - self._label_smoothing) + self._label_smoothing / num_classes
cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits)
if not self._class_weights:
class_weights = [1] * num_classes
else:
class_weights = self._class_weights
if num_classes != len(class_weights):
raise ValueError(
'Length of class_weights should be {}'.format(num_classes))
weight_mask = tf.einsum('...y,y->...',
tf.one_hot(labels, num_classes, dtype=tf.float32),
tf.constant(class_weights, tf.float32))
valid_mask *= weight_mask
if sample_weight is not None:
valid_mask *= sample_weight
cross_entropy_loss *= tf.cast(valid_mask, tf.float32)
if self._top_k_percent_pixels >= 1.0:
loss = tf.reduce_sum(cross_entropy_loss) / normalizer
else:
loss = self._compute_top_k_loss(cross_entropy_loss)
return loss
  def _compute_top_k_loss(self, loss):
    """Computes top-k loss."""
batch_size = tf.shape(loss)[0]
loss = tf.reshape(loss, shape=[batch_size, -1])
top_k_pixels = tf.cast(
self._top_k_percent_pixels *
tf.cast(tf.shape(loss)[-1], dtype=tf.float32),
dtype=tf.int32)
# shape: [batch_size, top_k_pixels]
per_sample_top_k_loss = tf.map_fn(
fn=lambda x: tf.nn.top_k(x, k=top_k_pixels, sorted=False)[0],
elems=loss,
parallel_iterations=32,
fn_output_signature=tf.float32)
# shape: [batch_size]
per_sample_normalizer = tf.reduce_sum(
tf.cast(
tf.not_equal(per_sample_top_k_loss, 0.0),
dtype=tf.float32),
axis=-1) + EPSILON
per_sample_normalized_loss = tf.reduce_sum(
per_sample_top_k_loss, axis=-1) / per_sample_normalizer
normalized_loss = tf_utils.safe_mean(per_sample_normalized_loss)
return normalized_loss
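# A short usage sketch of the loss above (shapes and values are illustrative
# only): pixels labelled with `ignore_label` contribute nothing to the loss,
# and with top_k_percent_pixels < 1.0 only the hardest pixels of each image
# are averaged.
def _example_semantic_loss():
  """Illustrative only; not used by the model."""
  loss_fn = WeightedBootstrappedCrossEntropyLoss(
      label_smoothing=0.0,
      class_weights=[],
      ignore_label=255,
      top_k_percent_pixels=0.2)
  logits = tf.random.normal([2, 65, 65, 19])
  labels = tf.random.uniform([2, 65, 65, 1], maxval=19, dtype=tf.int32)
  return loss_fn(logits, labels)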
class CenterHeatmapLoss:
"""Center heatmap loss."""
def __init__(self):
self._loss_fn = tf.losses.mean_squared_error
def __call__(self, logits, labels, sample_weight=None):
_, height, width, _ = labels.get_shape().as_list()
logits = tf.image.resize(
logits,
size=[height, width],
method=tf.image.ResizeMethod.BILINEAR)
loss = self._loss_fn(y_true=labels, y_pred=logits)
if sample_weight is not None:
loss *= sample_weight
return tf_utils.safe_mean(loss)
class CenterOffsetLoss:
"""Center offset loss."""
def __init__(self):
self._loss_fn = tf.losses.mean_absolute_error
def __call__(self, logits, labels, sample_weight=None):
_, height, width, _ = labels.get_shape().as_list()
logits = mask_ops.resize_and_rescale_offsets(
logits, target_size=[height, width])
loss = self._loss_fn(y_true=labels, y_pred=logits)
if sample_weight is not None:
loss *= sample_weight
return tf_utils.safe_mean(loss)
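# The three losses in this module are typically combined with per-term weights.
# The 1.0 / 200.0 / 0.01 weights below follow the Panoptic-DeepLab paper but
# are only an illustration here; the weights actually used come from the
# experiment configuration, not from this function.
def _example_total_panoptic_deeplab_loss(semantic_loss, center_loss,
                                         offset_loss):
  """Illustrative only; the weights are assumptions, not read from a config."""
  return 1.0 * semantic_loss + 200.0 * center_loss + 0.01 * offset_loss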
| 4,877 | 31.738255 | 78 | py |
models | models-master/official/projects/basnet/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
# pylint: disable=unused-import
from official.common import flags as tfm_flags
from official.projects.basnet.configs import basnet as basnet_cfg
from official.projects.basnet.modeling import basnet_model
from official.projects.basnet.modeling import refunet
from official.projects.basnet.tasks import basnet as basnet_task
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,122 | 35.225806 | 74 | py |
models | models-master/official/projects/basnet/evaluation/metrics_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.basnet.evaluation import metrics
class BASNetMetricTest(parameterized.TestCase, tf.test.TestCase):
def test_mae(self):
input_size = 224
inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
labels = (tf.random.uniform([2, input_size, input_size, 1]),)
mae_obj = metrics.MAE()
mae_obj.reset_states()
mae_obj.update_state(labels, inputs)
output = mae_obj.result()
mae_tf = tf.keras.metrics.MeanAbsoluteError()
mae_tf.reset_state()
mae_tf.update_state(labels[0], inputs[0])
compare = mae_tf.result().numpy()
self.assertAlmostEqual(output, compare, places=4)
def test_max_f(self):
input_size = 224
beta = 0.3
inputs = (tf.random.uniform([2, input_size, input_size, 1]),)
labels = (tf.random.uniform([2, input_size, input_size, 1]),)
max_f_obj = metrics.MaxFscore()
max_f_obj.reset_states()
max_f_obj.update_state(labels, inputs)
output = max_f_obj.result()
pre_tf = tf.keras.metrics.Precision(thresholds=0.78)
rec_tf = tf.keras.metrics.Recall(thresholds=0.78)
pre_tf.reset_state()
rec_tf.reset_state()
pre_tf.update_state(labels[0], inputs[0])
rec_tf.update_state(labels[0], inputs[0])
pre_out_tf = pre_tf.result().numpy()
rec_out_tf = rec_tf.result().numpy()
compare = (1+beta)*pre_out_tf*rec_out_tf/(beta*pre_out_tf+rec_out_tf+1e-8)
self.assertAlmostEqual(output, compare, places=1)
if __name__ == '__main__':
tf.test.main()
| 2,195 | 30.826087 | 78 | py |
models | models-master/official/projects/basnet/evaluation/metrics.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation metrics for BASNet.
The MAE and maxFscore implementations are modified versions of
https://github.com/xuebinqin/Binary-Segmentation-Evaluation-Tool
"""
import numpy as np
import scipy.signal
class MAE:
"""Mean Absolute Error(MAE) metric for basnet."""
def __init__(self):
"""Constructs MAE metric class."""
self.reset_states()
@property
def name(self):
return 'MAE'
def reset_states(self):
"""Resets internal states for a fresh run."""
self._predictions = []
self._groundtruths = []
def result(self):
"""Evaluates segmentation results, and reset_states."""
metric_result = self.evaluate()
# Cleans up the internal variables in order for a fresh eval next time.
self.reset_states()
return metric_result
def evaluate(self):
"""Evaluates with masks from all images.
Returns:
average_mae: average MAE with float numpy.
"""
mae_total = 0.0
for (true, pred) in zip(self._groundtruths, self._predictions):
# Computes MAE
mae = self._compute_mae(true, pred)
mae_total += mae
average_mae = mae_total / len(self._groundtruths)
return average_mae
def _mask_normalize(self, mask):
return mask/(np.amax(mask)+1e-8)
def _compute_mae(self, true, pred):
h, w = true.shape[0], true.shape[1]
mask1 = self._mask_normalize(true)
mask2 = self._mask_normalize(pred)
sum_error = np.sum(np.absolute((mask1.astype(float) - mask2.astype(float))))
mae_error = sum_error/(float(h)*float(w)+1e-8)
return mae_error
  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
numpy_groundtruths = groundtruths.numpy()
numpy_predictions = predictions.numpy()
return numpy_groundtruths, numpy_predictions
def update_state(self, groundtruths, predictions):
"""Update segmentation results and groundtruth data.
Args:
groundtruths : Tuple of single Tensor [batch, width, height, 1],
groundtruth masks. range [0, 1]
predictions : Tuple of single Tensor [batch, width, height, 1],
predicted masks. range [0, 1]
"""
groundtruths, predictions = self._convert_to_numpy(groundtruths[0],
predictions[0])
for (true, pred) in zip(groundtruths, predictions):
self._groundtruths.append(true)
self._predictions.append(pred)
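# A short usage sketch of the MAE metric above (illustrative only). Note that
# update_state expects the groundtruth and prediction masks wrapped in
# single-element tuples, and that it is fed TensorFlow tensors even though
# this module itself only imports numpy/scipy.
def _example_mae_usage():
  """Illustrative only; not used by the evaluator."""
  import tensorflow as tf  # pylint: disable=g-import-not-at-top
  mae_metric = MAE()
  groundtruths = (tf.random.uniform([2, 224, 224, 1]),)
  predictions = (tf.random.uniform([2, 224, 224, 1]),)
  mae_metric.update_state(groundtruths, predictions)
  return mae_metric.result()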
class MaxFscore:
"""Maximum F-score metric for basnet."""
def __init__(self):
"""Constructs BASNet evaluation class."""
self.reset_states()
@property
def name(self):
return 'MaxFScore'
def reset_states(self):
"""Resets internal states for a fresh run."""
self._predictions = []
self._groundtruths = []
def result(self):
"""Evaluates segmentation results, and reset_states."""
metric_result = self.evaluate()
# Cleans up the internal variables in order for a fresh eval next time.
self.reset_states()
return metric_result
def evaluate(self):
"""Evaluates with masks from all images.
Returns:
f_max: maximum F-score value.
"""
mybins = np.arange(0, 256)
beta = 0.3
precisions = np.zeros((len(self._groundtruths), len(mybins)-1))
recalls = np.zeros((len(self._groundtruths), len(mybins)-1))
for i, (true, pred) in enumerate(zip(self._groundtruths,
self._predictions)):
# Compute F-score
true = self._mask_normalize(true) * 255.0
pred = self._mask_normalize(pred) * 255.0
pre, rec = self._compute_pre_rec(true, pred, mybins=np.arange(0, 256))
precisions[i, :] = pre
recalls[i, :] = rec
precisions = np.sum(precisions, 0) / (len(self._groundtruths) + 1e-8)
recalls = np.sum(recalls, 0) / (len(self._groundtruths) + 1e-8)
f = (1 + beta) * precisions * recalls / (beta * precisions + recalls + 1e-8)
f_max = np.max(f)
f_max = f_max.astype(np.float32)
return f_max
def _mask_normalize(self, mask):
return mask / (np.amax(mask) + 1e-8)
  def _compute_pre_rec(self, true, pred, mybins=np.arange(0, 256)):
    """Computes precision and recall over successive score thresholds."""
# pixel number of ground truth foreground regions
gt_num = true[true > 128].size
# mask predicted pixel values in the ground truth foreground region
pp = pred[true > 128]
    # mask predicted pixel values in the ground truth background region
nn = pred[true <= 128]
pp_hist, _ = np.histogram(pp, bins=mybins)
nn_hist, _ = np.histogram(nn, bins=mybins)
pp_hist_flip = np.flipud(pp_hist)
nn_hist_flip = np.flipud(nn_hist)
pp_hist_flip_cum = np.cumsum(pp_hist_flip)
nn_hist_flip_cum = np.cumsum(nn_hist_flip)
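    # Flipping the per-threshold histograms and taking cumulative sums gives,
    # for successively lower score thresholds, the number of foreground (pp)
    # and background (nn) pixels predicted above each threshold, i.e. the
    # true-positive and false-positive counts used below to sweep
    # precision/recall over all thresholds at once.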
precision = pp_hist_flip_cum / (pp_hist_flip_cum + nn_hist_flip_cum + 1e-8
) # TP/(TP+FP)
recall = pp_hist_flip_cum / (gt_num + 1e-8) # TP/(TP+FN)
precision[np.isnan(precision)] = 0.0
recall[np.isnan(recall)] = 0.0
    pre_len = len(precision)
    rec_len = len(recall)
    return np.reshape(precision, (pre_len,)), np.reshape(recall, (rec_len,))
  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
numpy_groundtruths = groundtruths.numpy()
numpy_predictions = predictions.numpy()
return numpy_groundtruths, numpy_predictions
def update_state(self, groundtruths, predictions):
"""Update segmentation results and groundtruth data.
Args:
groundtruths : Tuple of single Tensor [batch, width, height, 1],
groundtruth masks. range [0, 1]
      predictions : Tuple of single Tensor [batch, width, height, 1],
predicted masks. range [0, 1]
"""
groundtruths, predictions = self._convert_to_numpy(groundtruths[0],
predictions[0])
for (true, pred) in zip(groundtruths, predictions):
self._groundtruths.append(true)
self._predictions.append(pred)
class RelaxedFscore:
"""Relaxed F-score metric for basnet."""
def __init__(self):
"""Constructs BASNet evaluation class."""
self.reset_states()
@property
def name(self):
return 'RelaxFScore'
def reset_states(self):
"""Resets internal states for a fresh run."""
self._predictions = []
self._groundtruths = []
def result(self):
"""Evaluates segmentation results, and reset_states."""
metric_result = self.evaluate()
# Cleans up the internal variables in order for a fresh eval next time.
self.reset_states()
return metric_result
def evaluate(self):
"""Evaluates with masks from all images.
Returns:
relax_f: relaxed F-score value.
"""
beta = 0.3
rho = 3
relax_fs = np.zeros(len(self._groundtruths))
erode_kernel = np.ones((3, 3))
for i, (true,
pred) in enumerate(zip(self._groundtruths, self._predictions)):
true = self._mask_normalize(true)
pred = self._mask_normalize(pred)
true = np.squeeze(true, axis=-1)
pred = np.squeeze(pred, axis=-1)
# binary saliency mask (S_bw), threshold 0.5
pred[pred >= 0.5] = 1
pred[pred < 0.5] = 0
# compute eroded binary mask (S_erd) of S_bw
pred_erd = self._compute_erosion(pred, erode_kernel)
pred_xor = np.logical_xor(pred_erd, pred)
# convert True/False to 1/0
pred_xor = pred_xor * 1
# same method for ground truth
true[true >= 0.5] = 1
true[true < 0.5] = 0
true_erd = self._compute_erosion(true, erode_kernel)
true_xor = np.logical_xor(true_erd, true)
true_xor = true_xor * 1
pre, rec = self._compute_relax_pre_rec(true_xor, pred_xor, rho)
relax_fs[i] = (1 + beta) * pre * rec / (beta * pre + rec + 1e-8)
relax_f = np.sum(relax_fs, 0) / (len(self._groundtruths) + 1e-8)
relax_f = relax_f.astype(np.float32)
return relax_f
def _mask_normalize(self, mask):
return mask/(np.amax(mask)+1e-8)
def _compute_erosion(self, mask, kernel):
kernel_full = np.sum(kernel)
mask_erd = scipy.signal.convolve2d(mask, kernel, mode='same')
mask_erd[mask_erd < kernel_full] = 0
mask_erd[mask_erd == kernel_full] = 1
return mask_erd
def _compute_relax_pre_rec(self, true, pred, rho):
"""Computes relaxed precision and recall."""
kernel = np.ones((2 * rho - 1, 2 * rho - 1))
map_zeros = np.zeros_like(pred)
map_ones = np.ones_like(pred)
pred_filtered = scipy.signal.convolve2d(pred, kernel, mode='same')
# True positive for relaxed precision
relax_pre_tp = np.where((true == 1) & (pred_filtered > 0), map_ones,
map_zeros)
true_filtered = scipy.signal.convolve2d(true, kernel, mode='same')
# True positive for relaxed recall
relax_rec_tp = np.where((pred == 1) & (true_filtered > 0), map_ones,
map_zeros)
return np.sum(relax_pre_tp) / np.sum(pred), np.sum(relax_rec_tp) / np.sum(
true)
  def _convert_to_numpy(self, groundtruths, predictions):
    """Converts tensors to numpy arrays."""
numpy_groundtruths = groundtruths.numpy()
numpy_predictions = predictions.numpy()
return numpy_groundtruths, numpy_predictions
def update_state(self, groundtruths, predictions):
"""Update segmentation results and groundtruth data.
Args:
groundtruths : Tuple of single Tensor [batch, width, height, 1],
groundtruth masks. range [0, 1]
predictions : Tuple of single Tensor [batch, width, height, 1],
predicted masks. range [0, 1]
"""
groundtruths, predictions = self._convert_to_numpy(groundtruths[0],
predictions[0])
for (true, pred) in zip(groundtruths, predictions):
self._groundtruths.append(true)
self._predictions.append(pred)
| 10,566 | 31.021212 | 80 | py |
models | models-master/official/projects/basnet/serving/basnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export module for BASNet."""
import tensorflow as tf
from official.projects.basnet.tasks import basnet
from official.vision.serving import semantic_segmentation
class BASNetModule(semantic_segmentation.SegmentationModule):
"""BASNet Module."""
def _build_model(self):
input_specs = tf.keras.layers.InputSpec(
shape=[self._batch_size] + self._input_image_size + [3])
return basnet.build_basnet_model(
input_specs=input_specs,
model_config=self.params.task.model,
l2_regularizer=None)
def serve(self, images):
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
      A dictionary holding the predicted saliency masks.
"""
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs, elems=images,
fn_output_signature=tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32),
parallel_iterations=32
)
)
masks = self.inference_step(images)
keys = sorted(masks.keys())
output = tf.image.resize(
masks[keys[-1]],
self._input_image_size, method='bilinear')
return dict(predicted_masks=output)
| 1,982 | 30.47619 | 74 | py |
models | models-master/official/projects/basnet/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Export binary for BASNet.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE = XX
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.basnet.serving import basnet
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string(
'experiment', None, 'experiment type, e.g. retinanet_resnetfpn_coco')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
flags.DEFINE_integer(
'batch_size', None, 'The batch size.')
flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example`.')
flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
def main(_):
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
export_module=basnet.BASNetModule(
params=params,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')]),
export_checkpoint_subdir='checkpoint',
export_saved_model_subdir='saved_model')
if __name__ == '__main__':
app.run(main)
| 3,691 | 33.504673 | 80 | py |
models | models-master/official/projects/basnet/configs/basnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BASNet configuration definition."""
import dataclasses
import os
from typing import List, Optional, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import common
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
output_size: List[int] = dataclasses.field(default_factory=list)
# If crop_size is specified, image will be resized first to
# output_size, then crop of size crop_size will be cropped.
crop_size: List[int] = dataclasses.field(default_factory=list)
input_path: str = ''
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
shuffle_buffer_size: int = 1000
cycle_length: int = 10
resize_eval_groundtruth: bool = True
groundtruth_padded_size: List[int] = dataclasses.field(default_factory=list)
aug_rand_hflip: bool = True
file_type: str = 'tfrecord'
@dataclasses.dataclass
class BASNetModel(hyperparams.Config):
"""BASNet model config."""
input_size: List[int] = dataclasses.field(default_factory=list)
use_bias: bool = False
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
@dataclasses.dataclass
class Losses(hyperparams.Config):
label_smoothing: float = 0.1
ignore_label: int = 0 # will be treated as background
l2_weight_decay: float = 0.0
use_groundtruth_dimension: bool = True
@dataclasses.dataclass
class BASNetTask(cfg.TaskConfig):
"""The model config."""
model: BASNetModel = dataclasses.field(default_factory=BASNetModel)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
losses: Losses = dataclasses.field(default_factory=Losses)
gradient_clip_norm: float = 0.0
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'backbone' # all, backbone, and/or decoder
@exp_factory.register_config_factory('basnet')
def basnet() -> cfg.ExperimentConfig:
"""BASNet general."""
return cfg.ExperimentConfig(
      task=BASNetTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
# DUTS Dataset
DUTS_TRAIN_EXAMPLES = 10553
DUTS_VAL_EXAMPLES = 5019
DUTS_INPUT_PATH_BASE_TR = 'DUTS_DATASET'
DUTS_INPUT_PATH_BASE_VAL = 'DUTS_DATASET'
@exp_factory.register_config_factory('basnet_duts')
def basnet_duts() -> cfg.ExperimentConfig:
"""Image segmentation on duts with basnet."""
train_batch_size = 64
eval_batch_size = 16
steps_per_epoch = DUTS_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
task=BASNetTask(
model=BASNetModel(
input_size=[None, None, 3],
use_bias=True,
norm_activation=common.NormActivation(
activation='relu',
norm_momentum=0.99,
norm_epsilon=1e-3,
use_sync_bn=True)),
losses=Losses(l2_weight_decay=0),
train_data=DataConfig(
input_path=os.path.join(DUTS_INPUT_PATH_BASE_TR,
'tf_record_train'),
file_type='tfrecord',
crop_size=[224, 224],
output_size=[256, 256],
is_training=True,
global_batch_size=train_batch_size,
),
validation_data=DataConfig(
input_path=os.path.join(DUTS_INPUT_PATH_BASE_VAL,
'tf_record_test'),
file_type='tfrecord',
output_size=[256, 256],
is_training=False,
global_batch_size=eval_batch_size,
),
init_checkpoint='gs://cloud-basnet-checkpoints/basnet_encoder_imagenet/ckpt-340306',
init_checkpoint_modules='backbone'
),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=300 * steps_per_epoch,
validation_steps=DUTS_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
'adam': {
'beta_1': 0.9,
'beta_2': 0.999,
'epsilon': 1e-8,
}
},
'learning_rate': {
'type': 'constant',
'constant': {'learning_rate': 0.001}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
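# A short sketch of how a registered experiment config is typically retrieved
# and overridden in user code (the override values below are illustrative only,
# not recommended settings):
def _example_get_basnet_duts_config():
  """Illustrative only; not used by the training driver."""
  config = exp_factory.get_exp_config('basnet_duts')
  config.task.train_data.global_batch_size = 32
  config.trainer.train_steps = 1000
  config.validate()
  config.lock()
  return config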
| 5,667 | 33.987654 | 94 | py |
models | models-master/official/projects/basnet/configs/basnet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for basnet configs."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.basnet.configs import basnet as exp_cfg
class BASNetConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('basnet_duts',))
def test_basnet_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.BASNetTask)
self.assertIsInstance(config.task.model,
exp_cfg.BASNetModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,536 | 34.744186 | 74 | py |
models | models-master/official/projects/basnet/modeling/refunet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RefUNet model."""
import tensorflow as tf
from official.projects.basnet.modeling import nn_blocks
@tf.keras.utils.register_keras_serializable(package='Vision')
class RefUnet(tf.keras.layers.Layer):
"""Residual Refinement Module of BASNet.
  Boundary-Aware Network (BASNet) was proposed in:
[1] Qin, Xuebin, et al.
Basnet: Boundary-aware salient object detection.
"""
def __init__(self,
activation='relu',
use_sync_bn=False,
use_bias=True,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Residual Refinement Module of BASNet.
Args:
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
use_bias: if True, use bias in conv2d.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
**kwargs: keyword arguments to be passed.
"""
super(RefUnet, self).__init__(**kwargs)
self._config_dict = {
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._concat = tf.keras.layers.Concatenate(axis=-1)
self._sigmoid = tf.keras.layers.Activation(activation='sigmoid')
self._maxpool = tf.keras.layers.MaxPool2D(
pool_size=2,
strides=2,
padding='valid')
self._upsample = tf.keras.layers.UpSampling2D(
size=2,
interpolation='bilinear')
def build(self, input_shape):
"""Creates the variables of the BASNet decoder."""
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'kernel_size': 3,
'strides': 1,
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._in_conv = conv_op(
filters=64,
padding='same',
**conv_kwargs)
self._en_convs = []
for _ in range(4):
self._en_convs.append(nn_blocks.ConvBlock(
filters=64,
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
**conv_kwargs))
self._bridge_convs = []
for _ in range(1):
self._bridge_convs.append(nn_blocks.ConvBlock(
filters=64,
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
**conv_kwargs))
self._de_convs = []
for _ in range(4):
self._de_convs.append(nn_blocks.ConvBlock(
filters=64,
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=self._config_dict['norm_momentum'],
norm_epsilon=self._config_dict['norm_epsilon'],
**conv_kwargs))
self._out_conv = conv_op(
filters=1,
padding='same',
**conv_kwargs)
def call(self, inputs):
endpoints = {}
residual = inputs
x = self._in_conv(inputs)
# Top-down
for i, block in enumerate(self._en_convs):
x = block(x)
endpoints[str(i)] = x
x = self._maxpool(x)
# Bridge
for i, block in enumerate(self._bridge_convs):
x = block(x)
# Bottom-up
for i, block in enumerate(self._de_convs):
dtype = x.dtype
x = tf.cast(x, tf.float32)
x = self._upsample(x)
x = tf.cast(x, dtype)
x = self._concat([endpoints[str(3-i)], x])
x = block(x)
x = self._out_conv(x)
residual = tf.cast(residual, dtype=x.dtype)
output = self._sigmoid(x + residual)
self._output_specs = output.get_shape()
return output
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
return self._output_specs
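# A short usage sketch of the refinement module above (illustrative only): it
# takes a coarse saliency probability map and returns a refined map of the
# same spatial size; the module's raw output is added to its input as a
# residual before the final sigmoid.
def _example_refunet_usage():
  """Illustrative only; not used by the model builder."""
  refine_module = RefUnet()
  coarse_mask = tf.random.uniform([2, 256, 256, 1])
  refined_mask = refine_module(coarse_mask)
  return refined_mask  # shape [2, 256, 256, 1], values in (0, 1)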
| 5,415 | 31.626506 | 78 | py |
models | models-master/official/projects/basnet/modeling/basnet_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for basnet network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.basnet.modeling import basnet_model
from official.projects.basnet.modeling import refunet
class BASNetNetworkTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(256),
(512),
)
def test_basnet_network_creation(
self, input_size):
"""Test for creation of a segmentation network."""
inputs = np.random.rand(2, input_size, input_size, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = basnet_model.BASNetEncoder()
decoder = basnet_model.BASNetDecoder()
refinement = refunet.RefUnet()
model = basnet_model.BASNetModel(
backbone=backbone,
decoder=decoder,
refinement=refinement
)
sigmoids = model(inputs)
levels = sorted(sigmoids.keys())
self.assertAllEqual(
[2, input_size, input_size, 1],
sigmoids[levels[-1]].numpy().shape)
def test_serialize_deserialize(self):
"""Validate the network can be serialized and deserialized."""
backbone = basnet_model.BASNetEncoder()
decoder = basnet_model.BASNetDecoder()
refinement = refunet.RefUnet()
model = basnet_model.BASNetModel(
backbone=backbone,
decoder=decoder,
refinement=refinement
)
config = model.get_config()
new_model = basnet_model.BASNetModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,356 | 29.61039 | 79 | py |
models | models-master/official/projects/basnet/modeling/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for BasNet model."""
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ConvBlock(tf.keras.layers.Layer):
"""A (Conv+BN+Activation) block."""
def __init__(self,
filters,
strides,
dilation_rate=1,
kernel_size=3,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_bias=False,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A vgg block with BN after convolutions.
Args:
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
dilation_rate: `int`, dilation rate for conv layers.
kernel_size: `int`, kernel size of conv layers.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_bias: `bool`, whether or not use bias in conv layers.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super(ConvBlock, self).__init__(**kwargs)
self._config_dict = {
'filters': filters,
'kernel_size': kernel_size,
'strides': strides,
'dilation_rate': dilation_rate,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon
}
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
conv_kwargs = {
'padding': 'same',
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._conv0 = tf.keras.layers.Conv2D(
filters=self._config_dict['filters'],
kernel_size=self._config_dict['kernel_size'],
strides=self._config_dict['strides'],
dilation_rate=self._config_dict['dilation_rate'],
**conv_kwargs)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
super(ConvBlock, self).build(input_shape)
def get_config(self):
return self._config_dict
def call(self, inputs, training=None):
x = self._conv0(inputs)
x = self._norm0(x)
x = self._activation_fn(x)
return x
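# Illustrative usage sketch (added note, not part of the original module; the
# shapes and argument values below are assumptions chosen for demonstration):
#
#   block = ConvBlock(filters=64, strides=1, dilation_rate=2, activation='relu')
#   features = tf.ones([2, 56, 56, 32])
#   outputs = block(features, training=False)  # -> shape [2, 56, 56, 64]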
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResBlock(tf.keras.layers.Layer):
"""A residual block."""
def __init__(self,
filters,
strides,
use_projection=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
use_bias=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""Initializes a residual block with BN after convolutions.
Args:
      filters: An `int` number of filters for the two 3x3 convolutions in this
        block.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: A `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
use_bias: A `bool`. If True, use bias in conv2d.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
super(ResBlock, self).__init__(**kwargs)
self._config_dict = {
'filters': filters,
'strides': strides,
'use_projection': use_projection,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon
}
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
conv_kwargs = {
'filters': self._config_dict['filters'],
'padding': 'same',
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if self._config_dict['use_projection']:
self._shortcut = tf.keras.layers.Conv2D(
filters=self._config_dict['filters'],
kernel_size=1,
strides=self._config_dict['strides'],
use_bias=self._config_dict['use_bias'],
kernel_initializer=self._config_dict['kernel_initializer'],
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
self._conv1 = tf.keras.layers.Conv2D(
kernel_size=3,
strides=self._config_dict['strides'],
**conv_kwargs)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
self._conv2 = tf.keras.layers.Conv2D(
kernel_size=3,
strides=1,
**conv_kwargs)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._config_dict['norm_momentum'],
epsilon=self._config_dict['norm_epsilon'])
super(ResBlock, self).build(input_shape)
def get_config(self):
return self._config_dict
def call(self, inputs, training=None):
shortcut = inputs
if self._config_dict['use_projection']:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
return self._activation_fn(x + shortcut)
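# Illustrative usage sketch (added note, not part of the original module; the
# shapes and argument values below are assumptions chosen for demonstration):
#
#   # A downsampling block needs a projection shortcut to match shapes.
#   block = ResBlock(filters=128, strides=2, use_projection=True)
#   features = tf.ones([2, 56, 56, 64])
#   outputs = block(features, training=False)  # -> shape [2, 28, 28, 128]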
| 9,180 | 36.321138 | 80 | py |
models | models-master/official/projects/basnet/modeling/basnet_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build BASNet models."""
from typing import Mapping
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.basnet.modeling import nn_blocks
from official.vision.modeling.backbones import factory
# Specifications for BASNet encoder.
# Each element in the block configuration is in the following format:
# (num_filters, stride, block_repeats, maxpool)
BASNET_ENCODER_SPECS = [
(64, 1, 3, 0), # ResNet-34,
(128, 2, 4, 0), # ResNet-34,
(256, 2, 6, 0), # ResNet-34,
(512, 2, 3, 1), # ResNet-34,
(512, 1, 3, 1), # BASNet,
(512, 1, 3, 0), # BASNet,
]
# Specifications for BASNet decoder.
# Each element in the block configuration is in the following format:
# (conv1_nf, conv1_dr, convm_nf, convm_dr, conv2_nf, conv2_dr, scale_factor)
# nf : num_filters, dr : dilation_rate
BASNET_BRIDGE_SPECS = [
(512, 2, 512, 2, 512, 2, 32), # Sup0, Bridge
]
BASNET_DECODER_SPECS = [
(512, 1, 512, 2, 512, 2, 32), # Sup1, stage6d
(512, 1, 512, 1, 512, 1, 16), # Sup2, stage5d
(512, 1, 512, 1, 256, 1, 8), # Sup3, stage4d
(256, 1, 256, 1, 128, 1, 4), # Sup4, stage3d
(128, 1, 128, 1, 64, 1, 2), # Sup5, stage2d
(64, 1, 64, 1, 64, 1, 1) # Sup6, stage1d
]
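# Reading one row as a worked example (added note, not original code): the
# first decoder spec (512, 1, 512, 2, 512, 2, 32) expands into three ConvBlocks
# with (filters, dilation_rate) = (512, 1), (512, 2) and (512, 2), plus a
# 1-channel side-output convolution; its map is bilinearly upsampled by a
# factor of 32 back to the input resolution and passed through a sigmoid.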
@tf.keras.utils.register_keras_serializable(package='Vision')
class BASNetModel(tf.keras.Model):
"""A BASNet model.
  The Boundary-Aware Salient Object Detection network (BASNet) was proposed in:
[1] Qin, Xuebin, et al.
Basnet: Boundary-aware salient object detection.
  Input images are first passed through the backbone (encoder). The decoder
  network is then applied, and finally the refinement module is applied to the
  output of the decoder network.
"""
def __init__(self,
backbone,
decoder,
refinement=None,
**kwargs):
"""BASNet initialization function.
Args:
backbone: a backbone network. basnet_encoder.
decoder: a decoder network. basnet_decoder.
refinement: a module for salient map refinement.
**kwargs: keyword arguments to be passed.
"""
super(BASNetModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'refinement': refinement,
}
self.backbone = backbone
self.decoder = decoder
self.refinement = refinement
def call(self, inputs, training=None): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
features = self.backbone(inputs)
if self.decoder:
features = self.decoder(features)
levels = sorted(features.keys())
new_key = str(len(levels))
if self.refinement:
features[new_key] = self.refinement(features[levels[-1]])
return features
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone)
if self.decoder is not None:
items.update(decoder=self.decoder)
if self.refinement is not None:
items.update(refinement=self.refinement)
return items
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
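# Illustrative usage sketch (added note, not part of the original module; the
# 256x256 input size is an assumption). The call returns a dict of sigmoid
# saliency maps keyed by level, with the refinement output, when present,
# appended under the next integer key:
#
#   model = BASNetModel(BASNetEncoder(), BASNetDecoder(), refinement=None)
#   maps = model(tf.ones([2, 256, 256, 3]), training=False)
#   # maps['0'] ... maps['6'] are [2, 256, 256, 1] side outputs.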
@tf.keras.utils.register_keras_serializable(package='Vision')
class BASNetEncoder(tf.keras.Model):
"""BASNet encoder."""
def __init__(
self,
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
activation='relu',
use_sync_bn=False,
use_bias=True,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""BASNet encoder initialization function.
Args:
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
use_bias: if True, use bias in conv2d.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
**kwargs: keyword arguments to be passed.
"""
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._use_bias = use_bias
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Build BASNet Encoder.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
x = tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=1,
use_bias=self._use_bias, padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
endpoints = {}
for i, spec in enumerate(BASNET_ENCODER_SPECS):
x = self._block_group(
inputs=x,
filters=spec[0],
strides=spec[1],
block_repeats=spec[2],
name='block_group_l{}'.format(i + 2))
endpoints[str(i)] = x
if spec[3]:
x = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='same')(x)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(BASNetEncoder, self).__init__(
inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs,
filters,
strides,
block_repeats=1,
name='block_group'):
"""Creates one group of residual blocks for the BASNet encoder model.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
block_repeats: `int` number of blocks contained in the layer.
      name: `str` name for the block.
Returns:
The output `Tensor` of the block layer.
"""
x = nn_blocks.ResBlock(
filters=filters,
strides=strides,
use_projection=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_bias=self._use_bias,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = nn_blocks.ResBlock(
filters=filters,
strides=1,
use_projection=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_bias=self._use_bias,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
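# Illustrative shape walk-through (added note; the 256x256 input size is an
# assumption): for a [2, 256, 256, 3] input the encoder yields endpoints
#   '0': [2, 256, 256, 64]   '1': [2, 128, 128, 128]  '2': [2, 64, 64, 256]
#   '3': [2, 32, 32, 512]    '4': [2, 16, 16, 512]    '5': [2, 8, 8, 512]
# where the stride-2 blocks of groups 1-3 and the max-pools after groups 3
# and 4 provide the downsampling.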
@factory.register_backbone_builder('basnet_encoder')
def build_basnet_encoder(
input_specs: tf.keras.layers.InputSpec,
model_config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds BASNet Encoder backbone from a config."""
backbone_type = model_config.backbone.type
norm_activation_config = model_config.norm_activation
assert backbone_type == 'basnet_encoder', (f'Inconsistent backbone type '
f'{backbone_type}')
return BASNetEncoder(
input_specs=input_specs,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=norm_activation_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BASNetDecoder(tf.keras.layers.Layer):
"""BASNet decoder."""
def __init__(self,
activation='relu',
use_sync_bn=False,
use_bias=True,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""BASNet decoder initialization function.
Args:
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
use_bias: if True, use bias in convolution.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
**kwargs: keyword arguments to be passed.
"""
super(BASNetDecoder, self).__init__(**kwargs)
self._config_dict = {
'activation': activation,
'use_sync_bn': use_sync_bn,
'use_bias': use_bias,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._activation = tf_utils.get_activation(activation)
self._concat = tf.keras.layers.Concatenate(axis=-1)
self._sigmoid = tf.keras.layers.Activation(activation='sigmoid')
def build(self, input_shape):
"""Creates the variables of the BASNet decoder."""
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'kernel_size': 3,
'strides': 1,
'use_bias': self._config_dict['use_bias'],
'kernel_initializer': self._config_dict['kernel_initializer'],
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._out_convs = []
self._out_usmps = []
# Bridge layers.
self._bdg_convs = []
for spec in BASNET_BRIDGE_SPECS:
blocks = []
for j in range(3):
blocks.append(nn_blocks.ConvBlock(
filters=spec[2*j],
dilation_rate=spec[2*j+1],
activation='relu',
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=0.99,
norm_epsilon=0.001,
**conv_kwargs))
self._bdg_convs.append(blocks)
self._out_convs.append(conv_op(
filters=1,
padding='same',
**conv_kwargs))
self._out_usmps.append(tf.keras.layers.UpSampling2D(
size=spec[6],
interpolation='bilinear'
))
# Decoder layers.
self._dec_convs = []
for spec in BASNET_DECODER_SPECS:
blocks = []
for j in range(3):
blocks.append(nn_blocks.ConvBlock(
filters=spec[2*j],
dilation_rate=spec[2*j+1],
activation='relu',
use_sync_bn=self._config_dict['use_sync_bn'],
norm_momentum=0.99,
norm_epsilon=0.001,
**conv_kwargs))
self._dec_convs.append(blocks)
self._out_convs.append(conv_op(
filters=1,
padding='same',
**conv_kwargs))
self._out_usmps.append(tf.keras.layers.UpSampling2D(
size=spec[6],
interpolation='bilinear'
))
def call(self, backbone_output: Mapping[str, tf.Tensor]):
"""Forward pass of the BASNet decoder.
Args:
backbone_output: A `dict` of tensors
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
Returns:
sup: A `dict` of tensors
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
"""
levels = sorted(backbone_output.keys(), reverse=True)
sup = {}
x = backbone_output[levels[0]]
for blocks in self._bdg_convs:
for block in blocks:
x = block(x)
sup['0'] = x
for i, blocks in enumerate(self._dec_convs):
x = self._concat([x, backbone_output[levels[i]]])
for block in blocks:
x = block(x)
sup[str(i+1)] = x
x = tf.keras.layers.UpSampling2D(
size=2,
interpolation='bilinear'
)(x)
for i, (conv, usmp) in enumerate(zip(self._out_convs, self._out_usmps)):
sup[str(i)] = self._sigmoid(usmp(conv(sup[str(i)])))
self._output_specs = {
str(order): sup[str(order)].get_shape()
for order in range(0, len(BASNET_DECODER_SPECS))
}
return sup
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {order: TensorShape} pairs for the model output."""
return self._output_specs
| 15,095 | 33.076749 | 139 | py |
models | models-master/official/projects/basnet/tasks/basnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BASNet task definition."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import input_reader
from official.core import task_factory
from official.projects.basnet.configs import basnet as exp_cfg
from official.projects.basnet.evaluation import metrics as basnet_metrics
from official.projects.basnet.losses import basnet_losses
from official.projects.basnet.modeling import basnet_model
from official.projects.basnet.modeling import refunet
from official.vision.dataloaders import segmentation_input
def build_basnet_model(
input_specs: tf.keras.layers.InputSpec,
model_config: exp_cfg.BASNetModel,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds BASNet model."""
norm_activation_config = model_config.norm_activation
backbone = basnet_model.BASNetEncoder(
input_specs=input_specs,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=model_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
decoder = basnet_model.BASNetDecoder(
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=model_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
refinement = refunet.RefUnet(
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_bias=model_config.use_bias,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
model = basnet_model.BASNetModel(backbone, decoder, refinement)
return model
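# Illustrative sketch of invoking the builder outside of the task (added note;
# the input size and reliance on default config values are assumptions):
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 256, 256, 3])
#   model = build_basnet_model(input_specs, exp_cfg.BASNetModel())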
@task_factory.register_task_cls(exp_cfg.BASNetTask)
class BASNetTask(base_task.Task):
"""A task for basnet."""
def build_model(self):
"""Builds basnet model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = build_basnet_model(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.assert_consumed()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds BASNet input."""
ignore_label = self.task_config.losses.ignore_label
decoder = segmentation_input.Decoder()
parser = segmentation_input.Parser(
output_size=params.output_size,
crop_size=params.crop_size,
ignore_label=ignore_label,
aug_rand_hflip=params.aug_rand_hflip,
dtype=params.dtype)
reader = input_reader.InputReader(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, label, model_outputs, aux_losses=None):
"""Hybrid loss proposed in BASNet.
Args:
      label: a dict of ground-truth tensors; `label['masks']` holds the
        saliency masks.
      model_outputs: a dict of per-level sigmoid saliency maps from the model.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
basnet_loss_fn = basnet_losses.BASNetLoss()
total_loss = basnet_loss_fn(model_outputs, label['masks'])
if aux_losses:
total_loss += tf.add_n(aux_losses)
return total_loss
def build_metrics(self, training=False):
"""Gets streaming metrics for training/validation."""
evaluations = []
if training:
evaluations = []
else:
self.mae_metric = basnet_metrics.MAE()
self.maxf_metric = basnet_metrics.MaxFscore()
self.relaxf_metric = basnet_metrics.RelaxedFscore()
return evaluations
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
# mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, label=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
# Apply gradient clipping.
if self.task_config.gradient_clip_norm > 0:
grads, _ = tf.clip_by_global_norm(
grads, self.task_config.gradient_clip_norm)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
    # No loss is computed during evaluation; quality is tracked by the MAE,
    # maxF and relaxF metrics aggregated below.
    loss = 0
    logs = {self.loss: loss}
levels = sorted(outputs.keys())
logs.update(
{self.mae_metric.name: (labels['masks'], outputs[levels[-1]])})
logs.update(
{self.maxf_metric.name: (labels['masks'], outputs[levels[-1]])})
logs.update(
{self.relaxf_metric.name: (labels['masks'], outputs[levels[-1]])})
return logs
def inference_step(self, inputs, model):
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.mae_metric.reset_states()
self.maxf_metric.reset_states()
self.relaxf_metric.reset_states()
state = self.mae_metric
self.mae_metric.update_state(
step_outputs[self.mae_metric.name][0],
step_outputs[self.mae_metric.name][1])
self.maxf_metric.update_state(
step_outputs[self.maxf_metric.name][0],
step_outputs[self.maxf_metric.name][1])
self.relaxf_metric.update_state(
step_outputs[self.relaxf_metric.name][0],
step_outputs[self.relaxf_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
result = {}
result['MAE'] = self.mae_metric.result()
result['maxF'] = self.maxf_metric.result()
result['relaxF'] = self.relaxf_metric.result()
return result
| 9,905 | 34.378571 | 79 | py |
models | models-master/official/projects/basnet/losses/basnet_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for BASNet models."""
import tensorflow as tf
EPSILON = 1e-5
class BASNetLoss:
"""BASNet hybrid loss."""
def __init__(self):
self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=False)
self._ssim = tf.image.ssim
def __call__(self, sigmoids, labels):
levels = sorted(sigmoids.keys())
labels_bce = tf.squeeze(labels, axis=-1)
labels = tf.cast(labels, tf.float32)
bce_losses = []
ssim_losses = []
iou_losses = []
for level in levels:
bce_losses.append(
self._binary_crossentropy(labels_bce, sigmoids[level]))
ssim_losses.append(
1 - self._ssim(sigmoids[level], labels, max_val=1.0))
iou_losses.append(
self._iou_loss(sigmoids[level], labels))
total_bce_loss = tf.math.add_n(bce_losses)
total_ssim_loss = tf.math.add_n(ssim_losses)
total_iou_loss = tf.math.add_n(iou_losses)
total_loss = total_bce_loss + total_ssim_loss + total_iou_loss
total_loss = total_loss / len(levels)
return total_loss
def _iou_loss(self, sigmoids, labels):
total_iou_loss = 0
intersection = tf.reduce_sum(sigmoids[:, :, :, :] * labels[:, :, :, :])
union = tf.reduce_sum(sigmoids[:, :, :, :]) + tf.reduce_sum(
labels[:, :, :, :]) - intersection
iou = intersection / union
    total_iou_loss += 1 - iou
return total_iou_loss
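# Illustrative usage sketch (added note; the shapes and the number of
# prediction levels are assumptions): the loss expects a dict of per-level
# sigmoid maps and a [batch, height, width, 1] ground-truth mask.
#
#   loss_fn = BASNetLoss()
#   sigmoids = {str(i): tf.random.uniform([2, 224, 224, 1]) for i in range(8)}
#   labels = tf.cast(tf.random.uniform([2, 224, 224, 1]) > 0.5, tf.float32)
#   total_loss = loss_fn(sigmoids, labels)  # hybrid BCE + SSIM + IoU loss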
| 2,047 | 30.030303 | 75 | py |