Dataset columns:
repo: stringlengths 2-99
file: stringlengths 13-225
code: stringlengths 0-18.3M
file_length: int64 0-18.3M
avg_line_length: float64 0-1.36M
max_line_length: int64 0-4.26M
extension_type: stringclasses 1 (value: py)
Each row below is rendered as "repo | file |" followed by the file's code, then
"| file_length | avg_line_length | max_line_length | extension_type |".
---|---|---|---|---|---|---|
models | models-master/official/projects/pointpillars/tasks/pointpillars_test.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pointpillars."""
from absl.testing import parameterized
import tensorflow as tf
from official.core import exp_factory
from official.modeling import optimization
from official.projects.pointpillars.configs import pointpillars as cfg
from official.projects.pointpillars.tasks import pointpillars
def _mock_inputs(model_config):
batch_size = 1
image_config = model_config.image
pillars_config = model_config.pillars
pillars = tf.ones([
batch_size, pillars_config.num_pillars,
pillars_config.num_points_per_pillar,
pillars_config.num_features_per_point
], dtype=tf.float32)
indices = tf.ones([
batch_size, pillars_config.num_pillars, 2
], dtype=tf.int32)
features = {
'pillars': pillars,
'indices': indices,
}
image_height = image_config.height
image_width = image_config.width
num_anchors_per_location = len(model_config.anchors)
cls_targets = {}
box_targets = {}
attribute_targets = {}
for attr in model_config.head.attribute_heads:
attribute_targets[attr.name] = {}
total_num_anchors = 0
for level in range(model_config.min_level, model_config.max_level + 1):
stride = 2**level
h_i = int(image_height / stride)
w_i = int(image_width / stride)
cls_targets[str(level)] = tf.ones(
[batch_size, h_i, w_i, num_anchors_per_location], dtype=tf.int32)
box_targets[str(level)] = tf.ones(
[batch_size, h_i, w_i, num_anchors_per_location * 4], dtype=tf.float32)
for attr in model_config.head.attribute_heads:
attribute_targets[attr.name][str(level)] = tf.ones(
[batch_size, h_i, w_i, num_anchors_per_location], dtype=tf.float32)
total_num_anchors += h_i * w_i * num_anchors_per_location
cls_weights = tf.ones([batch_size, total_num_anchors], dtype=tf.float32)
box_weights = tf.ones([batch_size, total_num_anchors], dtype=tf.float32)
image_shape = tf.ones([batch_size, 2], dtype=tf.int32)
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'attribute_targets': attribute_targets,
'cls_weights': cls_weights,
'box_weights': box_weights,
'anchor_boxes': None,
'image_shape': image_shape,
}
return features, labels
class PointPillarsTaskTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(True),
(False),
)
def test_train_and_eval(self, is_training):
exp_config = exp_factory.get_exp_config('pointpillars_baseline')
task_config = exp_config.task
    # Modify config to suit local testing.
task_config.model.image.height = 32
task_config.model.image.width = 32
task_config.model.pillars.num_pillars = 2
task_config.model.pillars.num_points_per_pillar = 3
task_config.model.pillars.num_features_per_point = 4
task_config.model.anchors = [cfg.Anchor(length=2.1, width=1.2)]
task_config.train_data.global_batch_size = 1
task_config.train_data.shuffle_buffer_size = 2
task_config.validation_data.global_batch_size = 1
task_config.validation_data.shuffle_buffer_size = 2
task_config.use_wod_metrics = False
task = pointpillars.PointPillarsTask(task_config)
inputs = _mock_inputs(task_config.model)
model = task.build_model()
opt_factory = optimization.OptimizerFactory(
exp_config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
metrics = task.build_metrics(training=is_training)
if is_training:
logs = task.train_step(inputs, model, optimizer, metrics=metrics)
else:
logs = task.validation_step(inputs, model, metrics=metrics)
self.assertIn('loss', logs)
if __name__ == '__main__':
tf.test.main()
| 4,320 | 35.618644 | 79 | py |
models | models-master/official/projects/pix2seq/utils.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pix2Seq required utility library."""
import copy
import tensorflow as tf
from official.projects.pix2seq.configs import pix2seq as pix2seq_cfg
def decode_object_seq_to_bbox(
logits, pred_seq, quantization_bins, coord_vocab_shift
):
"""Decode objects (label & bbox) for seq from `build_response_seq_from_bbox`.
Assume yxyxc format with truncation at the end for any uneven extra tokens.
Replace class tokens with argmax instead of sampling.
Args:
logits: `float` output logits in shape of (bsz, max_seq_len, vocab_size).
pred_seq: `int` pred sequence in shape of (bsz, max_seq_len).
quantization_bins: `int` for bins.
coord_vocab_shift: `int`, shifting coordinates by a specified integer.
  Returns:
    pred_class: `int` of shape (bsz, max_instances_per_image).
    pred_bbox: `float` of shape (bsz, max_instances_per_image, 4).
    pred_score: `float` of shape (bsz, max_instances_per_image).
    pred_num: `int` of shape (bsz,), the number of detected objects per image.
  """
_, seqlen, vocab_size = logits.shape
if seqlen % 5 != 0: # truncate out the last few tokens.
pred_seq = pred_seq[..., : -(seqlen % 5)]
logits = logits[..., : -(seqlen % 5), :]
pred_class_p = tf.nn.softmax(logits)[:, 4::5] # (bsz, instances, vocab_size)
mask_s1 = [0.0] * (pix2seq_cfg.BASE_VOCAB_SHIFT) # reserved.
mask_s2 = [1.0] * (
coord_vocab_shift - pix2seq_cfg.BASE_VOCAB_SHIFT
) # labels.
mask_s3 = [0.0] * (vocab_size - coord_vocab_shift) # coordinates and others.
mask = tf.constant(mask_s1 + mask_s2 + mask_s3)
pred_class = tf.argmax(pred_class_p * mask[tf.newaxis, tf.newaxis, :], -1)
pred_num = logits[:, 4::5] * mask[tf.newaxis, tf.newaxis, :]
pred_num = tf.reduce_sum(
tf.cast(
tf.math.greater(tf.math.reduce_max(pred_num, axis=-1), 0), tf.int32
),
axis=-1,
)
pred_score = tf.reduce_sum(
pred_class_p * tf.one_hot(pred_class, vocab_size), -1
)
pred_class = tf.maximum(pred_class - pix2seq_cfg.BASE_VOCAB_SHIFT, 0)
pred_bbox = seq_to_bbox(pred_seq - coord_vocab_shift, quantization_bins)
return pred_class, pred_bbox, pred_score, pred_num
def seq_to_bbox(seq, quantization_bins, seq_format='yxyx_name'):
"""Returns [0, 1] normalized yxyx bbox from token sequence."""
# [batch, 5*num_instances]
assert seq.shape.rank == 2, seq.shape.as_list()
# [batch, num_instances, 1]
if seq_format.startswith('name'):
ymin = tf.expand_dims(seq[:, 1::5], -1)
xmin = tf.expand_dims(seq[:, 2::5], -1)
ymax = tf.expand_dims(seq[:, 3::5], -1)
xmax = tf.expand_dims(seq[:, 4::5], -1)
else:
ymin = tf.expand_dims(seq[:, 0::5], -1)
xmin = tf.expand_dims(seq[:, 1::5], -1)
ymax = tf.expand_dims(seq[:, 2::5], -1)
xmax = tf.expand_dims(seq[:, 3::5], -1)
if seq_format in ['name_cycxhw', 'cycxhw_name']:
ycnt, xcnt, ysize, xsize = ymin, xmin, ymax, xmax
ymin = ycnt - ysize // 2
xmin = xcnt - xsize // 2
ymax = ycnt + ysize // 2
xmax = xcnt + xsize // 2
quantized_box = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
quantized_box = dequantize(quantized_box, quantization_bins)
return tf.minimum(tf.maximum(quantized_box, 0), 1)
def quantize(coordinates, bins):
"""Quantization of (normalized) coordinates in [0, 1]."""
coordinates = tf.cast(tf.round(coordinates * (bins - 1)), tf.int64)
coordinates = tf.clip_by_value(coordinates, 0, bins - 1)
return coordinates
def dequantize(boxes, bins):
"""Dequantization of discrete tokens of coordinates in [0, bins-1]."""
boxes = tf.cast(boxes, tf.float32)
boxes = boxes / (bins - 1)
return boxes
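
# Illustrative sketch (not part of the original module): a quantize/dequantize
# round trip with the 1000-bin default used by the detection config.
# `_demo_quantize_roundtrip` is a hypothetical helper added for exposition.
def _demo_quantize_roundtrip():
  coords = tf.constant([0.0, 0.25, 0.5, 1.0])
  tokens = quantize(coords, 1000)       # -> [0, 250, 500, 999], dtype tf.int64
  recovered = dequantize(tokens, 1000)  # -> [0.0, ~0.2502, ~0.5005, 1.0]
  # Round-trip error is bounded by half a bin width, 0.5 / (bins - 1).
  return tokens, recovered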
def truncation_bbox(bbox):
return tf.minimum(tf.maximum(bbox, 0.0), 1.0)
def jitter_bbox(bbox, min_range=0.0, max_range=0.05, truncation=True):
"""Jitter the bbox.
Args:
bbox: `float` tensor of shape (n, 4), ranged between 0 and 1.
min_range: min jitter range in ratio to bbox size.
max_range: max jitter range in ratio to bbox size.
truncation: whether to truncate resulting bbox to remain [0, 1].
Note: To create noisy positives, set min_range=0, which enables truncated
normal distribution. max_range <=0.05: noisy duplicates, <=0.02: near
duplicate. To create negatives: set min_range >= 0.1 to avoid false
negatives; suggested max_range <=0.4 to avoid too much randomness.
Returns:
jittered bbox.
"""
n = tf.shape(bbox)[0]
h = bbox[:, 2] - bbox[:, 0]
w = bbox[:, 3] - bbox[:, 1]
noise = tf.stack([h, w, h, w], -1)
if min_range == 0:
noise_rate = tf.random.truncated_normal(
[n, 4], mean=0, stddev=max_range / 2.0, dtype=bbox.dtype
)
else:
noise_rate1 = tf.random.uniform([n, 4], min_range, max_range)
noise_rate2 = tf.random.uniform([n, 4], -max_range, -min_range)
selector = tf.cast(tf.random.uniform([n, 4], 0, 1) < 0.5, tf.float32)
noise_rate = noise_rate1 * selector + noise_rate2 * (1.0 - selector)
bbox = bbox + noise * noise_rate
return truncation_bbox(bbox) if truncation else bbox
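
# Illustrative sketch (not part of the original module): the two jitter regimes
# from the docstring note above. `_demo_jitter` is a hypothetical helper added
# for exposition only.
def _demo_jitter():
  bbox = tf.constant([[0.1, 0.1, 0.5, 0.5]])  # One yxyx box in [0, 1].
  # min_range=0 draws truncated-normal noise -> near-duplicate boxes.
  noisy_dup = jitter_bbox(bbox, min_range=0.0, max_range=0.02)
  # min_range >= 0.1 keeps noise away from zero -> usable as negatives.
  negative = jitter_bbox(bbox, min_range=0.1, max_range=0.4)
  return noisy_dup, negative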
def shift_bbox(bbox, truncation=True):
"""Shifting bbox without changing the bbox height and width."""
n = tf.shape(bbox)[0]
# randomly sample new bbox centers.
cy = tf.random.uniform([n, 1], 0, 1)
cx = tf.random.uniform([n, 1], 0, 1)
h = bbox[:, 2:3] - bbox[:, 0:1]
w = bbox[:, 3:4] - bbox[:, 1:2]
bbox = tf.concat(
[
cy - tf.abs(h) / 2,
cx - tf.abs(w) / 2,
cy + tf.abs(h) / 2,
cx + tf.abs(w) / 2,
],
-1,
)
return truncation_bbox(bbox) if truncation else bbox
def random_bbox(n, max_size=1.0, truncation=True):
"""Generating random n bbox with max size specified within [0, 1]."""
cy = tf.random.uniform([n, 1], 0, 1)
cx = tf.random.uniform([n, 1], 0, 1)
h = tf.random.truncated_normal([n, 1], 0, max_size / 2.0)
w = tf.random.truncated_normal([n, 1], 0, max_size / 2.0)
bbox = tf.concat(
[
cy - tf.abs(h) / 2,
cx - tf.abs(w) / 2,
cy + tf.abs(h) / 2,
cx + tf.abs(w) / 2,
],
-1,
)
return truncation_bbox(bbox) if truncation else bbox
def augment_bbox(bbox, bbox_label, max_jitter, n_noise_bbox, mix_rate=0.0):
"""Augment bbox.
There are two types of noises to add:
1. Bad bbox: jittered bbox, shifted bbox, or random bbox.
2. Duplicated bbox.
Args:
bbox: `float` tensor of shape (n, 4), ranged between 0 and 1.
bbox_label: `int` tensor of shape (n,).
max_jitter: `float` scalar specifying max jitter range for positive bbox.
n_noise_bbox: `int` scalar tensor specifying size of the extra noise to add.
mix_rate: `float`. Probability of injecting the bad bbox in the middle of
original bbox, followed by dup bbox at the end; otherwise simply append
all noises at the end of original bbox.
  Returns:
    bbox_new: augmented bbox that's `n_noise_bbox` larger than original.
    bbox_new_label: new label for bbox_new.
"""
n = tf.shape(bbox)[0]
dup_bbox_size = tf.random.uniform([], 0, n_noise_bbox + 1, dtype=tf.int32)
dup_bbox_size = 0 if n == 0 else dup_bbox_size
bad_bbox_size = n_noise_bbox - dup_bbox_size
multiplier = 1 if n == 0 else tf.math.floordiv(n_noise_bbox, n) + 1
bbox_tiled = tf.tile(bbox, [multiplier, 1])
# Create bad bbox.
bbox_tiled = tf.random.shuffle(bbox_tiled)
bad_bbox_shift = shift_bbox(bbox_tiled[:bad_bbox_size], truncation=True)
bad_bbox_random = random_bbox(bad_bbox_size, max_size=1.0, truncation=True)
bad_bbox = tf.concat([bad_bbox_shift, bad_bbox_random], 0)
bad_bbox = tf.random.shuffle(bad_bbox)[:bad_bbox_size]
bad_bbox_label = tf.zeros([bad_bbox_size], dtype=bbox_label.dtype) + (
pix2seq_cfg.FAKE_CLASS_TOKEN - pix2seq_cfg.BASE_VOCAB_SHIFT
)
# Create dup bbox.
bbox_tiled = tf.random.shuffle(bbox_tiled)
dup_bbox = jitter_bbox(
bbox_tiled[:dup_bbox_size], min_range=0, max_range=0.1, truncation=True
)
dup_bbox_label = tf.zeros([dup_bbox_size], dtype=bbox_label.dtype) + (
pix2seq_cfg.FAKE_CLASS_TOKEN - pix2seq_cfg.BASE_VOCAB_SHIFT
)
# Jitter positive bbox.
if max_jitter > 0:
bbox = jitter_bbox(bbox, min_range=0, max_range=max_jitter, truncation=True)
if tf.random.uniform([]) < mix_rate:
    # Mix the bbox with bad bbox, then append dup bbox at the end.
bbox_new = tf.concat([bbox, bad_bbox], 0)
bbox_new_label = tf.concat([bbox_label, bad_bbox_label], 0)
idx = tf.random.shuffle(tf.range(tf.shape(bbox_new)[0]))
bbox_new = tf.gather(bbox_new, idx)
bbox_new_label = tf.gather(bbox_new_label, idx)
bbox_new = tf.concat([bbox_new, dup_bbox], 0)
bbox_new_label = tf.concat([bbox_new_label, dup_bbox_label], 0)
else:
# Merge bad bbox and dup bbox into noise bbox.
noise_bbox = tf.concat([bad_bbox, dup_bbox], 0)
noise_bbox_label = tf.concat([bad_bbox_label, dup_bbox_label], 0)
if n_noise_bbox > 0:
idx = tf.random.shuffle(tf.range(n_noise_bbox))
noise_bbox = tf.gather(noise_bbox, idx)
noise_bbox_label = tf.gather(noise_bbox_label, idx)
# Append noise bbox to bbox and create mask.
bbox_new = tf.concat([bbox, noise_bbox], 0)
bbox_new_label = tf.concat([bbox_label, noise_bbox_label], 0)
return bbox_new, bbox_new_label
def inject_noise_bbox(boxes, classes, max_instances_per_image):
boxes = copy.copy(boxes)
classes = copy.copy(classes)
num_instances = tf.shape(boxes)[0]
if num_instances < max_instances_per_image:
n_noise_bbox = max_instances_per_image - num_instances
boxes, classes = augment_bbox(boxes, classes, 0.0, n_noise_bbox)
return boxes, classes
def build_prompt_seq_from_task_id(
task_vocab_id: int, response_seq=None, prompt_shape=None
):
"""Build prompt seq just using task id.
Args:
task_vocab_id: Vocab id for the task.
    response_seq: an (optional) discrete target sequence with shape (bsz, ..., k).
prompt_shape: an (optional) tuple for prompt shape. One and only one of
`response_seq` and `prompt_shape` should be specified.
Returns:
discrete input sequence of task id with shape (bsz, ..., 1).
"""
task_id = tf.constant(task_vocab_id)
if response_seq is not None:
prompt_seq = tf.zeros_like(response_seq[..., :1]) + tf.cast(
task_id, response_seq.dtype
)
if prompt_shape is not None:
assert response_seq is None, 'double specification'
prompt_seq = tf.zeros(prompt_shape, dtype=tf.int64) + tf.cast(
task_id, dtype=tf.int64
)
return prompt_seq
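
# Illustrative sketch (not part of the original module): building a detection
# prompt from the task id alone. `_demo_prompt_seq` is a hypothetical helper
# added for exposition only.
def _demo_prompt_seq():
  prompt = build_prompt_seq_from_task_id(
      pix2seq_cfg.OD_ID, prompt_shape=(2, 1))
  # -> [[10], [10]] with dtype tf.int64, i.e. a batch of 2 one-token prompts.
  return prompt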
def clip_or_pad_to_max_len(data, max_len, dim):
"""Pad the data tensor to max length on dim."""
shape = shape_as_list(data)
padding_shape, clipped_shape = copy.copy(shape), copy.copy(shape)
padding_shape[dim] = tf.maximum(0, max_len - padding_shape[dim])
clipped_shape[dim] = tf.minimum(clipped_shape[dim], max_len)
paddings = tf.zeros(padding_shape, dtype=data.dtype)
clipped_data = tf.slice(data, tf.zeros_like(shape), clipped_shape)
return tf.concat([clipped_data, paddings], axis=dim)
def shape_as_list(t):
# Assumes rank of `t` is statically known.
shape = t.shape.as_list()
dynamic_shape = tf.shape(t)
return [
shape[i] if shape[i] is not None else dynamic_shape[i]
for i in range(len(shape))
]
def reorder_object_instances(boxes, classes, order):
"""Must be called _before_ padding to max instances."""
if order == 'none':
    return boxes, classes
assert boxes.shape.rank == 2, 'Must be unbatched'
boxes = tf.reshape(boxes, [-1, 2, 2])
if order == 'random':
idx = tf.random.shuffle(tf.range(tf.shape(boxes)[0]))
elif order == 'area':
areas = tf.cast(
tf.reduce_prod(boxes[:, 1, :] - boxes[:, 0, :], axis=1), tf.int64
) # approximated size.
idx = tf.argsort(areas, direction='DESCENDING')
elif order == 'dist2ori':
y, x = boxes[:, 0], boxes[:, 1] # using top-left corner.
dist2ori = tf.square(y) + tf.square(x)
idx = tf.argsort(dist2ori, direction='ASCENDING')
else:
raise ValueError('Unknown order {}'.format(order))
boxes = tf.reshape(boxes, [-1, 4])
boxes = tf.gather(boxes, idx)
classes = tf.gather(classes, idx)
return boxes, classes
def scale_points(points, scale):
"""Scales points.
Args:
points: Tensor with shape [num_points * 2], [batch, num_points * 2] or
[batch, instances, num_points * 2] where points are organized in (y, x)
format.
scale: Tensor with shape [2] or [batch, 2].
Returns:
Tensor with same shape as points.
"""
points_orig = points
orig_shape = tf.shape(points)
coords_len = points.shape[-1]
if points.shape.rank == 1:
points = tf.reshape(points, [coords_len // 2, 2])
elif points.shape.rank == 2:
points = tf.reshape(points, [-1, coords_len // 2, 2])
else:
points = tf.reshape(points, [-1, orig_shape[1], coords_len // 2, 2])
scale = tf.expand_dims(scale, -2)
points = points * scale
points = tf.reshape(points, orig_shape)
points = preserve_reserved_tokens(points, points_orig)
return points
def preserve_reserved_tokens(points, points_orig):
"""Preserve reserved tokens in points according to points_orig."""
return replace_reserved_tokens(
points, points_orig, dict(zip(pix2seq_cfg.FLOATS, pix2seq_cfg.FLOATS))
)
def replace_reserved_tokens(seq, ref_seq, replacements):
for key, replacement in replacements.items():
seq = tf.where(
tf.equal(ref_seq, key), tf.constant(replacement, seq.dtype), seq
)
return seq
def tf_float32(t):
return tf.cast(t, tf.float32)
| 14,169 | 34.692695 | 80 | py |
models | models-master/official/projects/pix2seq/train.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# pylint: disable=unused-import
from official.projects.pix2seq.configs import pix2seq
from official.projects.pix2seq.tasks import pix2seq_task
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir,
)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
app.run(main)
| 2,673 | 35.135135 | 80 | py |
models | models-master/official/projects/pix2seq/configs/pix2seq.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pix2Seq configurations."""
import dataclasses
import os
from typing import List, Optional, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.configs import backbones
from official.vision.configs import common
# Vocab.
# A shared vocab among tasks and its structure -
# Special tokens: [0, 99).
# Class tokens: [100, coord_vocab_shift).
# Coordinate tokens: [coord_vocab_shift, text_vocab_shift).
PADDING_TOKEN = 0
# 10-29 reserved for task id.
FAKE_CLASS_TOKEN = 30
FAKE_TEXT_TOKEN = 30 # Same token to represent fake class and fake text.
SEPARATOR_TOKEN = 40
INVISIBLE_TOKEN = 41
BASE_VOCAB_SHIFT = 100
# Floats used to represent padding and separator in the flat list of polygon
# coords, and invisibility in the key points.
PADDING_FLOAT = -1.0
SEPARATOR_FLOAT = -2.0
INVISIBLE_FLOAT = -3.0
FLOATS = [PADDING_FLOAT, SEPARATOR_FLOAT, INVISIBLE_FLOAT]
TOKENS = [PADDING_TOKEN, SEPARATOR_TOKEN, INVISIBLE_TOKEN]
FLOAT_TO_TOKEN = dict(zip(FLOATS, TOKENS))
TOKEN_TO_FLOAT = dict(zip(TOKENS, FLOATS))
OD_ID = 10
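# Worked vocab layout (illustrative, assuming the task defaults below:
# coord_vocab_shift=1000, quantization_bins=1000, vocab_size=3000):
#   token 0          -> padding
#   tokens 10-29     -> task ids (e.g. OD_ID = 10 for detection)
#   token 105        -> class label 5 (label + BASE_VOCAB_SHIFT)
#   token 1500       -> coordinate 0.5 (quantized to bin 500, then shifted
#                       by coord_vocab_shift)
#   tokens 2000-2999 -> remaining headroom (text tokens in the full Pix2Seq
#                       vocabulary)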
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
input_path: str = ''
tfds_name: str = ''
tfds_split: str = 'train'
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'float32'
decoder: common.DataDecoder = dataclasses.field(
default_factory=common.DataDecoder
)
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
drop_remainder: bool = True
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
aug_color_jitter_strength: float = 0.0
label_shift: int = 0
@dataclasses.dataclass
class Losses(hyperparams.Config):
noise_bbox_weight: float = 1.0
eos_token_weight: float = 0.1
l2_weight_decay: float = 1e-4
@dataclasses.dataclass
class Pix2Seq(hyperparams.Config):
"""Pix2Seq model definations."""
max_num_instances: int = 100
hidden_size: int = 256
num_encoder_layers: int = 6
num_decoder_layers: int = 6
vocab_size: int = 3000
use_cls_token: bool = False
shared_decoder_embedding: bool = True
decoder_output_bias: bool = True
input_size: List[int] = dataclasses.field(default_factory=list)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='resnet',
resnet=backbones.ResNet(model_id=50, bn_trainable=False),
)
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=common.NormActivation
)
backbone_endpoint_name: str = '5'
drop_path: float = 0.1
drop_units: float = 0.1
drop_att: float = 0.0
norm_first: bool = True
@dataclasses.dataclass
class Pix2SeqTask(cfg.TaskConfig):
model: Pix2Seq = dataclasses.field(default_factory=Pix2Seq)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=cfg.DataConfig
)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[str, List[str]] = 'all' # all, backbone
annotation_file: Optional[str] = None
per_category_metrics: bool = False
coord_vocab_shift: int = 1000
quantization_bins: int = 1000
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('pix2seq_r50_coco')
def pix2seq_r50_coco() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 128
eval_batch_size = 16
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
train_steps = 80 * steps_per_epoch
config = cfg.ExperimentConfig(
task=Pix2SeqTask(
init_checkpoint='',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(
COCO_INPUT_PATH_BASE, 'instances_val2017.json'
),
model=Pix2Seq(
input_size=[640, 640, 3],
norm_activation=common.NormActivation(
norm_momentum=0.9,
norm_epsilon=1e-5,
use_sync_bn=True),
backbone=backbones.Backbone(
type='resnet', resnet=backbones.ResNet(model_id=50)
),
),
losses=Losses(l2_weight_decay=0.0),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=train_batch_size * 10,
aug_scale_min=0.3,
aug_scale_max=2.0,
aug_color_jitter_strength=0.0
),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=True,
),
),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=5 * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
max_to_keep=10,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw_experimental',
'adamw_experimental': {
'epsilon': 1.0e-08,
'weight_decay': 0.05,
'beta_1': 0.9,
'beta_2': 0.95,
'global_clipnorm': -1.0,
},
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.0001,
'end_learning_rate': 0.000001,
'offset': 0,
'power': 1.0,
'decay_steps': 80 * steps_per_epoch,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2 * steps_per_epoch,
'warmup_learning_rate': 0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
return config
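
# Illustrative sketch (not part of the original module): fetching the
# registered experiment config and overriding a field for a local run.
# `_demo_get_config` and the batch-size override value are hypothetical.
def _demo_get_config() -> cfg.ExperimentConfig:
  config = exp_factory.get_exp_config('pix2seq_r50_coco')
  config.task.train_data.global_batch_size = 8  # Hypothetical local override.
  return config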
| 7,249 | 31.366071 | 82 | py |
models | models-master/official/projects/pix2seq/configs/pix2seq_test.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Pix2Seq config."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.pix2seq.configs import pix2seq as exp_cfg
class Pix2SeqTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('pix2seq_r50_coco',))
def test_pix2seq_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.Pix2SeqTask)
self.assertIsInstance(config.task.train_data, cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,444 | 34.243902 | 74 | py |
models | models-master/official/projects/pix2seq/dataloaders/pix2seq_input_test.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Pix2Seq input."""
import io
# Import libraries
import numpy as np
from PIL import Image
import tensorflow as tf
from official.projects.pix2seq.dataloaders import pix2seq_input
from official.vision.dataloaders import tf_example_decoder
IMAGE_KEY = 'image/encoded'
LABEL_KEY = 'image/object/class/label'
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = (
value.numpy()
) # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def fake_seq_example():
# Create fake data.
random_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
random_image = Image.fromarray(random_image)
labels = [42, 5]
with io.BytesIO() as buffer:
random_image.save(buffer, format='JPEG')
raw_image_bytes = buffer.getvalue()
xmins = [0.23, 0.15]
xmaxs = [0.54, 0.60]
ymins = [0.11, 0.5]
ymaxs = [0.86, 0.72]
feature = {
'image/encoded': _bytes_feature(raw_image_bytes),
'image/height': _int64_feature(480),
'image/width': _int64_feature(640),
'image/object/bbox/xmin': tf.train.Feature(
float_list=tf.train.FloatList(value=xmins)
),
'image/object/bbox/xmax': tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs)
),
'image/object/bbox/ymin': tf.train.Feature(
float_list=tf.train.FloatList(value=ymins)
),
'image/object/bbox/ymax': tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs)
),
'image/object/class/label': tf.train.Feature(
int64_list=tf.train.Int64List(value=labels)
),
'image/object/area': tf.train.Feature(
float_list=tf.train.FloatList(value=[1., 2.])
),
'image/object/is_crowd': tf.train.Feature(
int64_list=tf.train.Int64List(value=[0, 0])
),
'image/source_id': _bytes_feature(b'123'),
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto, labels
class Pix2SeqParserTest(tf.test.TestCase):
def test_image_input_train(self):
decoder = tf_example_decoder.TfExampleDecoder()
parser = pix2seq_input.Parser(
eos_token_weight=0.1,
output_size=[640, 640],
max_num_boxes=10,
).parse_fn(True)
seq_example, _ = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image, _ = output_tensor
self.assertAllEqual(image.shape, (640, 640, 3))
def test_image_input_eval(self):
decoder = tf_example_decoder.TfExampleDecoder()
parser = pix2seq_input.Parser(
eos_token_weight=0.1,
output_size=[640, 640],
max_num_boxes=10,
).parse_fn(False)
seq_example, _ = fake_seq_example()
input_tensor = tf.constant(seq_example.SerializeToString())
decoded_tensors = decoder.decode(input_tensor)
output_tensor = parser(decoded_tensors)
image, _ = output_tensor
self.assertAllEqual(image.shape, (640, 640, 3))
if __name__ == '__main__':
tf.test.main()
| 4,227 | 29.637681 | 79 | py |
models | models-master/official/projects/pix2seq/dataloaders/pix2seq_input.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO data loader for Pix2Seq."""
from typing import Tuple
import tensorflow as tf
from official.projects.pix2seq import utils
from official.projects.pix2seq.configs import pix2seq as pix2seq_cfg
from official.projects.simclr.dataloaders import preprocess_ops as simclr_preprocess_ops
from official.vision.dataloaders import parser
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
RESIZE_SCALES = (480, 512, 544, 576, 608, 640)
class Parser(parser.Parser):
"""Parse an image and its annotations into a dictionary of tensors."""
def __init__(
self,
eos_token_weight: float = 0.1,
output_size: Tuple[int, int] = (1333, 1333),
max_num_boxes: int = 100,
aug_rand_hflip=True,
aug_scale_min=0.3,
aug_scale_max=2.0,
aug_color_jitter_strength: float = 0.5,
aug_color_jitter_impl='simclrv2',
coord_vocab_shift=1000,
quantization_bins=1000,
skip_crowd_during_training=True,
label_shift: int = 0,
):
self._eos_token_weight = eos_token_weight
self._output_size = output_size
self._max_num_boxes = max_num_boxes
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
self._aug_color_jitter_strength = aug_color_jitter_strength
self._aug_color_jitter_impl = aug_color_jitter_impl
self._coord_vocab_shift = coord_vocab_shift
self._quantization_bins = quantization_bins
self._skip_crowd_during_training = skip_crowd_during_training
self._label_shift = label_shift
def _parse_train_data(self, data):
"""Parses data for training and evaluation."""
classes = data['groundtruth_classes'] + self._label_shift
boxes = data['groundtruth_boxes']
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training:
num_groundtruths = tf.shape(classes)[0]
with tf.control_dependencies([num_groundtruths, is_crowds]):
indices = tf.cond(
tf.greater(tf.size(is_crowds), 0),
lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
lambda: tf.cast(tf.range(num_groundtruths), tf.int64),
)
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
# Gets original image.
image = data['image']
# Normalizes image with mean and std pixel values.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Color jitter.
image = simclr_preprocess_ops.random_color_jitter(
image=image,
color_jitter_strength=self._aug_color_jitter_strength,
impl=self._aug_color_jitter_impl,
)
image = tf.clip_by_value(image, 0.0, 1.0)
image, boxes, _ = preprocess_ops.random_horizontal_flip(image, boxes)
image_shape = tf.shape(image)[:2]
boxes = box_ops.denormalize_boxes(boxes, image_shape)
image, image_info = preprocess_ops.resize_and_crop_image(
image,
self._output_size,
padded_size=self._output_size,
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
boxes = preprocess_ops.resize_and_crop_boxes(
boxes, image_info[2, :], image_info[1, :], image_info[3, :]
)
boxes = box_ops.normalize_boxes(boxes, image_info[1, :])
# Filters out ground truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
boxes, classes = utils.reorder_object_instances(boxes, classes, 'random')
boxes, classes = utils.inject_noise_bbox(
boxes, classes, self._max_num_boxes
)
boxes = utils.clip_or_pad_to_max_len(boxes, self._max_num_boxes, 0)
classes = utils.clip_or_pad_to_max_len(classes, self._max_num_boxes, 0)
outputs = self.build_response_seq_from_bbox(
boxes, classes, self._coord_vocab_shift, self._quantization_bins
)
response_seq, response_seq_class_m, token_weights = outputs
prompt_seq = utils.build_prompt_seq_from_task_id(
pix2seq_cfg.OD_ID, response_seq
) # (1)
input_seq = tf.concat([prompt_seq, response_seq_class_m], -1)
target_seq = tf.concat([prompt_seq, response_seq], -1)
backgrnd_val = 0.3
image = backgrnd_val + tf.image.pad_to_bounding_box(
image - backgrnd_val, 0, 0, self._output_size[0], self._output_size[1]
)
input_seq = utils.clip_or_pad_to_max_len(
input_seq, self._max_num_boxes * 5 + 1, -1)
target_seq = utils.clip_or_pad_to_max_len(
target_seq, self._max_num_boxes * 5 + 1, -1
)
input_seq, target_seq = input_seq[..., :-1], target_seq[..., 1:]
token_weights = utils.clip_or_pad_to_max_len(
token_weights, self._max_num_boxes * 5, -1
)
# Assign lower weights for ending/padding tokens.
token_weights = tf.where(
target_seq == pix2seq_cfg.PADDING_TOKEN,
tf.zeros_like(token_weights) + self._eos_token_weight,
token_weights,
)
labels = {
'targets': target_seq,
'weights': token_weights,
'inputs': input_seq,
}
return image, labels
def build_response_seq_from_bbox(
self,
bbox,
label,
coord_vocab_shift,
quantization_bins,
noise_bbox_weight=1.0,
class_label_corruption='rand_n_fake_cls',
):
"""Build target seq from bounding bboxes for object detection.
Objects are serialized using the format of yxyxc.
Args:
bbox: `float` bounding box of shape (n, 4).
label: `int` label of shape (n).
coord_vocab_shift: `int`, shifting coordinates by a specified integer.
quantization_bins: `int`.
noise_bbox_weight: `float` on the token weights for noise bboxes.
class_label_corruption: `string` specifying how labels are corrupted for
the input_seq.
    Returns:
      response_seq: discrete target sequence of shape (seqlen,).
      response_seq_class_m: the same sequence with class tokens corrupted per
        `class_label_corruption`, used as the decoder input.
      token_weights: per-token loss weights of shape (seqlen,).
"""
# Bbox and label quantization.
is_padding = tf.expand_dims(tf.equal(label, 0), -1)
quantized_bbox = utils.quantize(bbox, quantization_bins)
quantized_bbox = quantized_bbox + coord_vocab_shift
quantized_bbox = tf.where(
is_padding, tf.zeros_like(quantized_bbox), quantized_bbox
)
new_label = tf.expand_dims(label + pix2seq_cfg.BASE_VOCAB_SHIFT, -1)
new_label = tf.where(is_padding, tf.zeros_like(new_label), new_label)
lb_shape = tf.shape(new_label)
# Bbox and label serialization.
response_seq = tf.concat([quantized_bbox, new_label], axis=-1)
response_seq = tf.reshape(response_seq, [-1])
rand_cls = pix2seq_cfg.BASE_VOCAB_SHIFT + tf.random.uniform(
lb_shape,
0,
coord_vocab_shift - pix2seq_cfg.BASE_VOCAB_SHIFT,
dtype=new_label.dtype,
)
fake_cls = pix2seq_cfg.FAKE_CLASS_TOKEN + tf.zeros_like(new_label)
rand_n_fake_cls = tf.where(
tf.random.uniform(lb_shape) > 0.5, rand_cls, fake_cls
)
real_n_fake_cls = tf.where(
tf.random.uniform(lb_shape) > 0.5, new_label, fake_cls
)
real_n_rand_n_fake_cls = tf.where(
tf.random.uniform(lb_shape) > 0.5, new_label, rand_n_fake_cls
)
label_mapping = {
'none': new_label,
'rand_cls': rand_cls,
'real_n_fake_cls': real_n_fake_cls,
'rand_n_fake_cls': rand_n_fake_cls,
'real_n_rand_n_fake_cls': real_n_rand_n_fake_cls,
}
new_label_m = label_mapping[class_label_corruption]
new_label_m = tf.where(is_padding, tf.zeros_like(new_label_m), new_label_m)
response_seq_class_m = tf.concat([quantized_bbox, new_label_m], axis=-1)
response_seq_class_m = tf.reshape(response_seq_class_m, [-1])
# Get token weights.
is_real = tf.cast(
tf.not_equal(new_label, pix2seq_cfg.FAKE_CLASS_TOKEN), tf.float32
)
bbox_weight = tf.tile(is_real, [1, 4])
label_weight = is_real + (1.0 - is_real) * noise_bbox_weight
token_weights = tf.concat([bbox_weight, label_weight], -1)
token_weights = tf.reshape(token_weights, [-1])
return response_seq, response_seq_class_m, token_weights
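  # Worked example (illustrative): with quantization_bins=1000 and
  # coord_vocab_shift=1000, a box [0.25, 0.1, 0.75, 0.9] quantizes to
  # [250, 100, 749, 899]; shifting gives coordinate tokens
  # [1250, 1100, 1749, 1899], and label 3 becomes 3 + BASE_VOCAB_SHIFT = 103,
  # so the serialized yxyxc chunk is [1250, 1100, 1749, 1899, 103].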
def _parse_eval_data(self, data):
"""Parses data for training and evaluation."""
classes = data['groundtruth_classes'] + self._label_shift
boxes = data['groundtruth_boxes']
is_crowd = data['groundtruth_is_crowd']
# Gets original image and its size.
image = data['image']
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image_shape = tf.shape(image)[:2]
boxes = box_ops.denormalize_boxes(boxes, image_shape)
gt_boxes = boxes
image, image_info = preprocess_ops.resize_image(
image, min(self._output_size), max(self._output_size)
)
boxes = preprocess_ops.resize_and_crop_boxes(
boxes, image_info[2, :], image_info[1, :], image_info[3, :]
)
scale = tf.cast(
tf.concat([self._output_size, self._output_size], -1), boxes.dtype
)
boxes = boxes / scale
# Filters out ground truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
is_crowd = tf.gather(is_crowd, indices)
prompt_seq = tf.constant([pix2seq_cfg.OD_ID], dtype=tf.int64)
backgrnd_val = 0.3
image = backgrnd_val + tf.image.pad_to_bounding_box(
image - backgrnd_val, 0, 0, self._output_size[0], self._output_size[1]
)
labels = {
'prompt': prompt_seq,
'classes': preprocess_ops.clip_or_pad_to_fixed_size(
classes, self._max_num_boxes
),
'boxes': preprocess_ops.clip_or_pad_to_fixed_size(
boxes, self._max_num_boxes
),
}
labels.update({
'id': int(data['source_id']),
'image_info': image_info,
'is_crowd': preprocess_ops.clip_or_pad_to_fixed_size(
is_crowd, self._max_num_boxes
),
'gt_boxes': preprocess_ops.clip_or_pad_to_fixed_size(
gt_boxes, self._max_num_boxes
),
})
return image, labels
| 10,680 | 34.722408 | 88 | py |
models | models-master/official/projects/pix2seq/modeling/transformer_test.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer."""
import tensorflow as tf
from official.projects.pix2seq.modeling import transformer
class TransformerTest(tf.test.TestCase):
def test_transformer_encoder(self):
batch_size = 2
sequence_length = 100
feature_size = 256
model = transformer.TransformerEncoder(
num_layers=3,
dim=feature_size,
mlp_ratio=4.0,
num_heads=2,
)
input_tensor = tf.ones((batch_size, sequence_length, feature_size))
out = model(input_tensor, mask=None, training=False)
self.assertAllEqual(
tf.shape(out), (batch_size, sequence_length, feature_size)
)
def test_transformer_encoder_get_config(self):
model = transformer.TransformerEncoder(
num_layers=2,
dim=256,
mlp_ratio=4.0,
num_heads=2,
)
config = model.get_config()
expected_config = {
'name': 'transformer_encoder',
'trainable': True,
'dtype': 'float32',
'num_layers': 2,
'dim': 256,
'mlp_ratio': 4.0,
'num_heads': 2,
'drop_path': 0.1,
'drop_units': 0.1,
'drop_att': 0.0,
'self_attention': True,
'use_ffn_ln': False,
'ln_scale_shift': True,
}
self.assertAllEqual(expected_config, config)
def test_transformer_decoder_layer(self):
batch_size = 2
sequence_length = 100
memory_length = 200
feature_size = 256
model = transformer.TransformerDecoderLayer(
dim=feature_size,
mlp_ratio=4.0,
num_heads=2
)
input_tensor = tf.ones((batch_size, sequence_length, feature_size))
memory = tf.ones((batch_size, memory_length, feature_size))
self_attention_mask = tf.ones(
(batch_size, sequence_length, sequence_length), dtype=tf.int64
)
out, _ = model(
input_tensor,
memory,
None,
self_attention_mask,
None,
training=False,
)
self.assertAllEqual(
tf.shape(out), (batch_size, sequence_length, feature_size)
)
def test_transformer_decoder_layer_get_config(self):
model = transformer.TransformerDecoderLayer(
dim=256,
mlp_ratio=4.0,
num_heads=2
)
config = model.get_config()
expected_config = {
'name': 'transformer_decoder_layer',
'trainable': True,
'dtype': 'float32',
'dim': 256,
'mlp_ratio': 4.0,
'num_heads': 2,
'drop_path': 0.1,
'drop_units': 0.1,
'drop_att': 0.0,
'dim_x_att': None,
'self_attention': True,
'cross_attention': True,
'use_mlp': True,
'use_enc_ln': False,
'use_ffn_ln': False,
'ln_scale_shift': True,
}
self.assertAllEqual(expected_config, config)
def test_transformer_decoder(self):
batch_size = 2
sequence_length = 100
memory_length = 200
feature_size = 256
num_layers = 3
model = transformer.TransformerDecoder(
num_layers=num_layers,
dim=feature_size,
mlp_ratio=4.0,
num_heads=2,
)
input_tensor = tf.ones((batch_size, sequence_length, feature_size))
memory = tf.ones((batch_size, memory_length, feature_size))
self_attention_mask = tf.ones(
(batch_size, sequence_length, sequence_length), dtype=tf.int64
)
out, cache = model(
input_tensor, memory, None, self_attention_mask, None, training=False
)
self.assertAllEqual(
tf.shape(out), (batch_size, sequence_length, feature_size)
)
self.assertAllEqual(
tf.shape(cache), (num_layers, batch_size, sequence_length, feature_size)
)
def test_transformer_decoder_get_config(self):
num_layers = 2
num_attention_heads = 2
intermediate_size = 256
model = transformer.TransformerDecoder(
num_layers=num_layers,
dim=intermediate_size,
mlp_ratio=4.0,
num_heads=num_attention_heads,
)
config = model.get_config()
expected_config = {
'name': 'transformer_decoder',
'trainable': True,
'dtype': 'float32',
'num_layers': 2,
'dim': 256,
'mlp_ratio': 4.0,
'num_heads': 2,
'drop_path': 0.1,
'drop_units': 0.1,
'drop_att': 0.0,
'dim_x_att': None,
'self_attention': True,
'cross_attention': True,
'use_mlp': True,
'use_enc_ln': False,
'use_ffn_ln': False,
'ln_scale_shift': True,
}
self.assertAllEqual(expected_config, config)
if __name__ == '__main__':
tf.test.main()
| 5,184 | 27.333333 | 80 | py |
models | models-master/official/projects/pix2seq/modeling/pix2seq_model.py |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements A Language Modeling Framework for Object Detection.
Model paper: https://arxiv.org/abs/2109.10852
This module does not support Keras de/serialization. Please use
tf.train.Checkpoint for object based saving and loading and tf.saved_model.save
for graph serialization.
"""
import math
from typing import Any, List, Mapping, Optional, Union
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.pix2seq.modeling import transformer
def get_shape(x):
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def get_variable_initializer(name=None):
if name is None:
return tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.02)
def add_seq_pos_emb(
self, pos_encoding, max_seq_len, dim, name_prefix=None, initializer=None
):
"""Add seq_pos_emb variable/tensor to model instance referenced by `self`."""
if name_prefix is None:
name_prefix = self.name
if initializer is None:
initializer = get_variable_initializer()
if pos_encoding == "learned":
self.seq_pos_emb = self.add_weight(
shape=(max_seq_len + 1, dim),
initializer=initializer,
name="%s/seq_pos_embedding" % name_prefix,
)
# (gunho) currently only 'learned' positional encoding is supported
elif pos_encoding == "sin_cos":
self.seq_pos_emb = None
else:
raise ValueError("Unknown pos encoding %s" % pos_encoding)
def add_vocab_token_emb(
self,
vocab_size,
dim,
shared_embedding,
output_bias,
name_prefix=None,
initializer=None,
):
"""Add token_embedding variable to model instance referenced by `self`."""
if name_prefix is None:
name_prefix = self.name
if initializer is None:
initializer = get_variable_initializer()
if shared_embedding:
self.token_embedding = self.add_weight(
shape=[vocab_size, dim],
initializer=initializer,
name="%s/token_embedding" % name_prefix,
)
else:
self.inp_token_embedding = self.add_weight(
shape=[vocab_size, dim],
initializer=initializer,
name="%s/inp_token_embedding" % name_prefix,
)
self.outp_token_embedding = self.add_weight(
shape=[vocab_size, dim],
initializer=initializer,
name="%s/outp_token_embedding" % name_prefix,
)
if output_bias:
self.outp_bias = self.add_weight(
shape=[vocab_size],
initializer=initializer,
name="%s/outp_bias" % name_prefix,
)
def get_ar_mask(seq_len, dtype=tf.float32):
"""Get autoregressive causal mask so the model cannot attends to the future.
Args:
seq_len: a `int` or `int` tensor specifying the sequence length.
dtype: tf data type for the return tensor.
Returns:
tensor of shape [1, 1, seq_len, seq_len] with ones for locations to be
masked out.
"""
valid_locs = tf.linalg.band_part(
tf.ones([seq_len, seq_len], dtype=dtype), -1, 0
)
valid_locs = tf.reshape(valid_locs, [1, 1, seq_len, seq_len])
return 1.0 - valid_locs
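
# Illustrative sketch (not part of the original module): the causal mask for a
# 3-token sequence. `_demo_ar_mask` is a hypothetical helper for exposition.
def _demo_ar_mask():
  mask = get_ar_mask(3)  # shape [1, 1, 3, 3]
  # mask[0, 0] == [[0., 1., 1.],
  #                [0., 0., 1.],
  #                [0., 0., 0.]]
  # Position i may attend to positions <= i; ones mark masked-out futures.
  return mask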
def position_embedding_sine(
attention_mask,
num_pos_features=256,
temperature=10000.0,
normalize=True,
scale=2 * math.pi,
):
"""Sine-based positional embeddings for 2D images.
Args:
attention_mask: a `bool` Tensor specifying the size of the input image to
the Transformer and which elements are padded, of size [batch_size,
height, width]
num_pos_features: a `int` specifying the number of positional features,
should be equal to the hidden size of the Transformer network
temperature: a `float` specifying the temperature of the positional
embedding. Any type that is converted to a `float` can also be accepted.
normalize: a `bool` determining whether the positional embeddings should be
normalized between [0, scale] before application of the sine and cos
functions.
scale: a `float` if normalize is True specifying the scale embeddings before
application of the embedding function.
Returns:
embeddings: a `float` tensor of the same shape as input_tensor specifying
the positional embeddings based on sine features.
"""
if num_pos_features % 2 != 0:
raise ValueError(
"Number of embedding features (num_pos_features) must be even when "
"column and row embeddings are concatenated."
)
num_pos_features = num_pos_features // 2
# Produce row and column embeddings based on total size of the image
# <tf.float>[batch_size, height, width]
attention_mask = tf.cast(attention_mask, tf.float32)
row_embedding = tf.cumsum(attention_mask, 1)
col_embedding = tf.cumsum(attention_mask, 2)
if normalize:
eps = 1e-6
row_embedding = row_embedding / (row_embedding[:, -1:, :] + eps) * scale
col_embedding = col_embedding / (col_embedding[:, :, -1:] + eps) * scale
dim_t = tf.range(num_pos_features, dtype=row_embedding.dtype)
dim_t = tf.pow(temperature, 2 * (dim_t // 2) / num_pos_features)
# Creates positional embeddings for each row and column position
# <tf.float>[batch_size, height, width, num_pos_features]
pos_row = tf.expand_dims(row_embedding, -1) / dim_t
pos_col = tf.expand_dims(col_embedding, -1) / dim_t
pos_row = tf.stack(
[tf.sin(pos_row[:, :, :, 0::2]), tf.cos(pos_row[:, :, :, 1::2])], axis=4
)
pos_col = tf.stack(
[tf.sin(pos_col[:, :, :, 0::2]), tf.cos(pos_col[:, :, :, 1::2])], axis=4
)
final_shape = tf_utils.get_shape_list(pos_row)[:3] + [-1]
pos_row = tf.reshape(pos_row, final_shape)
pos_col = tf.reshape(pos_col, final_shape)
output = tf.concat([pos_row, pos_col], -1)
embeddings = tf.cast(output, tf.float32)
return embeddings
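
# Illustrative sketch (not part of the original module): output shape of the
# 2D sine embedding. `_demo_pos_emb` is a hypothetical helper for exposition.
def _demo_pos_emb():
  mask = tf.ones([2, 20, 20])  # Batch of 20x20 feature maps, nothing padded.
  emb = position_embedding_sine(mask, num_pos_features=256)
  # emb.shape == (2, 20, 20, 256): 128 row (y) features concatenated with
  # 128 column (x) features.
  return emb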
def top_logits(
logits: tf.Tensor, k: int = 0, p: float = 1.0, mask: float = -1e10
) -> tf.Tensor:
"""Remove low probability logits via masking.
Args:
logits: class logits in shape of (batch size, total_classes).
k: specifying top k largest logits to keep.
    p: specifying a probability threshold for nucleus (top-p) filtering: the
      largest set of top logits whose cumulative probability does not exceed
      p is kept (at least the single largest logit is always kept).
    mask: a value used to replace logits that don't satisfy the keep
      conditions.
Returns:
logits where low probability ones are replaced with mask.
"""
mask = tf.ones_like(logits) * mask
if k > 0:
min_logits = tf.nn.top_k(logits, k=k)[0][:, -1:]
logits = tf.where(logits < min_logits, mask, logits)
if p < 1.0:
sorted_logits = tf.sort(logits, direction="DESCENDING", axis=-1)
cum_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
min_logits = -tf.reduce_max(
tf.where(cum_probs <= p, -sorted_logits, mask), -1, keepdims=True
)
min_logits = tf.minimum(min_logits, sorted_logits[:, :1])
logits = tf.where(logits < min_logits, mask, logits)
return logits
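
# Illustrative sketch (not part of the original module): top-k and nucleus
# (top-p) filtering on a tiny logits row. `_demo_top_logits` is a hypothetical
# helper for exposition only.
def _demo_top_logits():
  logits = tf.constant([[4.0, 3.0, 2.0, 1.0]])
  top2 = top_logits(logits, k=2)  # -> [[4., 3., -1e10, -1e10]]
  nucleus = top_logits(logits, p=0.8)
  # -> [[4., -1e10, -1e10, -1e10]]: the top token alone covers ~64%; adding
  # the next (~24%) would exceed p, so only the largest logit is kept.
  return top2, nucleus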
class Pix2Seq(tf.keras.Model):
"""Pix2Seq model with Keras.
Pix2Seq consists of backbone, input token embedding, Pix2SeqTransformer.
"""
def __init__(
self,
backbone,
backbone_endpoint_name,
max_seq_len,
vocab_size,
hidden_size,
num_encoder_layers=6,
num_decoder_layers=6,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
**kwargs
):
super().__init__(**kwargs)
self._backbone = backbone
self._backbone_endpoint_name = backbone_endpoint_name
self._max_seq_len = max_seq_len
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
if hidden_size % 2 != 0:
raise ValueError("hidden_size must be a multiple of 2.")
self._dropout = tf.keras.layers.Dropout(self._drop_units)
self._stem_projection = tf.keras.layers.Dense(
self._hidden_size, name="stem_projection"
)
self._stem_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="stem_ln"
)
self._transformer = Pix2SeqTransformer(
max_seq_len=self._max_seq_len,
vocab_size=self._vocab_size,
hidden_size=self._hidden_size,
pos_encoding="learned",
num_encoder_layers=self._num_encoder_layers,
num_decoder_layers=self._num_decoder_layers,
drop_path=self._drop_path,
drop_units=self._drop_units,
drop_att=self._drop_att,
)
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
@property
def transformer(self) -> tf.keras.Model:
return self._transformer
def get_config(self):
return {
"backbone": self._backbone,
"backbone_endpoint_name": self._backbone_endpoint_name,
"max_seq_len": self._max_seq_len,
"vocab_size": self._vocab_size,
"hidden_size": self._hidden_size,
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"drop_path": self._drop_path,
"drop_units": self._drop_units,
"drop_att": self._drop_att,
}
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, transformer=self.transformer)
return items
def _generate_image_mask(
self, inputs: tf.Tensor, target_shape: tf.Tensor
) -> tf.Tensor:
"""Generates image mask from input image."""
mask = tf.expand_dims(
tf.cast(
tf.not_equal(tf.reduce_sum(inputs, axis=-1), 0.3), inputs.dtype
),
axis=-1,
)
mask = tf.image.resize(
mask, target_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
)
return mask
def call(
self,
inputs: tf.Tensor,
targets: Optional[tf.Tensor] = None,
training: bool = None,
) -> List[Any]:
features = self._backbone(inputs)[self._backbone_endpoint_name]
mask = tf.ones_like(features)
batch_size, h, w, num_channels = get_shape(features)
features = tf.reshape(features, [batch_size, h * w, num_channels])
features = self._stem_ln(
self._stem_projection(self._dropout(features, training)))
pos_emb = position_embedding_sine(
mask[:, :, :, 0], num_pos_features=self._hidden_size
)
pos_emb = tf.reshape(pos_emb, [batch_size, -1, self._hidden_size])
pos_emb = tf.cast(pos_emb, features.dtype)
tokens = None
if training:
logits = self._transformer(
{
"inputs": features,
"tokens": targets,
"pos_emb": pos_emb,
},
training,
)
else:
tokens, logits = self._transformer.infer({
"inputs": features,
"tokens": targets,
"pos_emb": pos_emb,
})
return [tokens, logits]
class Pix2SeqTransformer(tf.keras.layers.Layer):
"""Encoder and Decoder of Pix2Seq."""
def __init__(
self,
max_seq_len,
vocab_size,
hidden_size,
pos_encoding="learned",
num_encoder_layers=6,
num_decoder_layers=6,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
shared_embedding=True,
output_bias=True,
num_heads=8,
**kwargs
):
super().__init__(**kwargs)
self._max_seq_len = max_seq_len
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._pos_encoding = pos_encoding
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._shared_embedding = shared_embedding
self._output_bias = output_bias
self._num_heads = num_heads
add_seq_pos_emb(
self, self._pos_encoding, self._max_seq_len, self._hidden_size
)
add_vocab_token_emb(
self,
self._vocab_size,
self._hidden_size,
self._shared_embedding,
self._output_bias,
)
if self._num_encoder_layers > 0:
self._encoder = transformer.TransformerEncoder(
num_layers=self._num_encoder_layers,
dim=self._hidden_size,
mlp_ratio=4,
num_heads=self._num_heads,
drop_path=self._drop_path,
drop_units=self._drop_units,
drop_att=self._drop_att,
)
else:
self._encoder = None
self._output_ln_enc = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="output_ln_enc"
)
self._proj = tf.keras.layers.Dense(self._hidden_size, name="proj/linear")
self._proj_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="proj/ln"
)
self._proj_mlp = transformer.MLP(
num_layers=1,
dim=self._hidden_size,
mlp_ratio=4,
drop_path=self._drop_path,
drop_units=self._drop_units,
name="proj/mlp",
)
self._decoder = transformer.TransformerDecoder(
num_layers=self._num_decoder_layers,
dim=self._hidden_size,
mlp_ratio=4,
num_heads=self._num_heads,
drop_path=self._drop_path,
drop_units=self._drop_units,
drop_att=self._drop_att,
)
self._output_ln_dec = tf.keras.layers.LayerNormalization(
epsilon=1e-6, name="output_ln_dec"
)
def get_config(self):
return {
"max_seq_len": self._max_seq_len,
"vocab_size": self._vocab_size,
"hidden_size": self._hidden_size,
"pos_encoding": self._pos_encoding,
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"drop_path": self._drop_path,
"drop_units": self._drop_units,
"drop_att": self._drop_att,
"shared_embedding": self._shared_embedding,
"output_bias": self._output_bias,
"num_heads": self._num_heads,
}
def call(self, inputs: tf.Tensor, training: bool = None):
sources = inputs["inputs"]
targets = inputs["tokens"]
mem_pos_embed = inputs["pos_emb"]
sources = sources + mem_pos_embed
if self._encoder is not None:
encoded = self._encoder(sources, None, training=training, ret_list=False)
else:
encoded = sources
encoded = self._output_ln_enc(encoded)
encoded = self._proj_ln(self._proj(encoded))
encoded = encoded + mem_pos_embed
encoded = self._proj_mlp(encoded, training=training)
seq_len = tf.shape(targets)[1]
seq_pos_emb = tf.expand_dims(self.seq_pos_emb[:seq_len], 0)
inp_embedding = outp_embedding = self.token_embedding
target_emb = tf.gather(inp_embedding, targets) + seq_pos_emb
self_attention_mask = 1.0 - get_ar_mask(seq_len, target_emb.dtype)
decoded, _ = self._decoder(
target_emb, encoded, None, self_attention_mask, None, training)
decoded = self._output_ln_dec(decoded)
decoded = tf.cast(decoded, seq_pos_emb.dtype)
outp_embedding = tf.cast(outp_embedding, seq_pos_emb.dtype)
logits = tf.matmul(decoded, outp_embedding, transpose_b=True)
if self._output_bias:
logits = tf.nn.bias_add(logits, self.outp_bias)
return logits
def infer(
self,
inputs: tf.Tensor,
max_seq_len=None,
temperature=1.0,
top_k=0,
top_p=0.4,
sampling_callback=None,
):
"""Autoregressive (without teacher-forcing) prediction.
Note: the autoregressive sampling/inference time can be further optimized by
caching *transformed* key / value inside multi-head attention for the
`encoded` and previously generated tokens, but this may make the code less
readable.
Args:
inputs: prompt - `int` tokens with shape of (bsz, prompt_len). encoded -
`float` encoded representations for conditioning with shape of (bsz,
size, dim). This can be optional in case of pure decoder.
max_seq_len: `int` of max generated sequence length (including prompt).
temperature: `float` scalar for scaling the logits before sampling.
top_k: `int` scalar for truncating top-k tokens according to logits before
token sampling.
top_p: `float` scalar specifying the threshold of cumulative probability
for truncating tokens before token sampling.
sampling_callback: a callback `function` that takes `next_logits` and
returns `next_token`. This is used when users need a specific logic for
sampling. Defaults to `None` with standard free-form sampling.
Returns:
sampled tokens with shape of (bsz, max_seq_len-prompt_len).
logits (temperature-scaled) associated with sampled token, in shape of
(bsz, max_seq_len-prompt_len, vocab_size).
"""
sources = inputs["inputs"]
prompt = inputs["tokens"]
mem_pos_embed = inputs["pos_emb"]
sources = sources + mem_pos_embed
if self._encoder is not None:
encoded = self._encoder(sources, None, training=False, ret_list=False)
else:
encoded = sources
encoded = self._output_ln_enc(encoded)
encoded = self._proj_ln(self._proj(encoded))
encoded = encoded + mem_pos_embed
encoded = self._proj_mlp(encoded, training=False)
bsz = tf.shape(prompt)[0]
prompt_len = tf.shape(prompt)[1]
seq_len = self._max_seq_len if max_seq_len is None else max_seq_len
# (gunho) 500 (self._max_seq_len) -> 501 for prompt seq
seq_len = seq_len + 1
seq_pos_emb = tf.expand_dims(self.seq_pos_emb, 0)
inp_embedding = self.token_embedding
outp_embedding = inp_embedding
# Each step reads caches[:step] and tokens[step:next_step] and updates
# tokens[next_step], logits[next_step] and caches[step:next_step].
# On the first step, step=0, next_step=prompt_len. On subsequent steps
# next_step = step + 1.
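# Illustrative walkthrough (assuming prompt_len=1 and seq_len=5): the prompt
# step writes tokens[1] and caches[0]; the while_loop then runs at
# step=1, 2, 3, filling tokens[2..4] and caches[1..3], so tokens[prompt_len:]
# holds the seq_len - prompt_len sampled positions. Buffers are time-major:
#   caches: (seq_len - 1, num_decoder_layers, bsz, hidden_size)
#   tokens: (seq_len, bsz); logits: (seq_len, bsz, vocab_size)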
def loop_body(step, caches, tokens, logits, is_prompt=False):
if is_prompt:
assert step == 0
x = tf.gather(inp_embedding, tf.transpose(tokens[:prompt_len]))
input_pos_embed = seq_pos_emb[:, :prompt_len]
x += input_pos_embed
self_attention_mask = 1.0 - get_ar_mask(prompt_len, x.dtype)
caches_in = None
else:
x = tf.gather(inp_embedding, tf.transpose(tokens[step]))
input_pos_embed = seq_pos_emb[:, step]
x += input_pos_embed
x = tf.expand_dims(x, 1) # (bsz, 1, d)
self_attention_mask = tf.ones([1, 1, 1, 1])
caches_in = tf.transpose(caches[:step], [1, 2, 0, 3])
decoded, caches_out = self._decoder(
x, encoded, caches_in, self_attention_mask, None, training=False)
decoded = self._output_ln_dec(decoded)
# (gunho) transformer.py uses tf.float32 for numeric stability.
decoded = tf.cast(decoded, seq_pos_emb.dtype)
next_logits = tf.matmul( # only take the last for sampling next token.
decoded, outp_embedding, transpose_b=True
)[:, -1]
if self._output_bias:
next_logits = tf.nn.bias_add(next_logits, self.outp_bias)
# Scale and truncate logits and sample next token.
if sampling_callback:
next_token = sampling_callback(
next_logits, step, temperature, top_k, top_p
)
else:
sampling_logits = next_logits / tf.cast(temperature, tf.float32)
sampling_logits = top_logits(sampling_logits, k=top_k, p=top_p)
next_token = tf.random.categorical(
sampling_logits, num_samples=1, dtype=tf.int32
)[:, 0]
# Update internal states.
next_step = step + (prompt_len if is_prompt else 1)
caches_out = tf.transpose(caches_out, [2, 0, 1, 3])
caches = tf.tensor_scatter_nd_update(caches, [[step]], caches_out)
tokens = tf.tensor_scatter_nd_update(tokens, [[next_step]], [next_token])
logits = tf.tensor_scatter_nd_update(logits, [[next_step]], [next_logits])
return (next_step, caches, tokens, logits)
def cond(step, caches, tokens, logits):
del caches
del tokens
del logits
return tf.less(step, seq_len - 1)
caches_var = tf.zeros(
[seq_len-1, self._num_decoder_layers, bsz, self._hidden_size])
tokens_var = tf.zeros([seq_len, bsz], dtype=tf.int64)
logits_var = tf.zeros([seq_len, bsz, self._vocab_size], dtype=tf.float32)
indices = tf.expand_dims(tf.range(prompt_len), -1)
tokens_var = tf.tensor_scatter_nd_update(
tokens_var, indices, tf.transpose(prompt, [1, 0])
)
step = 0
step, caches_var, tokens_var, logits_var = loop_body(
step, caches_var, tokens_var, logits_var, is_prompt=True
)
if seq_len > prompt_len:
step, caches_var, tokens_var, logits_var = tf.while_loop(
cond=cond,
body=loop_body,
loop_vars=[step, caches_var, tokens_var, logits_var]
)
sampled_tokens = tf.transpose(tokens_var[prompt_len:], [1, 0])
sampled_tokens_logits = tf.transpose(logits_var[prompt_len:], [1, 0, 2])
sampled_tokens_logits = tf.reshape(
sampled_tokens_logits, [bsz, self._max_seq_len, self._vocab_size]
)
# sampled_tokens_logits : [bsz, max_seq_len-prompt_len, vocab_size]
return sampled_tokens, sampled_tokens_logits
| 22,027 | 32.941448 | 80 | py |
models | models-master/official/projects/pix2seq/modeling/transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specialized Transformers for Pix2Seq.
The position embeddings are added to the query and key for every self- and
cross-attention layer.
"""
import tensorflow as tf
class TransformerEncoder(tf.keras.layers.Layer):
"""Transformer encoder."""
def __init__(
self,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
self_attention=True,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._num_layers = num_layers
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._self_attention = self_attention
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
self.enc_layers = [
TransformerEncoderLayer( # pylint: disable=g-complex-comprehension
dim,
mlp_ratio,
num_heads,
drop_path,
drop_units,
drop_att,
self_attention=self_attention,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='transformer_encoder' + suffix_id(i),
)
for i in range(num_layers)
]
def call(self, x, mask, training, ret_list=False):
x_list = [x]
for i in range(self._num_layers):
x = self.enc_layers[i](x, mask, training)
x_list.append(x)
return (x, x_list) if ret_list else x
def get_config(self):
config = super().get_config()
updates = {
'num_layers': self._num_layers,
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'self_attention': self._self_attention,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class TransformerEncoderLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
self_attention=True,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self.self_attention = self_attention
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
if self_attention:
self.mha_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='mha/ln',
)
self.mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim // num_heads, dropout=drop_att, name='mha'
)
self.mlp = MLP(
1,
dim,
mlp_ratio,
drop_path,
drop_units,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='mlp',
)
self.dropp = DropPath(drop_path)
def call(self, x, mask, training):
# x shape (bsz, seq_len, dim_att), mask shape (bsz, seq_len, seq_len).
if self.self_attention:
x_ln = self.mha_ln(x)
x_residual = self.mha(x_ln, x_ln, x_ln, mask, training=training)
x = x + self.dropp(x_residual, training)
x = self.mlp(x, training)
return x
def get_config(self):
config = super().get_config()
updates = {
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'self_attention': self._self_attention,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
def suffix_id(i):
"""Return suffix id for layer/variable name."""
return '' if i == 0 else '_%d' % i
class DropPath(tf.keras.layers.Layer):
"""For stochastic depth."""
def __init__(self, drop_rate=0.0, **kwargs):
"""Initializes a drop path layer."""
super().__init__(**kwargs)
self._drop_rate = drop_rate
if self._drop_rate < 0 or self._drop_rate >= 1.0:
raise ValueError('drop_rate {} is outside [0, 1)'.format(self._drop_rate))
def call(self, x, training=False):
"""Performs a forward pass.
Args:
x: An input tensor of type tf.Tensor with shape [batch, height, width,
channels].
training: A boolean flag indicating whether training behavior should be
used (default: False).
Returns:
The output tensor.
"""
if self._drop_rate == 0.0 or not training:
return x
keep_rate = 1.0 - self._drop_rate
xshape = tf.shape(x)
drop_mask_shape = [xshape[0]] + [1] * (len(xshape) - 1)
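# floor(keep_rate + U[0, 1)) is 1 with probability keep_rate and 0
# otherwise, so each example keeps or drops the whole residual branch;
# dividing by keep_rate rescales survivors to preserve the expected output.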
drop_mask = keep_rate + tf.random.uniform(drop_mask_shape, dtype=x.dtype)
drop_mask = tf.math.divide(tf.floor(drop_mask), keep_rate)
return x * drop_mask
def get_config(self):
config = super().get_config()
updates = {
'drop_rate': self._drop_rate,
}
config.update(updates)
return config
class FeedForwardLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
dim_att=256,
dim_mlp=1024,
drop_units=0.1,
use_ln=False,
ln_scale_shift=False,
**kwargs
):
super().__init__(**kwargs)
self._dim_att = dim_att
self._dim_mlp = dim_mlp
self._drop_units = drop_units
self._use_ln = use_ln
self._ln_scale_shift = ln_scale_shift
self.dense1 = tf.keras.layers.Dense(
dim_mlp, activation=tf.nn.gelu, name='dense1'
)
self.dropout = tf.keras.layers.Dropout(drop_units)
self.dense2 = tf.keras.layers.Dense(dim_att, name='dense2')
if use_ln:
self.ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='mlp_ln',
)
else:
self.ln = lambda x: x
def call(self, x, training):
return self.dense2(self.dropout(self.ln(self.dense1(x)), training=training))
def get_config(self):
config = super().get_config()
updates = {
'dim_att': self._dim_att,
'dim_mlp': self._dim_mlp,
'drop_units': self._drop_units,
'use_ln': self._use_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class MLP(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
num_layers,
dim,
mlp_ratio,
drop_path=0.1,
drop_units=0.0,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._num_layers = num_layers
self._dim = dim
self._mlp_ratio = mlp_ratio
self._drop_path = drop_path
self._drop_units = drop_units
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
self.mlp_layers = []
self.layernorms = []
for i in range(num_layers):
self.mlp_layers.append(
FeedForwardLayer(
dim,
dim * mlp_ratio,
drop_units,
use_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='ffn' + suffix_id(i),
)
)
self.layernorms.append(
tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='ffn/ln' + suffix_id(i),
)
)
self.dropp = DropPath(drop_path)
def call(self, x, training, ret_list=False):
x_list = [x]
for i in range(self._num_layers):
x_residual = self.mlp_layers[i](self.layernorms[i](x), training)
x = x + self.dropp(x_residual, training)
x_list.append(x)
return (x, x_list) if ret_list else x
def get_config(self):
config = super().get_config()
updates = {
'num_layers': self._num_layers,
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class TransformerDecoderLayer(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
dim_x_att=None,
self_attention=True,
cross_attention=True,
use_mlp=True,
use_enc_ln=False,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._dim_x_att = dim_x_att
self._self_attention = self_attention
self._cross_attention = cross_attention
self._use_mlp = use_mlp
self._use_enc_ln = use_enc_ln
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
if self_attention:
self.self_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='self_mha/ln',
)
self.self_mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim // num_heads, dropout=drop_att, name='self_mha'
)
if cross_attention:
self.cross_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='cross_mha/ln',
)
if use_enc_ln:
self.enc_ln = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
center=ln_scale_shift,
scale=ln_scale_shift,
name='cross_mha/enc_ln',
)
else:
self.enc_ln = lambda x: x
dim_x_att = dim if dim_x_att is None else dim_x_att
self.cross_mha = tf.keras.layers.MultiHeadAttention(
num_heads, dim_x_att // num_heads, dropout=drop_att, name='cross_mha'
)
if use_mlp:
self.mlp = MLP(
1,
dim,
mlp_ratio,
drop_path,
drop_units,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='mlp',
)
self.dropp = DropPath(drop_path)
def call(self, x, enc, cache, mask_self, mask_cross, training):
"""x in (bsz, seq, d), enc in (bsz, seq', d)."""
x_for_cache = []
if self._self_attention:
x_for_cache = x_ln = kv_ln = self.self_ln(x)
if cache is not None: # Augment kv_ln with cache in (bsz, c_size, d).
q_size, k_size = tf.shape(x)[1], tf.shape(cache)[1]
mask_self = tf.concat([tf.ones([1, 1, q_size, k_size]), mask_self], -1)
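# Cached positions are fully visible to the new queries, hence the
# all-ones left padding of the causal mask above.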
kv_ln = tf.concat([cache, x_ln], axis=1)
x_res = self.self_mha(x_ln, kv_ln, kv_ln, mask_self, training=training)
x = x + self.dropp(x_res, training)
if self._cross_attention:
x_ln = self.cross_ln(x)
enc = self.enc_ln(enc)
x_res = self.cross_mha(x_ln, enc, enc, mask_cross, training=training)
x = x + self.dropp(x_res, training)
if self._use_mlp:
x = self.mlp(x, training)
return x, x_for_cache
def get_config(self):
config = super().get_config()
updates = {
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'dim_x_att': self._dim_x_att,
'self_attention': self._self_attention,
'cross_attention': self._cross_attention,
'use_mlp': self._use_mlp,
'use_enc_ln': self._use_enc_ln,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
class TransformerDecoder(tf.keras.layers.Layer): # pylint: disable=missing-docstring
def __init__(
self,
num_layers,
dim,
mlp_ratio,
num_heads,
drop_path=0.1,
drop_units=0.1,
drop_att=0.0,
dim_x_att=None,
self_attention=True,
cross_attention=True,
use_mlp=True,
use_enc_ln=False,
use_ffn_ln=False,
ln_scale_shift=True,
**kwargs
):
super().__init__(**kwargs)
self._num_layers = num_layers
self._dim = dim
self._mlp_ratio = mlp_ratio
self._num_heads = num_heads
self._drop_path = drop_path
self._drop_units = drop_units
self._drop_att = drop_att
self._dim_x_att = dim_x_att
self._self_attention = self_attention
self._cross_attention = cross_attention
self._use_mlp = use_mlp
self._use_enc_ln = use_enc_ln
self._use_ffn_ln = use_ffn_ln
self._ln_scale_shift = ln_scale_shift
self.dec_layers = [
TransformerDecoderLayer( # pylint: disable=g-complex-comprehension
dim,
mlp_ratio,
num_heads,
drop_path,
drop_units,
drop_att,
dim_x_att=dim_x_att,
self_attention=self_attention,
cross_attention=cross_attention,
use_mlp=use_mlp,
use_enc_ln=use_enc_ln,
use_ffn_ln=use_ffn_ln,
ln_scale_shift=ln_scale_shift,
name='transformer_decoder_layer' + suffix_id(i),
)
for i in range(num_layers)
]
def call(self, x, enc, caches, mask_self, mask_cross, training):
"""x in (bsz, seq, d), enc in (bsz, seq', d)."""
presents = []
for i in range(self._num_layers):
cache = None if caches is None else caches[i]
x, x_for_cache = self.dec_layers[i](
x, enc, cache, mask_self, mask_cross, training
)
presents.append(x_for_cache)
return x, tf.stack(presents)
def get_config(self):
config = super().get_config()
updates = {
'num_layers': self._num_layers,
'dim': self._dim,
'mlp_ratio': self._mlp_ratio,
'num_heads': self._num_heads,
'drop_path': self._drop_path,
'drop_units': self._drop_units,
'drop_att': self._drop_att,
'dim_x_att': self._dim_x_att,
'self_attention': self._self_attention,
'cross_attention': self._cross_attention,
'use_mlp': self._use_mlp,
'use_enc_ln': self._use_enc_ln,
'use_ffn_ln': self._use_ffn_ln,
'ln_scale_shift': self._ln_scale_shift,
}
config.update(updates)
return config
| 15,516 | 27.895717 | 90 | py |
models | models-master/official/projects/pix2seq/modeling/pix2seq_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Pix2Seq model."""
import tensorflow as tf
from official.projects.pix2seq.modeling import pix2seq_model
from official.vision.modeling.backbones import resnet
class Pix2SeqTest(tf.test.TestCase):
def test_forward(self):
hidden_size = 256
max_seq_len = 50
vocab_size = 164
image_size = 224
batch_size = 2
backbone = resnet.ResNet(50, bn_trainable=False)
backbone_endpoint_name = '5'
model = pix2seq_model.Pix2Seq(
backbone, backbone_endpoint_name, max_seq_len, vocab_size, hidden_size
)
_, outs = model(
tf.ones((batch_size, image_size, image_size, 3)),
tf.ones((batch_size, max_seq_len), tf.int64),
True,
)
self.assertLen(outs, 2)  # batch dimension of the output logits.
def test_forward_infer(self):
hidden_size = 256
max_seq_len = 50
vocab_size = 600
image_size = 640
batch_size = 2
backbone = resnet.ResNet(50, bn_trainable=False)
backbone_endpoint_name = '5'
model = pix2seq_model.Pix2Seq(
backbone, backbone_endpoint_name, max_seq_len, vocab_size, hidden_size
)
tokens, _ = model(
tf.ones((batch_size, image_size, image_size, 3)),
tf.ones((batch_size, 1), tf.int64) * 10,
False,
)
self.assertLen(tokens, 2)  # one sampled sequence per batch element.
if __name__ == '__main__':
tf.test.main()
| 1,974 | 29.859375 | 78 | py |
models | models-master/official/projects/pix2seq/tasks/pix2seq_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pix2Seq detection task definition."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.projects.pix2seq import utils
from official.projects.pix2seq.configs import pix2seq as pix2seq_cfg
from official.projects.pix2seq.dataloaders import pix2seq_input
from official.projects.pix2seq.modeling import pix2seq_model
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.modeling import backbones
@task_factory.register_task_cls(pix2seq_cfg.Pix2SeqTask)
class Pix2SeqTask(base_task.Task):
"""A single-replica view of training procedure.
Pix2Seq task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build Pix2Seq model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self._task_config.model.input_size
)
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=self._task_config.model.backbone,
norm_activation_config=self._task_config.model.norm_activation,
)
model = pix2seq_model.Pix2Seq(
backbone,
self._task_config.model.backbone_endpoint_name,
self._task_config.model.max_num_instances * 5,
self._task_config.model.vocab_size,
self._task_config.model.hidden_size,
self._task_config.model.num_encoder_layers,
self._task_config.model.num_decoder_layers,
self._task_config.model.drop_path,
self._task_config.model.drop_units,
self._task_config.model.drop_att,
)
return model
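# Note: the max sequence length above is max_num_instances * 5 because each
# object in Pix2Seq is encoded as 5 tokens: four quantized box coordinates
# followed by a class token.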
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self._task_config.init_checkpoint:
return
ckpt_dir_or_file = self._task_config.init_checkpoint
# Restoring checkpoint.
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if self._task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self._task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info(
'Finished loading pretrained checkpoint from %s', ckpt_dir_or_file
)
def build_inputs(
self, params, input_context: Optional[tf.distribute.InputContext] = None
):
"""Build input dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id
)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id,
)
else:
raise ValueError(
'Unknown decoder type: {}!'.format(params.decoder.type)
)
parser = pix2seq_input.Parser(
eos_token_weight=self._task_config.losses.eos_token_weight,
output_size=self._task_config.model.input_size[:2],
max_num_boxes=self._task_config.model.max_num_instances,
coord_vocab_shift=self._task_config.coord_vocab_shift,
quantization_bins=self._task_config.quantization_bins,
aug_scale_min=params.aug_scale_min,
aug_scale_max=params.aug_scale_max,
aug_color_jitter_strength=params.aug_color_jitter_strength,
label_shift=params.label_shift,
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training),
)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self, outputs, labels, aux_losses=None):
"""Builds DETR losses."""
targets = labels['targets']
weights = labels['weights']
targets = tf.one_hot(targets, self._task_config.model.vocab_size)
loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)(targets, outputs)
weights = tf.cast(weights, loss.dtype)
loss = tf.reduce_sum(loss * weights) / tf.reduce_sum(weights)
aux_losses = tf.add_n(aux_losses) if aux_losses else 0.0
total_loss = loss + aux_losses
return total_loss
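# Loss sketch: with per-token weights w_i and one-hot targets y_i, the value
# returned above is sum_i w_i * CE(y_i, logits_i) / sum_i w_i, a weighted
# mean of token-level cross-entropies; EOS tokens receive
# losses.eos_token_weight via the input parser.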
def build_metrics(self, training=True):
"""Builds detection metrics."""
metrics = []
metric_names = ['loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if not training:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self._task_config.annotation_file,
include_mask=False,
need_rescale_bboxes=False,
per_category_metrics=self._task_config.per_category_metrics,
)
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
_, outputs = model(features, labels['inputs'], training=True)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses
)
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
# Trainer class handles loss metric for you.
logs = {self.loss: loss}
all_losses = {
'loss': loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
tokens, logits = model(features, labels['prompt'], training=False)
# loss = self.build_losses(
# outputs=outputs, labels=labels, aux_losses=model.losses)
loss = 0.0
# Multiply for logging.
# Since we expect the gradient replica sum to happen in the optimizer,
# the loss is scaled with global num_boxes and weights.
# To have it more interpretable/comparable we scale it back when logging.
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
loss *= num_replicas_in_sync
# Evaluator class handles loss metric for you.
logs = {self.loss: loss}
outputs = utils.decode_object_seq_to_bbox(
logits,
tokens,
self._task_config.quantization_bins,
self._task_config.coord_vocab_shift,
)
pred_classes, pred_bboxes, scores, pred_num = outputs
image_size = features.shape[1:3].as_list()
# scale points to original image size during eval.
scale = utils.tf_float32(image_size)[tf.newaxis, :] / utils.tf_float32(
labels['image_info'][:, 1:2, :]
)
scale = scale * utils.tf_float32(labels['image_info'][:, 0:1, :])
pred_bboxes = utils.scale_points(pred_bboxes, scale)
predictions = {
'detection_boxes': pred_bboxes,
'detection_scores': scores,
'detection_classes': pred_classes,
'num_detections': pred_num,
'source_id': labels['id'],
'image_info': labels['image_info'],
}
ground_truths = {
'source_id': labels['id'],
'height': labels['image_info'][:, 0:1, 0],
'width': labels['image_info'][:, 0:1, 1],
'num_detections': tf.reduce_sum(
tf.cast(tf.math.greater(labels['classes'], 0), tf.int32), axis=-1
),
'boxes': labels['gt_boxes'],
'classes': labels['classes'],
'is_crowds': labels['is_crowd'],
}
logs.update({'predictions': predictions, 'ground_truths': ground_truths})
all_losses = {
'loss': loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.coco_metric.reset_states()
state = self.coco_metric
state.update_state(
step_outputs['ground_truths'], step_outputs['predictions']
)
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
return aggregated_logs.result()
| 10,915 | 33.875399 | 80 | py |
models | models-master/official/projects/assemblenet/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Training driver.
Commandline:
python -m official.projects.assemblenet.train \
--mode=train_and_eval --experiment=assemblenetplus_ucf101 \
--model_dir='YOUR MODEL SAVE GS BUCKET' \
--config_file=./official/projects/assemblenet/ucf101_assemblenet_plus_tpu.yaml \
--tpu=TPU_NAME
"""
from absl import app
from absl import flags
from absl import logging
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# pylint: disable=unused-import
from official.projects.assemblenet.configs import assemblenet as asn_configs
from official.projects.assemblenet.modeling import assemblenet as asn
from official.projects.assemblenet.modeling import assemblenet_plus as asnp
from official.vision import registry_imports
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
if 'train_and_eval' in FLAGS.mode:
assert (params.task.train_data.feature_shape ==
params.task.validation_data.feature_shape), (
f'train {params.task.train_data.feature_shape} != validate '
f'{params.task.validation_data.feature_shape}')
if 'assemblenet' in FLAGS.experiment:
if 'plus' in FLAGS.experiment:
if 'eval' in FLAGS.mode:
# Use the feature shape in validation_data for all jobs. The number of
# frames in train_data will be used to construct the Assemblenet++
# model.
params.task.model.backbone.assemblenet_plus.num_frames = (
params.task.validation_data.feature_shape[0])
shape = params.task.validation_data.feature_shape
else:
params.task.model.backbone.assemblenet_plus.num_frames = (
params.task.train_data.feature_shape[0])
shape = params.task.train_data.feature_shape
logging.info('mode %r num_frames %r feature shape %r', FLAGS.mode,
params.task.model.backbone.assemblenet_plus.num_frames,
shape)
else:
if 'eval' in FLAGS.mode:
# Use the feature shape in validation_data for all jobs. The number of
# frames in train_data will be used to construct the Assemblenet model.
params.task.model.backbone.assemblenet.num_frames = (
params.task.validation_data.feature_shape[0])
shape = params.task.validation_data.feature_shape
else:
params.task.model.backbone.assemblenet.num_frames = (
params.task.train_data.feature_shape[0])
shape = params.task.train_data.feature_shape
logging.info('mode %r num_frames %r feature shape %r', FLAGS.mode,
params.task.model.backbone.assemblenet.num_frames, shape)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
app.run(main)
| 4,934 | 40.125 | 80 | py |
models | models-master/official/projects/assemblenet/train_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
from absl import flags
from absl import logging
from absl.testing import flagsaver
import tensorflow as tf
from official.projects.assemblenet import train as train_lib
from official.vision.dataloaders import tfexample_utils
FLAGS = flags.FLAGS
class TrainTest(tf.test.TestCase):
def setUp(self):
super(TrainTest, self).setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
tf.io.gfile.makedirs(self._model_dir)
data_dir = os.path.join(self.get_temp_dir(), 'data')
tf.io.gfile.makedirs(data_dir)
self._data_path = os.path.join(data_dir, 'data.tfrecord')
# pylint: disable=g-complex-comprehension
examples = [
tfexample_utils.make_video_test_example(
image_shape=(36, 36, 3),
audio_shape=(20, 128),
label=random.randint(0, 100)) for _ in range(2)
]
# pylint: enable=g-complex-comprehension
tfexample_utils.dump_to_tfrecord(self._data_path, tf_examples=examples)
def test_run(self):
saved_flag_values = flagsaver.save_flag_values()
train_lib.tfm_flags.define_flags()
FLAGS.mode = 'train'
FLAGS.model_dir = self._model_dir
FLAGS.experiment = 'assemblenet50_kinetics600'
logging.info('Test pipeline correctness.')
num_frames = 4
params_override = json.dumps({
'runtime': {
'mixed_precision_dtype': 'float32',
},
'trainer': {
'train_steps': 1,
'validation_steps': 1,
},
'task': {
'model': {
'backbone': {
'assemblenet': {
'model_id': '26',
'num_frames': num_frames,
},
},
},
'train_data': {
'input_path': self._data_path,
'file_type': 'tfrecord',
'feature_shape': [num_frames, 32, 32, 3],
'global_batch_size': 2,
},
'validation_data': {
'input_path': self._data_path,
'file_type': 'tfrecord',
'global_batch_size': 2,
'feature_shape': [num_frames * 2, 32, 32, 3],
}
}
})
FLAGS.params_override = params_override
train_lib.main('unused_args')
FLAGS.mode = 'eval'
with train_lib.gin.unlock_config():
train_lib.main('unused_args')
flagsaver.restore_flag_values(saved_flag_values)
if __name__ == '__main__':
tf.test.main()
| 3,149 | 29.582524 | 75 | py |
models | models-master/official/projects/assemblenet/configs/assemblenet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.assemblenet.configs import assemblenet
from official.vision.configs import video_classification as exp_cfg
class AssemblenetTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('assemblenet50_kinetics600',),)
def test_assemblenet_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.VideoClassificationTask)
self.assertIsInstance(config.task.model, assemblenet.AssembleNetModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
def test_configs_conversion(self):
blocks = assemblenet.flat_lists_to_blocks(assemblenet.asn50_structure,
assemblenet.asn_structure_weights)
re_structure, re_weights = assemblenet.blocks_to_flat_lists(blocks)
self.assertAllEqual(
re_structure, assemblenet.asn50_structure, msg='asn50_structure')
self.assertAllEqual(
re_weights,
assemblenet.asn_structure_weights,
msg='asn_structure_weights')
if __name__ == '__main__':
tf.test.main()
| 2,049 | 39.196078 | 80 | py |
models | models-master/official/projects/assemblenet/configs/assemblenet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for AssembleNet/++ structures.
This structure is a `list` corresponding to a graph representation of the
network, where a node is a convolutional block and an edge specifies a
connection from one block to another.
Each node itself (in the structure list) is a list with the following format:
[block_level, [list_of_input_blocks], number_filter, temporal_dilation,
spatial_stride]. [list_of_input_blocks] should be the list of node indexes whose
values are less than the index of the node itself. The 'stems' of the network
directly taking raw inputs follow a different node format:
[stem_type, temporal_dilation]. The stem_type is -1 for RGB stem and is -2 for
optical flow stem. The stem_type -3 is reserved for the object segmentation
input.
In AssembleNet++lite, instead of passing a single `int` for number_filter, we
pass a list/tuple of three `int`s. They specify the number of channels to be
used for each layer in the inverted bottleneck modules.
The structure_weights specify the learned connection weights.
"""
import dataclasses
from typing import List, Optional, Tuple
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision.configs import backbones_3d
from official.vision.configs import common
from official.vision.configs import video_classification
@dataclasses.dataclass
class BlockSpec(hyperparams.Config):
level: int = -1
input_blocks: Tuple[int, ...] = tuple()
num_filters: int = -1
temporal_dilation: int = 1
spatial_stride: int = 1
input_block_weight: Tuple[float, ...] = tuple()
def flat_lists_to_blocks(model_structures, model_edge_weights):
"""Transforms the raw list structure configs to BlockSpec tuple."""
blocks = []
for node, edge_weights in zip(model_structures, model_edge_weights):
if node[0] < 0:
block = BlockSpec(level=node[0], temporal_dilation=node[1])
else:
block = BlockSpec( # pytype: disable=wrong-arg-types
level=node[0],
input_blocks=node[1],
num_filters=node[2],
temporal_dilation=node[3],
spatial_stride=node[4])
if edge_weights:
assert len(edge_weights[0]) == len(block.input_blocks), (
f'{len(edge_weights[0])} != {len(block.input_blocks)} at block '
f'{block} weight {edge_weights}')
block.input_block_weight = tuple(edge_weights[0])
blocks.append(block)
return tuple(blocks)
def blocks_to_flat_lists(blocks: List[BlockSpec]):
"""Transforms BlockSpec tuple to the raw list structure configs."""
# pylint: disable=g-complex-comprehension
# pylint: disable=g-long-ternary
model_structure = [[
b.level,
list(b.input_blocks), b.num_filters, b.temporal_dilation,
b.spatial_stride, 0
] if b.level >= 0 else [b.level, b.temporal_dilation] for b in blocks]
model_edge_weights = [
[list(b.input_block_weight)] if b.input_block_weight else []
for b in blocks
]
return model_structure, model_edge_weights
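# Round-trip sketch (this is exactly what configs/assemblenet_test.py checks):
#   blocks = flat_lists_to_blocks(asn50_structure, asn_structure_weights)
#   structure, weights = blocks_to_flat_lists(blocks)
#   # structure == asn50_structure; weights == asn_structure_weights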
# AssembleNet structure for 50/101 layer models, found using evolution with the
# Moments-in-Time dataset. This is the structure used for the experiments in the
# AssembleNet paper. The learned connectivity weights are also provided.
asn50_structure = [[-1, 4], [-1, 4], [-2, 1], [-2, 1], [0, [1], 32, 1, 1, 0],
[0, [0], 32, 4, 1, 0], [0, [0, 1, 2, 3], 32, 1, 1, 0],
[0, [2, 3], 32, 2, 1, 0], [1, [0, 4, 5, 6, 7], 64, 2, 2, 0],
[1, [0, 2, 4, 7], 64, 1, 2, 0], [1, [0, 5, 7], 64, 4, 2, 0],
[1, [0, 5], 64, 1, 2, 0], [2, [4, 8, 10, 11], 256, 1, 2, 0],
[2, [8, 9], 256, 4, 2, 0], [3, [12, 13], 512, 2, 2, 0]]
asn101_structure = [[-1, 4], [-1, 4], [-2, 1], [-2, 1], [0, [1], 32, 1, 1, 0],
[0, [0], 32, 4, 1, 0], [0, [0, 1, 2, 3], 32, 1, 1, 0],
[0, [2, 3], 32, 2, 1, 0], [1, [0, 4, 5, 6, 7], 64, 2, 2, 0],
[1, [0, 2, 4, 7], 64, 1, 2, 0], [1, [0, 5, 7], 64, 4, 2, 0],
[1, [0, 5], 64, 1, 2, 0], [2, [4, 8, 10, 11], 192, 1, 2, 0],
[2, [8, 9], 192, 4, 2, 0], [3, [12, 13], 256, 2, 2, 0]]
asn_structure_weights = [
[], [], [], [], [], [],
[[
0.13810564577579498, 0.8465337157249451, 0.3072969317436218,
0.2867436408996582
]], [[0.5846117734909058, 0.6066334843635559]],
[[
0.16382087767124176, 0.8852924704551697, 0.4039595425128937,
0.6823437809944153, 0.5331538319587708
]],
[[
0.028569204732775688, 0.10333596915006638, 0.7517264485359192,
0.9260114431381226
]], [[0.28832191228866577, 0.7627848982810974, 0.404977947473526]],
[[0.23474831879138947, 0.7841425538063049]],
[[
0.27616503834724426, 0.9514784812927246, 0.6568767428398132,
0.9547983407974243
]], [[0.5047007203102112, 0.8876819610595703]],
[[0.9892204403877258, 0.8454614877700806]]
]
# AssembleNet++ structure for 50 layer models, found with the Charades dataset.
# This is the model used in the experiments in the AssembleNet++ paper.
# Note that, in order to build AssembleNet++ with this structure, you also need
# to feed 'object segmentation input' to the network indicated as [-3, 4]. It's
# the 5th block in the architecture.
# If you don't plan to use the object input but want to still benefit from
# peer-attention in AssembleNet++ (with RGB and OF), please use the above
# AssembleNet-50 model instead with assemblenet_plus.py code.
full_asnp50_structure = [[-1, 2], [-1, 4], [-2, 2], [-2, 1], [-3, 4],
[0, [0, 1, 2, 3, 4], 32, 1, 1, 0],
[0, [0, 1, 4], 32, 4, 1, 0],
[0, [2, 3, 4], 32, 8, 1, 0],
[0, [2, 3, 4], 32, 1, 1, 0],
[1, [0, 1, 2, 4, 5, 6, 7, 8], 64, 4, 2, 0],
[1, [2, 3, 4, 7, 8], 64, 1, 2, 0],
[1, [0, 4, 5, 6, 7], 128, 8, 2, 0],
[2, [4, 11], 256, 8, 2, 0],
[2, [2, 3, 4, 5, 6, 7, 8, 10, 11], 256, 4, 2, 0],
[3, [12, 13], 512, 2, 2, 0]]
full_asnp_structure_weights = [[], [], [], [], [], [[0.6143830418586731, 0.7111759185791016, 0.19351491332054138, 0.1701001077890396, 0.7178536653518677]], [[0.5755624771118164, 0.5644599795341492, 0.7128658294677734]], [[0.26563042402267456, 0.3033692538738251, 0.8244096636772156]], [[0.07013848423957825, 0.07905343919992447, 0.8767927885055542]], [[0.5008697509765625, 0.5020178556442261, 0.49819135665893555, 0.5015180706977844, 0.4987695813179016, 0.4990265369415283, 0.499239057302475, 0.4974501430988312]], [[0.47034338116645813, 0.4694305658340454, 0.767791748046875, 0.5539310574531555, 0.4520096182823181]], [[0.2769702076911926, 0.8116549253463745, 0.597356915473938, 0.6585626602172852, 0.5915306210517883]], [[0.501274824142456, 0.5016682147979736]], [[0.0866393893957138, 0.08469288796186447, 0.9739039540290833, 0.058271341025829315, 0.08397126197814941, 0.10285478830337524, 0.18506969511508942, 0.23874442279338837, 0.9188644886016846]], [[0.4174623489379883, 0.5844835638999939]]] # pylint: disable=line-too-long
# AssembleNet++lite structure using inverted bottleneck blocks. By specifying
# the connection weights as [], the model can also automatically learn the
# connection weights during its training.
asnp_lite_structure = [[-1, 1], [-2, 1],
[0, [0, 1], [27, 27, 12], 1, 2, 0],
[0, [0, 1], [27, 27, 12], 4, 2, 0],
[1, [0, 1, 2, 3], [54, 54, 24], 2, 2, 0],
[1, [0, 1, 2, 3], [54, 54, 24], 1, 2, 0],
[1, [0, 1, 2, 3], [54, 54, 24], 4, 2, 0],
[1, [0, 1, 2, 3], [54, 54, 24], 1, 2, 0],
[2, [0, 1, 2, 3, 4, 5, 6, 7], [152, 152, 68], 1, 2, 0],
[2, [0, 1, 2, 3, 4, 5, 6, 7], [152, 152, 68], 4, 2, 0],
[3, [2, 3, 4, 5, 6, 7, 8, 9], [432, 432, 192], 2, 2, 0]]
asnp_lite_structure_weights = [[], [], [[0.19914183020591736, 0.9278576374053955]], [[0.010816320776939392, 0.888792097568512]], [[0.9473835825920105, 0.6303419470787048, 0.1704932451248169, 0.05950307101011276]], [[0.9560931324958801, 0.7898273468017578, 0.36138781905174255, 0.07344610244035721]], [[0.9213919043540955, 0.13418640196323395, 0.8371981978416443, 0.07936054468154907]], [[0.9441559910774231, 0.9435100555419922, 0.7253988981246948, 0.13498817384243011]], [[0.9964852333068848, 0.8427878618240356, 0.8895476460456848, 0.11014710366725922, 0.6270533204078674, 0.44782018661499023, 0.61344975233078, 0.44898226857185364]], [[0.9970942735671997, 0.7105681896209717, 0.5078442096710205, 0.0951600968837738, 0.624282717704773, 0.8527252674102783, 0.8105692863464355, 0.7857823967933655]], [[0.6180334091186523, 0.11882413923740387, 0.06102970987558365, 0.04484326392412186, 0.05602221190929413, 0.052324872463941574, 0.9969874024391174, 0.9987731575965881]]] # pylint: disable=line-too-long
@dataclasses.dataclass
class AssembleNet(hyperparams.Config):
model_id: str = '50'
num_frames: int = 0
combine_method: str = 'sigmoid'
blocks: Tuple[BlockSpec, ...] = tuple()
@dataclasses.dataclass
class AssembleNetPlus(hyperparams.Config):
model_id: str = '50'
num_frames: int = 0
attention_mode: str = 'None'
blocks: Tuple[BlockSpec, ...] = tuple()
use_object_input: bool = False
@dataclasses.dataclass
class Backbone3D(backbones_3d.Backbone3D):
"""Configuration for backbones.
Attributes:
type: 'str', type of backbone to be used, one of the fields below.
assemblenet: AssembleNet backbone config.
assemblenet_plus : AssembleNetPlus backbone config.
"""
type: Optional[str] = None
assemblenet: AssembleNet = dataclasses.field(default_factory=AssembleNet)
assemblenet_plus: AssembleNetPlus = dataclasses.field(
default_factory=AssembleNetPlus
)
@dataclasses.dataclass
class AssembleNetModel(video_classification.VideoClassificationModel):
"""The AssembleNet model config."""
model_type: str = 'assemblenet'
backbone: Backbone3D = dataclasses.field(
default_factory=lambda: Backbone3D(type='assemblenet')
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=True
)
)
max_pool_predictions: bool = False
@dataclasses.dataclass
class AssembleNetPlusModel(video_classification.VideoClassificationModel):
"""The AssembleNet model config."""
model_type: str = 'assemblenet_plus'
backbone: Backbone3D = dataclasses.field(
default_factory=lambda: Backbone3D(type='assemblenet_plus')
)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=True
)
)
max_pool_predictions: bool = False
@exp_factory.register_config_factory('assemblenet50_kinetics600')
def assemblenet_kinetics600() -> cfg.ExperimentConfig:
"""Video classification on Videonet with assemblenet."""
exp = video_classification.video_classification_kinetics600()
feature_shape = (32, 224, 224, 3)
exp.task.train_data.global_batch_size = 1024
exp.task.validation_data.global_batch_size = 32
exp.task.train_data.feature_shape = feature_shape
exp.task.validation_data.feature_shape = (120, 224, 224, 3)
exp.task.train_data.dtype = 'bfloat16'
exp.task.validation_data.dtype = 'bfloat16'
model = AssembleNetModel()
model.backbone.assemblenet.model_id = '50'
model.backbone.assemblenet.blocks = flat_lists_to_blocks(
asn50_structure, asn_structure_weights)
model.backbone.assemblenet.num_frames = feature_shape[0]
exp.task.model = model
assert exp.task.model.backbone.assemblenet.num_frames > 0, (
f'backbone num_frames '
f'{exp.task.model.backbone.assemblenet}')
return exp
@exp_factory.register_config_factory('assemblenet_ucf101')
def assemblenet_ucf101() -> cfg.ExperimentConfig:
"""Video classification on Videonet with assemblenet."""
exp = video_classification.video_classification_ucf101()
exp.task.train_data.dtype = 'bfloat16'
exp.task.validation_data.dtype = 'bfloat16'
feature_shape = (32, 224, 224, 3)
model = AssembleNetModel()
model.backbone.assemblenet.blocks = flat_lists_to_blocks(
asn50_structure, asn_structure_weights)
model.backbone.assemblenet.num_frames = feature_shape[0]
exp.task.model = model
assert exp.task.model.backbone.assemblenet.num_frames > 0, (
f'backbone num_frames '
f'{exp.task.model.backbone.assemblenet}')
return exp
@exp_factory.register_config_factory('assemblenetplus_ucf101')
def assemblenetplus_ucf101() -> cfg.ExperimentConfig:
"""Video classification on Videonet with assemblenet."""
exp = video_classification.video_classification_ucf101()
exp.task.train_data.dtype = 'bfloat16'
exp.task.validation_data.dtype = 'bfloat16'
feature_shape = (32, 224, 224, 3)
model = AssembleNetPlusModel()
model.backbone.assemblenet_plus.blocks = flat_lists_to_blocks(
asn50_structure, asn_structure_weights)
model.backbone.assemblenet_plus.num_frames = feature_shape[0]
exp.task.model = model
assert exp.task.model.backbone.assemblenet_plus.num_frames > 0, (
f'backbone num_frames '
f'{exp.task.model.backbone.assemblenet_plus}')
return exp
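# Fetching a registered config (as exercised in configs/assemblenet_test.py):
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config('assemblenet50_kinetics600')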
| 14,207 | 46.518395 | 1,032 | py |
models | models-master/official/projects/assemblenet/modeling/assemblenet_plus.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for the AssembleNet++ [2] models (without object input).
Requires the AssembleNet++ architecture to be specified in
FLAGS.model_structure (and optionally FLAGS.model_edge_weights). This is
identical to the form described in assemblenet.py for the AssembleNet. Please
check assemblenet.py for the detailed format of the model strings.
AssembleNet++ adds `peer-attention' to the basic AssembleNet, which allows each
conv. block connection to be conditioned differently based on another block [2].
It is a form of channel-wise attention. Note that we learn to apply attention
independently for each frame.
The `peer-attention' implementation in this file is the version that enables
one-shot differentiable search of attention connectivity (Fig. 2 in [2]), using
a softmax weighted summation of possible attention vectors.
[2] Michael S. Ryoo, AJ Piergiovanni, Juhana Kangaspunta, Anelia Angelova,
AssembleNet++: Assembling Modality Representations via Attention
Connections. ECCV 2020
https://arxiv.org/abs/2008.08072
In order to take advantage of object inputs, one will need to set the flag
FLAGS.use_object_input as True, and provide the list of input tensors as an
input to the network, as shown in run_asn_with_object.py. This will require a
pre-processed object data stream.
It uses (2+1)D convolutions for video representations. The main AssembleNet++
takes a 4-D (N*T)HWC tensor as an input (i.e., the batch dim and time dim are
mixed), and it reshapes a tensor to NT(H*W)C whenever a 1-D temporal conv. is
necessary. This is to run this on TPU efficiently.
"""
import functools
from typing import Any, Dict, List, Mapping, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.assemblenet.configs import assemblenet as cfg
from official.projects.assemblenet.modeling import assemblenet as asn
from official.projects.assemblenet.modeling import rep_flow_2d_layer as rf
from official.vision.modeling import factory_3d as model_factory
from official.vision.modeling.backbones import factory as backbone_factory
layers = tf.keras.layers
def softmax_merge_peer_attentions(peers):
"""Merge multiple peer-attention vectors with softmax weighted sum.
Summation weights are to be learned.
Args:
peers: A list of `Tensors` of size `[batch*time, channels]`.
Returns:
    The output `Tensor` of size `[batch*time, channels]`.
"""
data_format = tf.keras.backend.image_data_format()
dtype = peers[0].dtype
assert data_format == 'channels_last'
initial_attn_weights = tf.keras.initializers.TruncatedNormal(stddev=0.01)(
[len(peers)])
attn_weights = tf.cast(tf.nn.softmax(initial_attn_weights), dtype)
weighted_peers = []
for i, peer in enumerate(peers):
weighted_peers.append(attn_weights[i] * peer)
return tf.add_n(weighted_peers)
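# Illustrative usage sketch (shapes assumed, not from the original file):
#   peers = [tf.ones([4, 8]), 2.0 * tf.ones([4, 8]), 3.0 * tf.ones([4, 8])]
#   merged = softmax_merge_peer_attentions(peers)  # shape [4, 8]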
def apply_attention(inputs,
attention_mode=None,
attention_in=None,
use_5d_mode=False):
"""Applies peer-attention or self-attention to the input tensor.
Depending on the attention_mode, this function either applies channel-wise
self-attention or peer-attention. For the peer-attention, the function
combines multiple candidate attention vectors (given as attention_in), by
learning softmax-sum weights described in the AssembleNet++ paper. Note that
the attention is applied individually for each frame, which showed better
accuracies than using video-level attention.
Args:
inputs: A `Tensor`. Either 4D or 5D, depending of use_5d_mode.
    attention_mode: `str` specifying mode. If not 'peer', does self-attention.
    attention_in: A list of `Tensors` of size `[batch*time, channels]`.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
Returns:
The output `Tensor` after concatenation.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
if attention_mode == 'peer':
attn = softmax_merge_peer_attentions(attention_in)
else:
attn = tf.math.reduce_mean(inputs, [h_channel_loc, h_channel_loc + 1])
attn = tf.keras.layers.Dense(
units=inputs.shape[-1],
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=attn)
attn = tf.math.sigmoid(attn)
channel_attn = tf.expand_dims(
tf.expand_dims(attn, h_channel_loc), h_channel_loc)
inputs = tf.math.multiply(inputs, channel_attn)
return inputs
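# Illustrative usage sketch (shapes assumed, not from the original file):
# channel-wise self-attention on a [batch*time, h, w, c] feature map.
#   x = tf.random.normal([8, 14, 14, 64])
#   y = apply_attention(x)  # same shape as x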
class _ApplyEdgeWeight(layers.Layer):
"""Multiply weight on each input tensor.
A weight is assigned for each connection (i.e., each input tensor). This layer
is used by the fusion_with_peer_attention to compute the weighted inputs.
"""
def __init__(self,
weights_shape,
index: Optional[int] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
num_object_classes: Optional[int] = None,
**kwargs):
"""Constructor.
Args:
      weights_shape: A list of integers. Each element is the number of edges.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet++ model structure connection weights in
the string format.
      num_object_classes: `int` number of classes in the object input stream
        (e.g., 151 for ADE-20k). Used to exclude the object channels when
        computing the largest channel size for fusion.
      **kwargs: pass through arguments.
"""
super(_ApplyEdgeWeight, self).__init__(**kwargs)
self._weights_shape = weights_shape
self._index = index
self._use_5d_mode = use_5d_mode
self._model_edge_weights = model_edge_weights
self._num_object_classes = num_object_classes
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
def get_config(self):
config = {
'weights_shape': self._weights_shape,
'index': self._index,
'use_5d_mode': self._use_5d_mode,
'model_edge_weights': self._model_edge_weights,
'num_object_classes': self._num_object_classes
}
base_config = super(_ApplyEdgeWeight, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: tf.TensorShape):
if self._weights_shape[0] == 1:
self._edge_weights = 1.0
return
if self._index is None or not self._model_edge_weights:
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
trainable=True,
name='agg_weights')
else:
initial_weights_after_sigmoid = np.asarray(
self._model_edge_weights[self._index][0]).astype('float32')
# Initial_weights_after_sigmoid is never 0, as the initial weights are
      # based on the results of a successful connectivity search.
initial_weights = -np.log(1. / initial_weights_after_sigmoid - 1.)
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.constant_initializer(initial_weights),
trainable=False,
name='agg_weights')
def call(self,
inputs: List[tf.Tensor],
training: Optional[bool] = None) -> Mapping[Any, List[tf.Tensor]]:
use_5d_mode = self._use_5d_mode
dtype = inputs[0].dtype
assert len(inputs) > 1
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
# Note that, when using object inputs, object channel sizes are usually
# big. Since we do not want the object channel size to increase the number
# of parameters for every fusion, we exclude it when computing lg_channel.
if inp.shape[-1] > lg_channel and inp.shape[-1] != self._num_object_classes: # pylint: disable=line-too-long
        lg_channel = inp.shape[-1]
# loads or creates weight variables to fuse multiple inputs
weights = tf.math.sigmoid(tf.cast(self._edge_weights, dtype))
# Compute weighted inputs. We group inputs with the same channels.
per_channel_inps = dict({0: []})
for i, inp in enumerate(inputs):
if inp.shape[h_channel_loc] != sm_size[0] or inp.shape[h_channel_loc + 1] != sm_size[1]: # pylint: disable=line-too-long
assert sm_size[0] != 0
ratio = (inp.shape[h_channel_loc] + 1) // sm_size[0]
if use_5d_mode:
inp = tf.keras.layers.MaxPool3D([1, ratio, ratio], [1, ratio, ratio],
padding='same')(
inp)
else:
inp = tf.keras.layers.MaxPool2D([ratio, ratio], ratio,
padding='same')(
inp)
weights = tf.cast(weights, inp.dtype)
if inp.shape[-1] in per_channel_inps:
per_channel_inps[inp.shape[-1]].append(weights[i] * inp)
else:
per_channel_inps.update({inp.shape[-1]: [weights[i] * inp]})
return per_channel_inps
def fusion_with_peer_attention(inputs: List[tf.Tensor],
index: Optional[int] = None,
attention_mode: Optional[str] = None,
attention_in: Optional[List[tf.Tensor]] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
num_object_classes: Optional[int] = None):
"""Weighted summation of multiple tensors, while using peer-attention.
Summation weights are to be learned. Uses spatial max pooling and 1x1 conv.
to match their sizes. Before the summation, each connection (i.e., each input)
itself is scaled with channel-wise peer-attention. Notice that attention is
applied for each connection, conditioned based on attention_in.
Args:
inputs: A list of `Tensors`. Either 4D or 5D, depending of use_5d_mode.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
    attention_mode: `str` specifying mode. If not 'peer', does self-attention.
    attention_in: A list of `Tensors` of size `[batch*time, channels]`.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
    num_object_classes: `int` number of classes in the object input stream
      (e.g., 151 for ADE-20k). Used to exclude the object channels when
      computing the largest channel size for fusion.
Returns:
    The output `Tensor` after the weighted summation.
"""
if use_5d_mode:
h_channel_loc = 2
conv_function = asn.conv3d_same_padding
else:
h_channel_loc = 1
conv_function = asn.conv2d_fixed_padding
# If only 1 input.
if len(inputs) == 1:
inputs[0] = apply_attention(inputs[0], attention_mode, attention_in,
use_5d_mode)
return inputs[0]
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
# Note that, when using object inputs, object channel sizes are usually big.
# Since we do not want the object channel size to increase the number of
# parameters for every fusion, we exclude it when computing lg_channel.
if inp.shape[-1] > lg_channel and inp.shape[-1] != num_object_classes: # pylint: disable=line-too-long
lg_channel = inp.shape[3]
per_channel_inps = _ApplyEdgeWeight(
weights_shape=[len(inputs)],
index=index,
use_5d_mode=use_5d_mode,
model_edge_weights=model_edge_weights)(
inputs)
# Implementation of connectivity with peer-attention
if attention_mode:
for key, channel_inps in per_channel_inps.items():
for idx in range(len(channel_inps)):
with tf.name_scope('Connection_' + str(key) + '_' + str(idx)):
channel_inps[idx] = apply_attention(channel_inps[idx], attention_mode,
attention_in, use_5d_mode)
# Adding 1x1 conv layers (to match channel size) and fusing all inputs.
# We add inputs with the same channels first before applying 1x1 conv to save
# memory.
inps = []
for key, channel_inps in per_channel_inps.items():
if len(channel_inps) < 1:
continue
if len(channel_inps) == 1:
if key == lg_channel:
inp = channel_inps[0]
else:
inp = conv_function(
channel_inps[0], lg_channel, kernel_size=1, strides=1)
inps.append(inp)
else:
if key == lg_channel:
inp = tf.add_n(channel_inps)
else:
        inp = conv_function(
            tf.add_n(channel_inps), lg_channel, kernel_size=1, strides=1)
inps.append(inp)
return tf.add_n(inps)
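# Illustrative usage sketch (shapes assumed, not from the original file):
# fusing two differently sized streams without peer-attention. The smaller
# stream sets the spatial size and the larger one sets the channel size.
#   a = tf.random.normal([8, 28, 28, 32])
#   b = tf.random.normal([8, 14, 14, 64])
#   fused = fusion_with_peer_attention([a, b])  # shape [8, 14, 14, 64]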
def object_conv_stem(inputs):
"""Layers for an object input stem.
  It expects its input tensor to have a separate channel for each object
  class, i.e., each channel corresponds to one object class.
Args:
inputs: A `Tensor`.
Returns:
The output `Tensor`.
"""
inputs = tf.keras.layers.MaxPool2D(
pool_size=4, strides=4, padding='SAME')(
inputs=inputs)
inputs = tf.identity(inputs, 'initial_max_pool')
return inputs
class AssembleNetPlus(tf.keras.Model):
"""AssembleNet++ backbone."""
def __init__(self,
block_fn,
num_blocks: List[int],
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
use_object_input: bool = False,
attention_mode: str = 'peer',
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False,
**kwargs):
"""Generator for AssembleNet++ models.
Args:
      block_fn: `function` for the block to use within the model. Currently
        only has `bottleneck_block_interleave` as its option.
num_blocks: list of 4 `int`s denoting the number of blocks to include in
each of the 4 block groups. Each group consists of blocks that take
inputs of the same resolution.
num_frames: the number of frames in the input tensor.
model_structure: AssembleNetPlus model structure in the string format.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
Dimension should be `[batch*time, height, width, channels]`.
model_edge_weights: AssembleNet model structure connection weight in the
string format.
      use_object_input: `bool` indicating whether object inputs are used.
      attention_mode: `str` specifying the attention mode; 'peer' applies
        peer-attention, otherwise self-attention is used.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
**kwargs: pass through arguments.
"""
data_format = tf.keras.backend.image_data_format()
# Creation of the model graph.
    logging.info('model_structure=%r', model_structure)
logging.info('model_edge_weights=%r', model_edge_weights)
structure = model_structure
if use_object_input:
original_inputs = tf.keras.Input(shape=input_specs[0].shape[1:])
object_inputs = tf.keras.Input(shape=input_specs[1].shape[1:])
input_specs = input_specs[0]
else:
original_inputs = tf.keras.Input(shape=input_specs.shape[1:])
object_inputs = None
original_num_frames = num_frames
assert num_frames > 0, f'Invalid num_frames {num_frames}'
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(structure)):
grouping[structure[i][0]].append(i)
stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])
assert stem_count != 0
stem_filters = 128 // stem_count
if len(input_specs.shape) == 5:
first_dim = (
input_specs.shape[0] * input_specs.shape[1]
if input_specs.shape[0] and input_specs.shape[1] else -1)
reshape_inputs = tf.reshape(original_inputs,
(first_dim,) + input_specs.shape[2:])
elif len(input_specs.shape) == 4:
reshape_inputs = original_inputs
else:
raise ValueError(
f'Expect input spec to be 4 or 5 dimensions {input_specs.shape}')
if grouping[-2]:
# Instead of loading optical flows as inputs from data pipeline, we are
# applying the "Representation Flow" to RGB frames so that we can compute
# the flow within TPU/GPU on fly. It's essentially optical flow since we
# do it with RGBs.
axis = 3 if data_format == 'channels_last' else 1
flow_inputs = rf.RepresentationFlow(
original_num_frames,
depth=reshape_inputs.shape.as_list()[axis],
num_iter=40,
bottleneck=1)(
reshape_inputs)
streams = []
for i in range(len(structure)):
with tf.name_scope('Node_' + str(i)):
if structure[i][0] == -1:
inputs = asn.rgb_conv_stem(
reshape_inputs,
original_num_frames,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -2:
inputs = asn.flow_conv_stem(
flow_inputs,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -3:
# In order to use the object inputs, you need to feed your object
# input tensor here.
inputs = object_conv_stem(object_inputs)
streams.append(inputs)
else:
block_number = structure[i][0]
combined_inputs = [
streams[structure[i][1][j]]
for j in range(0, len(structure[i][1]))
]
logging.info(grouping)
nodes_below = []
for k in range(-3, structure[i][0]):
nodes_below = nodes_below + grouping[k]
peers = []
if attention_mode:
lg_channel = -1
# To show structures for attention we show nodes_below
logging.info(nodes_below)
for k in nodes_below:
logging.info(streams[k].shape)
lg_channel = max(streams[k].shape[3], lg_channel)
for node_index in nodes_below:
attn = tf.reduce_mean(streams[node_index], [1, 2])
attn = tf.keras.layers.Dense(
units=lg_channel,
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=attn)
peers.append(attn)
combined_inputs = fusion_with_peer_attention(
combined_inputs,
index=i,
attention_mode=attention_mode,
attention_in=peers,
use_5d_mode=False)
graph = asn.block_group(
inputs=combined_inputs,
filters=structure[i][2],
block_fn=block_fn,
blocks=num_blocks[block_number],
strides=structure[i][4],
name='block_group' + str(i),
block_level=structure[i][0],
num_frames=num_frames,
temporal_dilation=structure[i][3])
streams.append(graph)
if use_object_input:
inputs = [original_inputs, object_inputs]
else:
inputs = original_inputs
super(AssembleNetPlus, self).__init__(
inputs=inputs, outputs=streams, **kwargs)
@tf.keras.utils.register_keras_serializable(package='Vision')
class AssembleNetPlusModel(tf.keras.Model):
"""An AssembleNet++ model builder."""
def __init__(self,
backbone,
num_classes,
num_frames: int,
model_structure: List[Any],
input_specs: Optional[Dict[str,
tf.keras.layers.InputSpec]] = None,
max_pool_predictions: bool = False,
use_object_input: bool = False,
**kwargs):
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
if use_object_input and 'object' not in input_specs:
input_specs['object'] = layers.InputSpec(shape=[None, None, None, None])
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'num_frames': num_frames,
'input_specs': input_specs,
'model_structure': model_structure,
}
self._input_specs = input_specs
self._backbone = backbone
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(model_structure)):
grouping[model_structure[i][0]].append(i)
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
if use_object_input:
streams = self._backbone(inputs=[inputs['image'], inputs['object']])
else:
streams = self._backbone(inputs=inputs['image'])
outputs = asn.multi_stream_heads(
streams,
grouping[3],
num_frames,
num_classes,
max_pool_predictions=max_pool_predictions)
super(AssembleNetPlusModel, self).__init__(
inputs=inputs, outputs=outputs, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def assemblenet_plus(assemblenet_depth: int,
num_classes: int,
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
use_object_input: bool = False,
attention_mode: Optional[str] = None,
max_pool_predictions: bool = False,
**kwargs):
"""Returns the AssembleNet++ model for a given size and number of output classes."""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if assemblenet_depth not in asn.ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
if use_object_input:
    # assuming input_specs = [video, obj] when use_object_input = True
input_specs_dict = {'image': input_specs[0], 'object': input_specs[1]}
else:
input_specs_dict = {'image': input_specs}
params = asn.ASSEMBLENET_SPECS[assemblenet_depth]
backbone = AssembleNetPlus(
block_fn=params['block'],
num_blocks=params['num_blocks'],
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
use_object_input=use_object_input,
attention_mode=attention_mode,
**kwargs)
return AssembleNetPlusModel(
backbone,
num_classes=num_classes,
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
use_object_input=use_object_input,
max_pool_predictions=max_pool_predictions,
**kwargs)
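# Illustrative usage sketch mirroring assemblenet_plus_test.py (depth and
# shape values assumed, not prescribed by this file):
#   model = assemblenet_plus(
#       assemblenet_depth=50,
#       num_classes=101,
#       num_frames=32,
#       model_structure=cfg.asn50_structure,
#       model_edge_weights=cfg.asn_structure_weights,
#       input_specs=layers.InputSpec(shape=(2, 32, 64, 64, 3)))
#   logits = model(tf.ones([2, 32, 64, 64, 3]))  # shape [2, 101]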
@backbone_factory.register_backbone_builder('assemblenet_plus')
def build_assemblenet_plus(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds assemblenet++ backbone."""
del l2_regularizer
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'assemblenet_plus'
assemblenet_depth = int(backbone_cfg.model_id)
if assemblenet_depth not in asn.ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
model_structure, model_edge_weights = cfg.blocks_to_flat_lists(
backbone_cfg.blocks)
params = asn.ASSEMBLENET_SPECS[assemblenet_depth]
block_fn = functools.partial(
params['block'],
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
backbone = AssembleNetPlus(
block_fn=block_fn,
num_blocks=params['num_blocks'],
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
use_object_input=backbone_cfg.use_object_input,
attention_mode=backbone_cfg.attention_mode,
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
logging.info('Number of parameters in AssembleNet++ backbone: %f M.',
backbone.count_params() / 10.**6)
return backbone
@model_factory.register_model_builder('assemblenet_plus')
def build_assemblenet_plus_model(
input_specs: tf.keras.layers.InputSpec,
model_config: cfg.AssembleNetPlusModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds assemblenet++ model."""
input_specs_dict = {'image': input_specs}
backbone = build_assemblenet_plus(input_specs, model_config.backbone,
model_config.norm_activation,
l2_regularizer)
backbone_cfg = model_config.backbone.get()
model_structure, _ = cfg.blocks_to_flat_lists(backbone_cfg.blocks)
model = AssembleNetPlusModel(
backbone,
num_classes=num_classes,
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
max_pool_predictions=model_config.max_pool_predictions,
use_object_input=backbone_cfg.use_object_input)
return model
| 28,838 | 37.400799 | 127 | py |
models | models-master/official/projects/assemblenet/modeling/assemblenet_plus_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for assemblenet++ network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.assemblenet.configs import assemblenet as asn_config
from official.projects.assemblenet.modeling import assemblenet_plus as asnp
class AssembleNetPlusTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters((50, True, ''), (50, False, ''),
(50, False, 'peer'), (50, True, 'peer'),
(50, True, 'self'), (50, False, 'self'))
def test_network_creation(self, depth, use_object_input, attention_mode):
batch_size = 2
num_frames = 32
img_size = 64
    num_classes = 101  # UCF-101
num_object_classes = 151 # 151 is for ADE-20k
if use_object_input:
vid_input = (batch_size * num_frames, img_size, img_size, 3)
obj_input = (batch_size * num_frames, img_size, img_size,
num_object_classes)
input_specs = (tf.keras.layers.InputSpec(shape=(vid_input)),
tf.keras.layers.InputSpec(shape=(obj_input)))
vid_inputs = np.random.rand(batch_size * num_frames, img_size, img_size,
3)
obj_inputs = np.random.rand(batch_size * num_frames, img_size, img_size,
num_object_classes)
inputs = [vid_inputs, obj_inputs]
# We are using the full_asnp50_structure, since we feed both video and
# object.
model_structure = asn_config.full_asnp50_structure # Uses object input.
edge_weights = asn_config.full_asnp_structure_weights
else:
# video input: (batch_size, FLAGS.num_frames, image_size, image_size, 3)
input_specs = tf.keras.layers.InputSpec(
shape=(batch_size, num_frames, img_size, img_size, 3))
inputs = np.random.rand(batch_size, num_frames, img_size, img_size, 3)
# Here, we are using model_structures.asn50_structure for AssembleNet++
# instead of full_asnp50_structure. By using asn50_structure, it
# essentially becomes AssembleNet++ without objects, only requiring RGB
# inputs (and optical flow to be computed inside the model).
model_structure = asn_config.asn50_structure
edge_weights = asn_config.asn_structure_weights
model = asnp.assemblenet_plus(
assemblenet_depth=depth,
num_classes=num_classes,
num_frames=num_frames,
model_structure=model_structure,
model_edge_weights=edge_weights,
input_specs=input_specs,
use_object_input=use_object_input,
attention_mode=attention_mode,
)
outputs = model(inputs)
self.assertAllEqual(outputs.shape.as_list(), [batch_size, num_classes])
if __name__ == '__main__':
tf.test.main()
| 3,389 | 39.843373 | 78 | py |
models | models-master/official/projects/assemblenet/modeling/rep_flow_2d_layer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for 'Representation Flow' layer [1].
Representation flow layer is a generalization of optical flow extraction; the
layer could be inserted anywhere within a CNN to capture feature movements. This
is the version taking 4D tensor with the shape [batch*time, height, width,
channels], to make this run on TPU.
[1] AJ Piergiovanni and Michael S. Ryoo,
Representation Flow for Action Recognition. CVPR 2019.
"""
import numpy as np
import tensorflow as tf
layers = tf.keras.layers
BATCH_NORM_DECAY = 0.99
BATCH_NORM_EPSILON = 1e-5
def build_batch_norm(init_zero: bool = False,
bn_decay: float = BATCH_NORM_DECAY,
bn_epsilon: float = BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Performs a batch normalization followed by a ReLU.
Args:
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
    A batch normalization layer (synchronized if `use_sync_bn` is True).
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if data_format == 'channels_first':
axis = 1
else:
axis = -1
if use_sync_bn:
batch_norm = layers.experimental.SyncBatchNormalization(
axis=axis,
momentum=bn_decay,
epsilon=bn_epsilon,
gamma_initializer=gamma_initializer)
else:
batch_norm = layers.BatchNormalization(
axis=axis,
momentum=bn_decay,
epsilon=bn_epsilon,
fused=True,
gamma_initializer=gamma_initializer)
return batch_norm
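# Illustrative usage sketch (shapes assumed, not from the original file):
#   bn = build_batch_norm()
#   y = bn(tf.random.normal([2, 8, 8, 16]))  # normalized, same shape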
def divergence(p1, p2, f_grad_x, f_grad_y, name):
"""Computes the divergence value used with TV-L1 optical flow algorithm.
Args:
p1: 'Tensor' input.
p2: 'Tensor' input in the next frame.
f_grad_x: 'Tensor' x gradient of F value used in TV-L1.
f_grad_y: 'Tensor' y gradient of F value used in TV-L1.
name: 'str' name for the variable scope.
Returns:
A `Tensor` with the same `data_format` and shape as input.
"""
data_format = tf.keras.backend.image_data_format()
df = 'NHWC' if data_format == 'channels_last' else 'NCHW'
with tf.name_scope('divergence_' + name):
if data_format == 'channels_last':
p1 = tf.pad(p1[:, :, :-1, :], [[0, 0], [0, 0], [1, 0], [0, 0]])
p2 = tf.pad(p2[:, :-1, :, :], [[0, 0], [1, 0], [0, 0], [0, 0]])
else:
p1 = tf.pad(p1[:, :, :, :-1], [[0, 0], [0, 0], [0, 0], [1, 0]])
p2 = tf.pad(p2[:, :, :-1, :], [[0, 0], [0, 0], [1, 0], [0, 0]])
grad_x = tf.nn.conv2d(p1, f_grad_x, [1, 1, 1, 1], 'SAME', data_format=df)
grad_y = tf.nn.conv2d(p2, f_grad_y, [1, 1, 1, 1], 'SAME', data_format=df)
return grad_x + grad_y
def forward_grad(x, f_grad_x, f_grad_y, name):
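  """Computes forward gradients of `x` along x and y via the given filters."""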
data_format = tf.keras.backend.image_data_format()
with tf.name_scope('forward_grad_' + name):
df = 'NHWC' if data_format == 'channels_last' else 'NCHW'
grad_x = tf.nn.conv2d(x, f_grad_x, [1, 1, 1, 1], 'SAME', data_format=df)
grad_y = tf.nn.conv2d(x, f_grad_y, [1, 1, 1, 1], 'SAME', data_format=df)
return grad_x, grad_y
def norm_img(x):
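  """Linearly scales `x` to the range [0, 255]; returns `x` if constant."""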
mx = tf.reduce_max(x)
mn = tf.reduce_min(x)
if mx == mn:
return x
else:
return 255 * (x - mn) / (mx - mn)
class RepresentationFlow(layers.Layer):
"""Computes the representation flow motivated by TV-L1 optical flow."""
def __init__(self,
time: int,
depth: int,
num_iter: int = 20,
bottleneck: int = 32,
train_feature_grad: bool = False,
train_divergence: bool = False,
train_flow_grad: bool = False,
train_hyper: bool = False,
**kwargs):
"""Constructor.
Args:
time: 'int' number of frames in the input tensor.
depth: channel depth of the input tensor.
num_iter: 'int' number of iterations to use for the flow computation.
bottleneck: 'int' number of filters to be used for the flow computation.
train_feature_grad: Train image grad params.
train_divergence: train divergence params
train_flow_grad: train flow grad params.
train_hyper: train rep flow hyperparams.
**kwargs: keyword arguments to be passed to the parent constructor.
Returns:
A `Tensor` with the same `data_format` and shape as input.
"""
super(RepresentationFlow, self).__init__(**kwargs)
self._time = time
self._depth = depth
self._num_iter = num_iter
self._bottleneck = bottleneck
self._train_feature_grad = train_feature_grad
self._train_divergence = train_divergence
self._train_flow_grad = train_flow_grad
self._train_hyper = train_hyper
def get_config(self):
config = {
'time': self._time,
'num_iter': self._num_iter,
'bottleneck': self._bottleneck,
'train_feature_grad': self._train_feature_grad,
'train_divergence': self._train_divergence,
'train_flow_grad': self._train_flow_grad,
'train_hyper': self._train_hyper,
}
base_config = super(RepresentationFlow, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: tf.TensorShape):
img_grad = np.array([-0.5, 0, 0.5], dtype='float32')
img_grad_x = np.repeat(
np.reshape(img_grad, (1, 3, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.img_grad_x = self.add_weight(
shape=img_grad_x.shape,
initializer=tf.constant_initializer(img_grad_x),
trainable=self._train_feature_grad,
name='img_grad_x')
img_grad_y = np.repeat(
np.reshape(img_grad, (3, 1, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.img_grad_y = self.add_weight(
shape=img_grad_y.shape,
initializer=tf.constant_initializer(img_grad_y),
trainable=self._train_feature_grad,
name='img_grad_y')
f_grad = np.array([-1, 1], dtype='float32')
f_grad_x = np.repeat(
np.reshape(f_grad, (1, 2, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_x = self.add_weight(
shape=f_grad_x.shape,
initializer=tf.constant_initializer(f_grad_x),
trainable=self._train_divergence,
name='f_grad_x')
f_grad_y = np.repeat(
np.reshape(f_grad, (2, 1, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_y = self.add_weight(
shape=f_grad_y.shape,
initializer=tf.constant_initializer(f_grad_y),
trainable=self._train_divergence,
name='f_grad_y')
f_grad_x2 = np.repeat(
np.reshape(f_grad, (1, 2, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_x2 = self.add_weight(
shape=f_grad_x2.shape,
initializer=tf.constant_initializer(f_grad_x2),
trainable=self._train_flow_grad,
name='f_grad_x2')
f_grad_y2 = np.repeat(
np.reshape(f_grad, (2, 1, 1, 1)), self._bottleneck, axis=2) * np.eye(
self._bottleneck, dtype='float32')
self.f_grad_y2 = self.add_weight(
shape=f_grad_y2.shape,
initializer=tf.constant_initializer(f_grad_y2),
trainable=self._train_flow_grad,
name='f_grad_y2')
self.t = self.add_weight(
name='theta',
initializer=tf.constant_initializer(0.3),
trainable=self._train_hyper)
self.l = self.add_weight(
name='lambda',
initializer=tf.constant_initializer(0.15),
trainable=self._train_hyper)
self.a = self.add_weight(
name='tau',
initializer=tf.constant_initializer(0.25),
trainable=self._train_hyper)
self.t = tf.abs(self.t) + 1e-12
self.l_t = self.l * self.t
self.taut = self.a / self.t
    self._bottleneck_conv1 = None
    self._bottleneck_conv2 = None
if self._bottleneck > 1:
self._bottleneck_conv1 = layers.Conv2D(
filters=self._bottleneck,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling(),
name='rf/bottleneck1')
self._bottleneck_conv2 = layers.Conv2D(
filters=self._depth,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling(),
name='rf/bottleneck2')
self._batch_norm = build_batch_norm(init_zero=True)
def call(self, inputs: tf.Tensor, training: bool = None) -> tf.Tensor:
"""Perform representation flows.
Args:
inputs: list of `Tensors` of shape `[batch*time, height, width,
channels]`.
training: True for training phase.
Returns:
A tensor of the same shape as the inputs.
"""
data_format = tf.keras.backend.image_data_format()
df = 'NHWC' if data_format == 'channels_last' else 'NCHW'
axis = 3 if data_format == 'channels_last' else 1 # channel axis
dtype = inputs.dtype
residual = inputs
depth = inputs.shape.as_list()[axis]
# assert depth == self._depth, f'rep_flow {depth} != {self._depth}'
if self._bottleneck == 1:
inputs = tf.reduce_mean(inputs, axis=axis)
inputs = tf.expand_dims(inputs, -1)
elif depth != self._bottleneck:
inputs = self._bottleneck_conv1(inputs)
input_shape = inputs.shape.as_list()
inp = norm_img(inputs)
inp = tf.reshape(
inp,
(-1, self._time, inputs.shape[1], inputs.shape[2], inputs.shape[3]))
inp = tf.ensure_shape(
inp, (None, self._time, input_shape[1], input_shape[2], input_shape[3]))
img1 = tf.reshape(
inp[:, :-1], (-1, tf.shape(inp)[2], tf.shape(inp)[3], tf.shape(inp)[4]))
img2 = tf.reshape(
inp[:, 1:], (-1, tf.shape(inp)[2], tf.shape(inp)[3], tf.shape(inp)[4]))
img1 = tf.ensure_shape(
img1, (None, inputs.shape[1], inputs.shape[2], inputs.shape[3]))
img2 = tf.ensure_shape(
img2, (None, inputs.shape[1], inputs.shape[2], inputs.shape[3]))
u1 = tf.zeros_like(img1, dtype=dtype)
u2 = tf.zeros_like(img2, dtype=dtype)
l_t = self.l_t
taut = self.taut
grad2_x = tf.nn.conv2d(
img2, self.img_grad_x, [1, 1, 1, 1], 'SAME', data_format=df)
grad2_y = tf.nn.conv2d(
img2, self.img_grad_y, [1, 1, 1, 1], 'SAME', data_format=df)
p11 = tf.zeros_like(img1, dtype=dtype)
p12 = tf.zeros_like(img1, dtype=dtype)
p21 = tf.zeros_like(img1, dtype=dtype)
p22 = tf.zeros_like(img1, dtype=dtype)
gsqx = grad2_x**2
gsqy = grad2_y**2
grad = gsqx + gsqy + 1e-12
rho_c = img2 - grad2_x * u1 - grad2_y * u2 - img1
for _ in range(self._num_iter):
rho = rho_c + grad2_x * u1 + grad2_y * u2 + 1e-12
v1 = tf.zeros_like(img1, dtype=dtype)
v2 = tf.zeros_like(img2, dtype=dtype)
mask1 = rho < -l_t * grad
tmp11 = tf.where(mask1, l_t * grad2_x,
tf.zeros_like(grad2_x, dtype=dtype))
tmp12 = tf.where(mask1, l_t * grad2_y,
tf.zeros_like(grad2_y, dtype=dtype))
mask2 = rho > l_t * grad
tmp21 = tf.where(mask2, -l_t * grad2_x,
tf.zeros_like(grad2_x, dtype=dtype))
tmp22 = tf.where(mask2, -l_t * grad2_y,
tf.zeros_like(grad2_y, dtype=dtype))
mask3 = (~mask1) & (~mask2) & (grad > 1e-12)
tmp31 = tf.where(mask3, (-rho / grad) * grad2_x,
tf.zeros_like(grad2_x, dtype=dtype))
tmp32 = tf.where(mask3, (-rho / grad) * grad2_y,
tf.zeros_like(grad2_y, dtype=dtype))
v1 = tmp11 + tmp21 + tmp31 + u1
v2 = tmp12 + tmp22 + tmp32 + u2
u1 = v1 + self.t * divergence(p11, p12, self.f_grad_x, self.f_grad_y,
'div_p1')
u2 = v2 + self.t * divergence(p21, p22, self.f_grad_x, self.f_grad_y,
'div_p2')
u1x, u1y = forward_grad(u1, self.f_grad_x2, self.f_grad_y2, 'u1')
u2x, u2y = forward_grad(u2, self.f_grad_x2, self.f_grad_y2, 'u2')
p11 = (p11 + taut * u1x) / (1. + taut * tf.sqrt(u1x**2 + u1y**2 + 1e-12))
p12 = (p12 + taut * u1y) / (1. + taut * tf.sqrt(u1x**2 + u1y**2 + 1e-12))
p21 = (p21 + taut * u2x) / (1. + taut * tf.sqrt(u2x**2 + u2y**2 + 1e-12))
p22 = (p22 + taut * u2y) / (1. + taut * tf.sqrt(u2x**2 + u2y**2 + 1e-12))
u1 = tf.reshape(u1, (-1, self._time - 1, tf.shape(u1)[1],
tf.shape(u1)[2], tf.shape(u1)[3]))
u2 = tf.reshape(u2, (-1, self._time - 1, tf.shape(u2)[1],
tf.shape(u2)[2], tf.shape(u2)[3]))
flow = tf.concat([u1, u2], axis=axis + 1)
flow = tf.concat([
flow,
tf.reshape(
flow[:, -1, :, :, :],
(-1, 1, tf.shape(u1)[2], tf.shape(u1)[3], tf.shape(u1)[4] * 2))
],
axis=1)
    # The last frame was repeated above as padding: flow is [bs, t, w, h, 2*c].
flow = tf.reshape(
flow, (-1, tf.shape(u1)[2], tf.shape(u2)[3], tf.shape(u1)[4] * 2))
    # flow is [bs*t, w, h, 2*c]
if self._bottleneck == 1:
output_shape = residual.shape.as_list()
output_shape[-1] = self._bottleneck * 2
flow = tf.ensure_shape(flow, output_shape)
return flow
else:
flow = self._bottleneck_conv2(flow)
flow = self._batch_norm(flow)
flow = tf.ensure_shape(flow, residual.shape)
return tf.nn.relu(flow + residual)
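# Illustrative usage sketch (shapes assumed, not from the original file):
# representation flow over 8-frame clips of 32-channel features.
#   layer = RepresentationFlow(time=8, depth=32, num_iter=10, bottleneck=8)
#   x = tf.random.normal([2 * 8, 28, 28, 32])  # [batch*time, h, w, c]
#   y = layer(x)  # shape [16, 28, 28, 32] since bottleneck > 1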
| 14,584 | 35.012346 | 80 | py |
models | models-master/official/projects/assemblenet/modeling/assemblenet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for the AssembleNet [1] models.
Requires the AssembleNet architecture to be specified in
FLAGS.model_structure (and optionally FLAGS.model_edge_weights).
This structure is a list corresponding to a graph representation of the
network, where a node is a convolutional block and an edge specifies a
connection from one block to another as described in [1].
Each node itself (in the structure list) is a list with the following format:
[block_level, [list_of_input_blocks], number_filter, temporal_dilation,
spatial_stride]. [list_of_input_blocks] should be the list of node indexes whose
values are less than the index of the node itself. The 'stems' of the network
directly taking raw inputs follow a different node format:
[stem_type, temporal_dilation]. The stem_type is -1 for RGB stem and is -2 for
optical flow stem.
Also note that the codes in this file could be used for one-shot differentiable
connection search by (1) giving an overly connected structure as
FLAGS.model_structure and by (2) setting FLAGS.model_edge_weights to be '[]'.
The 'agg_weights' variables will specify which connections are needed and which
are not, once trained.
[1] Michael S. Ryoo, AJ Piergiovanni, Mingxing Tan, Anelia Angelova,
AssembleNet: Searching for Multi-Stream Neural Connectivity in Video
Architectures. ICLR 2020
https://arxiv.org/abs/1905.13209
It uses (2+1)D convolutions for video representations. The main AssembleNet
takes a 4-D (N*T)HWC tensor as an input (i.e., the batch dim and time dim are
mixed), and it reshapes a tensor to NT(H*W)C whenever a 1-D temporal conv. is
necessary. This is to run this on TPU efficiently.
"""
import functools
import math
from typing import Any, Callable, List, Mapping, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.assemblenet.configs import assemblenet as cfg
from official.projects.assemblenet.modeling import rep_flow_2d_layer as rf
from official.vision.modeling import factory_3d as model_factory
from official.vision.modeling.backbones import factory as backbone_factory
layers = tf.keras.layers
intermediate_channel_size = [64, 128, 256, 512]
def fixed_padding(inputs, kernel_size):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]` or `[batch,
height, width, channels]` depending on `data_format`.
kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`
operations. Should be a positive integer.
Returns:
A padded `Tensor` of the same `data_format` with size either intact
(if `kernel_size == 1`) or padded (if `kernel_size > 1`).
"""
data_format = tf.keras.backend.image_data_format()
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(
inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
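# Illustrative example (not from the original file): with kernel_size=3, one
# row and column of zeros is added on each side.
#   fixed_padding(tf.zeros([1, 5, 5, 1]), kernel_size=3)  # shape [1, 7, 7, 1]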
def reshape_temporal_conv1d_bn(inputs: tf.Tensor,
filters: int,
kernel_size: int,
num_frames: int = 32,
temporal_dilation: int = 1,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Performs 1D temporal conv.
followed by batch normalization with reshaping.
Args:
inputs: `Tensor` of size `[batch*time, height, width, channels]`. Only
supports 'channels_last' as the data format.
filters: `int` number of filters in the convolution.
kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`
operations. Should be a positive integer.
num_frames: `int` number of frames in the input tensor.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
    The output `Tensor` after temporal convolution, batch normalization, and
    ReLU activation.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
feature_shape = inputs.shape
inputs = tf.reshape(
inputs,
[-1, num_frames, feature_shape[1] * feature_shape[2], feature_shape[3]])
if temporal_dilation == 1:
inputs = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=(kernel_size, 1),
strides=1,
padding='SAME',
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling())(
inputs=inputs)
else:
inputs = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=(kernel_size, 1),
strides=1,
padding='SAME',
dilation_rate=(temporal_dilation, 1),
use_bias=False,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=math.sqrt(2.0 / (kernel_size * feature_shape[3]))))(
inputs=inputs)
num_channel = inputs.shape[3]
inputs = tf.reshape(inputs,
[-1, feature_shape[1], feature_shape[2], num_channel])
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
return inputs
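# Illustrative usage sketch (shapes assumed, not from the original file): a
# temporal conv over 8-frame clips; spatial shape and channels are preserved.
#   x = tf.random.normal([2 * 8, 14, 14, 32])  # [batch*time, h, w, c]
#   y = reshape_temporal_conv1d_bn(x, filters=32, kernel_size=3, num_frames=8)
#   # y has shape [16, 14, 14, 32]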
def conv2d_fixed_padding(inputs: tf.Tensor, filters: int, kernel_size: int,
strides: int):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.keras.layers.Conv2D` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
"""
if strides > 1:
inputs = fixed_padding(inputs, kernel_size)
return tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling())(
inputs=inputs)
def conv3d_same_padding(inputs: tf.Tensor,
filters: int,
kernel_size: int,
strides: int,
temporal_dilation: int = 1,
do_2d_conv: bool = False):
"""3D convolution layer wrapper.
Uses conv3d function.
Args:
inputs: 5D `Tensor` following the data_format.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
do_2d_conv: `bool` indicating whether to do 2d conv. If false, do 3D conv.
Returns:
A `Tensor` of shape `[batch, time_in, height_in, width_in, channels]`.
"""
if isinstance(kernel_size, int):
if do_2d_conv:
kernel_size = [1, kernel_size, kernel_size]
else:
kernel_size = [kernel_size, kernel_size, kernel_size]
return tf.keras.layers.Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=[1, strides, strides],
padding='SAME',
dilation_rate=[temporal_dilation, 1, 1],
use_bias=False,
kernel_initializer=tf.keras.initializers.VarianceScaling())(
inputs=inputs)
def bottleneck_block_interleave(inputs: tf.Tensor,
filters: int,
inter_filters: int,
strides: int,
use_projection: bool = False,
num_frames: int = 32,
temporal_dilation: int = 1,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False,
step=1):
"""Interleaves a standard 2D residual module and (2+1)D residual module.
Bottleneck block variant for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch*time, channels, height, width]`.
filters: `int` number of filters for the first conv. layer. The last conv.
layer will use 4 times as many filters.
inter_filters: `int` number of filters for the second conv. layer.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input spatially.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
num_frames: `int` number of frames in the input tensor.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
step: `int` to decide whether to put 2D module or (2+1)D module.
Returns:
The output `Tensor` of the block.
"""
if strides > 1 and not use_projection:
raise ValueError('strides > 1 requires use_projections=True, otherwise the '
'inputs and shortcut will have shape mismatch')
shortcut = inputs
if use_projection:
# Projection shortcut only in first block within a group. Bottleneck blocks
# end with 4 times the number of filters.
filters_out = 4 * filters
shortcut = conv2d_fixed_padding(
inputs=inputs, filters=filters_out, kernel_size=1, strides=strides)
shortcut = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
shortcut)
if step % 2 == 1:
k = 3
inputs = reshape_temporal_conv1d_bn(
inputs=inputs,
filters=filters,
kernel_size=k,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
else:
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1)
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=inter_filters, kernel_size=3, strides=strides)
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1)
inputs = rf.build_batch_norm(
init_zero=True,
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)(
inputs)
return tf.nn.relu(inputs + shortcut)
def block_group(inputs: tf.Tensor,
filters: int,
block_fn: Callable[..., tf.Tensor],
blocks: int,
strides: int,
name,
block_level,
num_frames=32,
temporal_dilation=1):
"""Creates one group of blocks for the AssembleNett model.
Args:
inputs: `Tensor` of size `[batch*time, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
block_fn: `function` for the block to use within the model
blocks: `int` number of blocks contained in the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
name: `str` name for the Tensor output of the block layer.
block_level: `int` block level in AssembleNet.
num_frames: `int` number of frames in the input tensor.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
intermediate_channel_size[block_level],
strides,
use_projection=True,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
step=0)
for i in range(1, blocks):
inputs = block_fn(
inputs,
filters,
intermediate_channel_size[block_level],
1,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
step=i)
return tf.identity(inputs, name)
def spatial_resize_and_concat(inputs):
"""Concatenates multiple different sized tensors channel-wise.
Args:
inputs: A list of `Tensors` of size `[batch*time, channels, height, width]`.
Returns:
The output `Tensor` after concatenation.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
# Do nothing if only 1 input
if len(inputs) == 1:
return inputs[0]
if data_format != 'channels_last':
return inputs
# get smallest spatial size and largest channels
sm_size = [1000, 1000]
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[1])
sm_size[1] = min(sm_size[1], inp.shape[2])
for i in range(len(inputs)):
if inputs[i].shape[1] != sm_size[0] or inputs[i].shape[2] != sm_size[1]:
ratio = (inputs[i].shape[1] + 1) // sm_size[0]
inputs[i] = tf.keras.layers.MaxPool2D([ratio, ratio],
ratio,
padding='same')(
inputs[i])
return tf.concat(inputs, 3)
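# Illustrative usage sketch (shapes assumed, not from the original file): the
# larger stream is max-pooled to the smaller spatial size before concatenation.
#   a = tf.random.normal([8, 28, 28, 32])
#   b = tf.random.normal([8, 14, 14, 64])
#   c = spatial_resize_and_concat([a, b])  # shape [8, 14, 14, 96]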
class _ApplyEdgeWeight(layers.Layer):
"""Multiply weight on each input tensor.
A weight is assigned for each connection (i.e., each input tensor). This layer
is used by the multi_connection_fusion to compute the weighted inputs.
"""
def __init__(self,
weights_shape,
index: Optional[int] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
**kwargs):
"""Constructor.
Args:
      weights_shape: shape of the weights. Should equal `[len(inputs)]`.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
**kwargs: pass through arguments.
"""
super(_ApplyEdgeWeight, self).__init__(**kwargs)
self._weights_shape = weights_shape
self._index = index
self._use_5d_mode = use_5d_mode
self._model_edge_weights = model_edge_weights
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
def get_config(self):
config = {
'weights_shape': self._weights_shape,
'index': self._index,
'use_5d_mode': self._use_5d_mode,
'model_edge_weights': self._model_edge_weights,
}
base_config = super(_ApplyEdgeWeight, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: tf.TensorShape):
if self._weights_shape[0] == 1:
self._edge_weights = 1.0
return
if self._index is None or not self._model_edge_weights:
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
trainable=True,
name='agg_weights')
else:
initial_weights_after_sigmoid = np.asarray(
self._model_edge_weights[self._index][0]).astype('float32')
# Initial_weights_after_sigmoid is never 0, as the initial weights are
      # based on the results of a successful connectivity search.
initial_weights = -np.log(1. / initial_weights_after_sigmoid - 1.)
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.constant_initializer(initial_weights),
trainable=False,
name='agg_weights')
def call(self,
inputs: List[tf.Tensor],
training: Optional[bool] = None) -> Mapping[Any, List[tf.Tensor]]:
use_5d_mode = self._use_5d_mode
dtype = inputs[0].dtype
assert len(inputs) > 1
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
lg_channel = max(lg_channel, inp.shape[-1])
# loads or creates weight variables to fuse multiple inputs
weights = tf.math.sigmoid(tf.cast(self._edge_weights, dtype))
# Compute weighted inputs. We group inputs with the same channels.
per_channel_inps = dict({0: []})
for i, inp in enumerate(inputs):
if inp.shape[h_channel_loc] != sm_size[0] or inp.shape[h_channel_loc + 1] != sm_size[1]: # pylint: disable=line-too-long
assert sm_size[0] != 0
ratio = (inp.shape[h_channel_loc] + 1) // sm_size[0]
if use_5d_mode:
inp = tf.keras.layers.MaxPool3D([1, ratio, ratio], [1, ratio, ratio],
padding='same')(
inp)
else:
inp = tf.keras.layers.MaxPool2D([ratio, ratio], ratio,
padding='same')(
inp)
weights = tf.cast(weights, inp.dtype)
if inp.shape[-1] in per_channel_inps:
per_channel_inps[inp.shape[-1]].append(weights[i] * inp)
else:
per_channel_inps.update({inp.shape[-1]: [weights[i] * inp]})
return per_channel_inps
def multi_connection_fusion(inputs: List[tf.Tensor],
index: Optional[int] = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None):
"""Do weighted summation of multiple different sized tensors.
A weight is assigned for each connection (i.e., each input tensor), and their
summation weights are learned. Uses spatial max pooling and 1x1 conv.
to match their sizes.
Args:
inputs: A `Tensor`. Either 4D or 5D, depending of use_5d_mode.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
Returns:
    The output `Tensor` after the weighted summation.
"""
if use_5d_mode:
h_channel_loc = 2
conv_function = conv3d_same_padding
else:
h_channel_loc = 1
conv_function = conv2d_fixed_padding
# If only 1 input.
if len(inputs) == 1:
return inputs[0]
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
lg_channel = max(lg_channel, inp.shape[-1])
per_channel_inps = _ApplyEdgeWeight(
weights_shape=[len(inputs)],
index=index,
use_5d_mode=use_5d_mode,
model_edge_weights=model_edge_weights)(
inputs)
# Adding 1x1 conv layers (to match channel size) and fusing all inputs.
# We add inputs with the same channels first before applying 1x1 conv to save
# memory.
inps = []
for key, channel_inps in per_channel_inps.items():
if len(channel_inps) < 1:
continue
if len(channel_inps) == 1:
if key == lg_channel:
inp = channel_inps[0]
else:
inp = conv_function(
channel_inps[0], lg_channel, kernel_size=1, strides=1)
inps.append(inp)
else:
if key == lg_channel:
inp = tf.add_n(channel_inps)
else:
inp = conv_function(
tf.add_n(channel_inps), lg_channel, kernel_size=1, strides=1)
inps.append(inp)
return tf.add_n(inps)
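# Illustrative usage sketch (shapes assumed, not from the original file):
# weighted summation of two streams; the output takes the smaller spatial
# size and the larger channel size.
#   a = tf.random.normal([8, 28, 28, 32])
#   b = tf.random.normal([8, 14, 14, 64])
#   fused = multi_connection_fusion([a, b])  # shape [8, 14, 14, 64]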
def rgb_conv_stem(inputs,
num_frames,
filters,
temporal_dilation,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Layers for a RGB stem.
Args:
inputs: A `Tensor` of size `[batch*time, height, width, channels]`.
num_frames: `int` number of frames in the input tensor.
filters: `int` number of filters in the convolution.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
The output `Tensor`.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if temporal_dilation < 1:
temporal_dilation = 1
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=7, strides=2)
inputs = tf.identity(inputs, 'initial_conv')
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = reshape_temporal_conv1d_bn(
inputs=inputs,
filters=filters,
kernel_size=5,
num_frames=num_frames,
temporal_dilation=temporal_dilation,
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
inputs = tf.keras.layers.MaxPool2D(
pool_size=3, strides=2, padding='SAME')(
inputs=inputs)
inputs = tf.identity(inputs, 'initial_max_pool')
return inputs
def flow_conv_stem(inputs,
filters,
temporal_dilation,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False):
"""Layers for an optical flow stem.
Args:
inputs: A `Tensor` of size `[batch*time, height, width, channels]`.
filters: `int` number of filters in the convolution.
    temporal_dilation: `int` temporal dilation size for the 1D conv.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
Returns:
The output `Tensor`.
"""
if temporal_dilation < 1:
temporal_dilation = 1
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=7, strides=2)
inputs = tf.identity(inputs, 'initial_conv')
inputs = rf.build_batch_norm(
bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(
inputs)
inputs = tf.nn.relu(inputs)
inputs = tf.keras.layers.MaxPool2D(
pool_size=2, strides=2, padding='SAME')(
inputs=inputs)
inputs = tf.identity(inputs, 'initial_max_pool')
return inputs
def multi_stream_heads(streams,
final_nodes,
num_frames,
num_classes,
max_pool_predictions: bool = False):
"""Layers for the classification heads.
Args:
streams: A list of 4D `Tensors` following the data_format.
final_nodes: A list of `int` where classification heads will be added.
num_frames: `int` number of frames in the input tensor.
num_classes: `int` number of possible classes for video classification.
max_pool_predictions: Use max-pooling on predictions instead of mean
pooling on features. It helps if you have more than 32 frames.
Returns:
The output `Tensor`.
"""
inputs = streams[final_nodes[0]]
num_channels = inputs.shape[-1]
def _pool_and_reshape(net):
# The activation is 7x7 so this is a global average pool.
net = tf.keras.layers.GlobalAveragePooling2D()(inputs=net)
net = tf.identity(net, 'final_avg_pool0')
net = tf.reshape(net, [-1, num_frames, num_channels])
if not max_pool_predictions:
net = tf.reduce_mean(net, 1)
return net
outputs = _pool_and_reshape(inputs)
for i in range(1, len(final_nodes)):
inputs = streams[final_nodes[i]]
inputs = _pool_and_reshape(inputs)
outputs = outputs + inputs
if len(final_nodes) > 1:
outputs = outputs / len(final_nodes)
outputs = tf.keras.layers.Dense(
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=outputs)
outputs = tf.identity(outputs, 'final_dense0')
if max_pool_predictions:
pre_logits = outputs / np.sqrt(num_frames)
acts = tf.nn.softmax(pre_logits, axis=1)
outputs = tf.math.multiply(outputs, acts)
outputs = tf.reduce_sum(outputs, 1)
return outputs
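# Example usage (illustrative; the stream indices are assumptions): with two
# level-3 blocks living at stream indices 5 and 7,
#   logits = multi_stream_heads(streams, final_nodes=[5, 7], num_frames=32,
#                               num_classes=400)
# averages the two pooled streams before the final dense layer.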
class AssembleNet(tf.keras.Model):
"""AssembleNet backbone."""
def __init__(
self,
block_fn,
num_blocks: List[int],
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False,
combine_method: str = 'sigmoid',
**kwargs):
"""Generator for AssembleNet v1 models.
Args:
    block_fn: `function` for the block to use within the model. Currently
      `bottleneck_block_interleave` is the only option.
num_blocks: list of 4 `int`s denoting the number of blocks to include in
each of the 4 block groups. Each group consists of blocks that take
inputs of the same resolution.
num_frames: the number of frames in the input tensor.
model_structure: AssembleNet model structure in the string format.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
Dimension should be `[batch*time, height, width, channels]`.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
    combine_method: `str` for the weighted summation to fuse different blocks.
**kwargs: pass through arguments.
"""
inputs = tf.keras.Input(shape=input_specs.shape[1:])
data_format = tf.keras.backend.image_data_format()
# Creation of the model graph.
    logging.info('model_structure=%r', model_structure)
logging.info('model_edge_weights=%r', model_edge_weights)
structure = model_structure
original_num_frames = num_frames
assert num_frames > 0, f'Invalid num_frames {num_frames}'
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(structure)):
grouping[structure[i][0]].append(i)
stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])
assert stem_count != 0
stem_filters = 128 // stem_count
original_inputs = inputs
if len(input_specs.shape) == 5:
first_dim = (
input_specs.shape[0] * input_specs.shape[1]
if input_specs.shape[0] and input_specs.shape[1] else -1)
reshape_inputs = tf.reshape(inputs, (first_dim,) + input_specs.shape[2:])
elif len(input_specs.shape) == 4:
reshape_inputs = original_inputs
else:
raise ValueError(
f'Expect input spec to be 4 or 5 dimensions {input_specs.shape}')
if grouping[-2]:
      # Instead of loading optical flows as inputs from the data pipeline, we
      # apply the "Representation Flow" to RGB frames so that the flow can be
      # computed on the fly within the TPU/GPU. It's essentially optical flow
      # since we compute it from RGBs.
axis = 3 if data_format == 'channels_last' else 1
flow_inputs = rf.RepresentationFlow(
original_num_frames,
depth=reshape_inputs.shape.as_list()[axis],
num_iter=40,
bottleneck=1)(
reshape_inputs)
streams = []
for i in range(len(structure)):
with tf.name_scope('Node_' + str(i)):
if structure[i][0] == -1:
inputs = rgb_conv_stem(
reshape_inputs,
original_num_frames,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -2:
inputs = flow_conv_stem(
flow_inputs,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
else:
num_frames = original_num_frames
block_number = structure[i][0]
combined_inputs = []
if combine_method == 'concat':
combined_inputs = [
streams[structure[i][1][j]]
for j in range(0, len(structure[i][1]))
]
combined_inputs = spatial_resize_and_concat(combined_inputs)
else:
combined_inputs = [
streams[structure[i][1][j]]
for j in range(0, len(structure[i][1]))
]
combined_inputs = multi_connection_fusion(
combined_inputs, index=i, model_edge_weights=model_edge_weights)
graph = block_group(
inputs=combined_inputs,
filters=structure[i][2],
block_fn=block_fn,
blocks=num_blocks[block_number],
strides=structure[i][4],
name='block_group' + str(i),
block_level=structure[i][0],
num_frames=num_frames,
temporal_dilation=structure[i][3])
streams.append(graph)
super(AssembleNet, self).__init__(
inputs=original_inputs, outputs=streams, **kwargs)
class AssembleNetModel(tf.keras.Model):
"""An AssembleNet model builder."""
def __init__(self,
backbone,
num_classes,
num_frames: int,
model_structure: List[Any],
input_specs: Optional[Mapping[str,
tf.keras.layers.InputSpec]] = None,
max_pool_predictions: bool = False,
**kwargs):
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'num_frames': num_frames,
'input_specs': input_specs,
'model_structure': model_structure,
}
self._input_specs = input_specs
self._backbone = backbone
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(model_structure)):
grouping[model_structure[i][0]].append(i)
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
streams = self._backbone(inputs['image'])
outputs = multi_stream_heads(
streams,
grouping[3],
num_frames,
num_classes,
max_pool_predictions=max_pool_predictions)
super(AssembleNetModel, self).__init__(
inputs=inputs, outputs=outputs, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
ASSEMBLENET_SPECS = {
26: {
'block': bottleneck_block_interleave,
'num_blocks': [2, 2, 2, 2]
},
38: {
'block': bottleneck_block_interleave,
'num_blocks': [2, 4, 4, 2]
},
50: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 6, 3]
},
68: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 12, 3]
},
77: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 15, 3]
},
101: {
'block': bottleneck_block_interleave,
'num_blocks': [3, 4, 23, 3]
},
}
def assemblenet_v1(assemblenet_depth: int,
num_classes: int,
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
max_pool_predictions: bool = False,
combine_method: str = 'sigmoid',
**kwargs):
"""Returns the AssembleNet model for a given size and number of output classes."""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if assemblenet_depth not in ASSEMBLENET_SPECS:
    raise ValueError(f'Not a valid assemblenet_depth: {assemblenet_depth}')
input_specs_dict = {'image': input_specs}
params = ASSEMBLENET_SPECS[assemblenet_depth]
backbone = AssembleNet(
block_fn=params['block'],
num_blocks=params['num_blocks'],
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
combine_method=combine_method,
**kwargs)
return AssembleNetModel(
backbone,
num_classes=num_classes,
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
max_pool_predictions=max_pool_predictions,
**kwargs)
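# Example usage (illustrative): `my_structure` must follow the nested-list
# model_structure format used in this file, e.g. stems like [-1, 4] and blocks
# like [0, [0, 1], 64, 1, 1]; the exact structure here is an assumption.
#   model = assemblenet_v1(
#       assemblenet_depth=50, num_classes=400, num_frames=32,
#       model_structure=my_structure)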
@backbone_factory.register_backbone_builder('assemblenet')
def build_assemblenet_v1(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds assemblenet backbone."""
del l2_regularizer
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert 'assemblenet' in backbone_type
assemblenet_depth = int(backbone_cfg.model_id)
if assemblenet_depth not in ASSEMBLENET_SPECS:
    raise ValueError(f'Not a valid assemblenet_depth: {assemblenet_depth}')
model_structure, model_edge_weights = cfg.blocks_to_flat_lists(
backbone_cfg.blocks)
params = ASSEMBLENET_SPECS[assemblenet_depth]
block_fn = functools.partial(
params['block'],
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
backbone = AssembleNet(
block_fn=block_fn,
num_blocks=params['num_blocks'],
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
combine_method=backbone_cfg.combine_method,
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
logging.info('Number of parameters in AssembleNet backbone: %f M.',
backbone.count_params() / 10.**6)
return backbone
@model_factory.register_model_builder('assemblenet')
def build_assemblenet_model(
input_specs: tf.keras.layers.InputSpec,
model_config: cfg.AssembleNetModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds assemblenet model."""
input_specs_dict = {'image': input_specs}
backbone = build_assemblenet_v1(input_specs, model_config.backbone,
model_config.norm_activation, l2_regularizer)
backbone_cfg = model_config.backbone.get()
model_structure, _ = cfg.blocks_to_flat_lists(backbone_cfg.blocks)
model = AssembleNetModel(
backbone,
num_classes=num_classes,
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
max_pool_predictions=model_config.max_pool_predictions)
return model
| 38,298 | 34.593866 | 127 | py |
models | models-master/official/projects/text_classification_example/classification_example.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classifcation Task Showcase."""
import dataclasses
from typing import List, Mapping, Text
from seqeval import metrics as seqeval_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.modeling import models
from official.nlp.tasks import utils
from official.projects.text_classification_example import classification_data_loader
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A base span labeler configuration."""
encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
head_dropout: float = 0.1
head_initializer_range: float = 0.02
@dataclasses.dataclass
class ClassificationExampleConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can be specified.
init_checkpoint: str = ''
hub_module_url: str = ''
  model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
  num_classes: int = 2
  class_names: List[str] = dataclasses.field(
      default_factory=lambda: ['A', 'B'])
train_data: cfg.DataConfig = dataclasses.field(
default_factory=classification_data_loader.ClassificationExampleDataConfig
)
validation_data: cfg.DataConfig = dataclasses.field(
default_factory=classification_data_loader.ClassificationExampleDataConfig
)
class ClassificationExampleTask(base_task.Task):
"""Task object for classification."""
def build_model(self) -> tf.keras.Model:
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
return models.BertClassifier(
network=encoder_network,
num_classes=len(self.task_config.class_names),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=self.task_config.model.head_initializer_range),
dropout_rate=self.task_config.model.head_dropout)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, tf.cast(model_outputs, tf.float32), from_logits=True)
return tf_utils.safe_mean(loss)
def build_inputs(self,
params: cfg.DataConfig,
input_context=None) -> tf.data.Dataset:
"""Returns tf.data.Dataset for sentence_prediction task."""
loader = classification_data_loader.ClassificationDataLoader(params)
return loader.load(input_context)
def inference_step(self, inputs,
model: tf.keras.Model) -> Mapping[str, tf.Tensor]:
"""Performs the forward step."""
logits = model(inputs, training=False)
return {
'logits': logits,
'predict_ids': tf.argmax(logits, axis=-1, output_type=tf.int32)
}
def validation_step(self,
inputs,
model: tf.keras.Model,
metrics=None) -> Mapping[str, tf.Tensor]:
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(labels=labels, model_outputs=outputs['logits'])
# Negative label ids are padding labels which should be ignored.
real_label_index = tf.where(tf.greater_equal(labels, 0))
predict_ids = tf.gather_nd(outputs['predict_ids'], real_label_index)
label_ids = tf.gather_nd(labels, real_label_index)
return {
self.loss: loss,
'predict_ids': predict_ids,
'label_ids': label_ids,
}
def aggregate_logs(self,
state=None,
step_outputs=None) -> Mapping[Text, List[List[Text]]]:
"""Aggregates over logs returned from a validation step."""
if state is None:
state = {'predict_class': [], 'label_class': []}
def id_to_class_name(batched_ids):
class_names = []
for per_example_ids in batched_ids:
class_names.append([])
for per_token_id in per_example_ids.numpy().tolist():
class_names[-1].append(self.task_config.class_names[per_token_id])
return class_names
    # Convert ids to class names, because `seqeval_metrics` relies on the
    # class names to decide IOB tags.
state['predict_class'].extend(id_to_class_name(step_outputs['predict_ids']))
state['label_class'].extend(id_to_class_name(step_outputs['label_ids']))
return state
def reduce_aggregated_logs(self,
aggregated_logs,
global_step=None) -> Mapping[Text, float]:
"""Reduces aggregated logs over validation steps."""
label_class = aggregated_logs['label_class']
predict_class = aggregated_logs['predict_class']
return {
'f1':
seqeval_metrics.f1_score(label_class, predict_class),
'precision':
seqeval_metrics.precision_score(label_class, predict_class),
'recall':
seqeval_metrics.recall_score(label_class, predict_class),
'accuracy':
seqeval_metrics.accuracy_score(label_class, predict_class),
}
@exp_factory.register_config_factory('example_bert_classification_example')
def bert_classification_example() -> cfg.ExperimentConfig:
"""Return a minimum experiment config for Bert token classification."""
return cfg.ExperimentConfig(
task=ClassificationExampleConfig(),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
},
'learning_rate': {
'type': 'polynomial',
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
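# Example (illustrative; the input path is an assumption): the registered
# experiment can be retrieved and customized by name.
#   config = exp_factory.get_exp_config('example_bert_classification_example')
#   config.task.train_data.input_path = '/path/to/train.tfrecord'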
| 7,073 | 35.84375 | 84 | py |
models | models-master/official/projects/text_classification_example/classification_data_loader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for classification tasks."""
from typing import Dict, Mapping, Optional, Tuple
import dataclasses
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
@dataclasses.dataclass
class ClassificationExampleDataConfig(cfg.DataConfig):
"""Data config for token classification task."""
seq_length: int = 128
class ClassificationDataLoader(data_loader.DataLoader):
"""A class to load dataset for sentence prediction (classification) task."""
def __init__(self, params):
self._params = params
self._seq_length = params.seq_length
def _decode(self, record: tf.Tensor) -> Dict[str, tf.Tensor]:
"""Decodes a serialized tf.Example."""
name_to_features = {
'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'label_ids': tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(
self,
record: Mapping[str,
tf.Tensor]) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
x = {
'input_word_ids': record['input_ids'],
'input_mask': record['input_mask'],
'input_type_ids': record['segment_ids']
}
y = record['label_ids']
return (x, y)
def load(
self,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
params=self._params, decoder_fn=self._decode, parser_fn=self._parse)
return reader.read(input_context)
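# Example usage (illustrative; the input path is an assumption):
#   params = ClassificationExampleDataConfig(
#       input_path='/tmp/train.tfrecord', seq_length=128,
#       global_batch_size=32, is_training=True)
#   dataset = ClassificationDataLoader(params).load()
# Each batch is a (features, labels) tuple as produced by `_parse` above.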
| 2,753 | 32.585366 | 80 | py |
models | models-master/official/projects/text_classification_example/classification_example_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nlp.projects.example.classification_example."""
import tensorflow as tf
from official.core import config_definitions as cfg
from official.nlp.configs import encoders
from official.projects.text_classification_example import classification_data_loader
from official.projects.text_classification_example import classification_example
class ClassificationExampleTest(tf.test.TestCase):
def get_model_config(self):
return classification_example.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=2)))
def get_dummy_dataset(self, params: cfg.DataConfig):
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
y = tf.zeros((1, 1), dtype=tf.int32)
return (x, y)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def test_task_with_dummy_data(self):
train_data_config = (
classification_data_loader.ClassificationExampleDataConfig(
input_path='dummy', seq_length=128, global_batch_size=1))
task_config = classification_example.ClassificationExampleConfig(
model=self.get_model_config(),)
task = classification_example.ClassificationExampleTask(task_config)
task.build_inputs = self.get_dummy_dataset
model = task.build_model()
metrics = task.build_metrics()
dataset = task.build_inputs(train_data_config)
iterator = iter(dataset)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
task.initialize(model)
task.train_step(next(iterator), model, optimizer, metrics=metrics)
if __name__ == '__main__':
tf.test.main()
| 2,496 | 34.671429 | 84 | py |
models | models-master/official/projects/text_classification_example/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A customized training library for the specific task."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.projects.text_classification_example import classification_example
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
  # Sets the mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can have a significant impact on model speed by utilizing float16 on GPUs
  # and bfloat16 on TPUs. loss_scale takes effect only when dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = classification_example.ClassificationExampleTask(params.task)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 2,519 | 35.521739 | 80 | py |
models | models-master/official/projects/labse/export_tfhub.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Exports the LaBSE model and its preprocessing as SavedModels for TF Hub.
Example usage:
# Point this variable to your training results.
# Note that flag --do_lower_case is inferred from the name.
LaBSE_DIR=<Your LaBSE model dir>
# Step 1: export the core LaBSE model.
python3 ./export_tfhub.py \
--bert_config_file ${LaBSE_DIR:?}/bert_config.json \
--model_checkpoint_path ${LaBSE_DIR:?}/labse_model.ckpt \
--vocab_file ${LaBSE_DIR:?}/vocab.txt \
--export_type model --export_path /tmp/labse_model
# Step 2: export matching preprocessing (be sure to use same flags).
python3 ./export_tfhub.py \
--vocab_file ${LaBSE_DIR:?}/vocab.txt \
--export_type preprocessing --export_path /tmp/labse_preprocessing
"""
from typing import Text, Tuple
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.legacy.bert import bert_models
from official.legacy.bert import configs
from official.nlp.modeling import models
from official.nlp.tasks import utils
from official.nlp.tools import export_tfhub_lib
FLAGS = flags.FLAGS
flags.DEFINE_enum("export_type", "model", ["model", "preprocessing"],
"The type of model to export")
flags.DEFINE_string("export_path", None, "TF-Hub SavedModel destination path.")
flags.DEFINE_string(
"bert_tfhub_module", None,
"Bert tfhub module to define core bert layers. Needed for --export_type "
"model.")
flags.DEFINE_string(
"bert_config_file", None,
"Bert configuration file to define core bert layers. It will not be used "
"if bert_tfhub_module is set. Needed for --export_type model.")
flags.DEFINE_string(
"model_checkpoint_path", None, "File path to TF model checkpoint. "
"Needed for --export_type model.")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the BERT model was trained on. "
"Needed for both --export_type model and preprocessing.")
flags.DEFINE_bool(
"do_lower_case", None,
"Whether to lowercase before tokenization. If left as None, "
"do_lower_case will be enabled if 'uncased' appears in the "
"name of --vocab_file. "
"Needed for both --export_type model and preprocessing.")
flags.DEFINE_integer(
"default_seq_length", 128,
"The sequence length of preprocessing results from "
"top-level preprocess method. This is also the default "
"sequence length for the bert_pack_inputs subobject."
"Needed for --export_type preprocessing.")
flags.DEFINE_bool(
"tokenize_with_offsets", False, # TODO(b/181866850)
"Whether to export a .tokenize_with_offsets subobject for "
"--export_type preprocessing.")
flags.DEFINE_bool(
"normalize", True,
"Parameter of DualEncoder model, normalize the embedding (pooled_output) "
"if set to True.")
def _get_do_lower_case(do_lower_case, vocab_file):
"""Returns do_lower_case, replacing None by a guess from vocab file name."""
if do_lower_case is None:
do_lower_case = "uncased" in vocab_file
logging.info("Using do_lower_case=%s based on name of vocab_file=%s",
do_lower_case, vocab_file)
return do_lower_case
def create_labse_model(
    bert_tfhub_module: Text, bert_config: configs.BertConfig,
    normalize: bool) -> Tuple[tf.keras.Model, tf.keras.Model]:
"""Creates a LaBSE keras core model from BERT configuration.
Args:
bert_tfhub_module: The bert tfhub module path. The LaBSE will be built upon
the tfhub module if it is not empty.
bert_config: A `BertConfig` to create the core model. Used if
bert_tfhub_module is empty.
normalize: Parameter of DualEncoder model, normalize the embedding (
pooled_output) if set to True.
Returns:
    A tuple of (LaBSE keras model, encoder network).
"""
if bert_tfhub_module:
encoder_network = utils.get_encoder_from_hub(bert_tfhub_module)
else:
encoder_network = bert_models.get_transformer_encoder(
bert_config, sequence_length=None)
labse_model = models.DualEncoder(
network=encoder_network,
max_seq_length=None,
normalize=normalize,
output="predictions")
  return labse_model, encoder_network
def export_labse_model(bert_tfhub_module: Text, bert_config: configs.BertConfig,
model_checkpoint_path: Text, hub_destination: Text,
vocab_file: Text, do_lower_case: bool, normalize: bool):
"""Restores a tf.keras.Model and saves for TF-Hub."""
core_model, encoder = create_labse_model(
bert_tfhub_module, bert_config, normalize)
checkpoint = tf.train.Checkpoint(encoder=encoder)
checkpoint.restore(model_checkpoint_path).assert_existing_objects_matched()
core_model.vocab_file = tf.saved_model.Asset(vocab_file)
core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False)
core_model.save(hub_destination, include_optimizer=False, save_format="tf")
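# After export, the SavedModel can be consumed with TF Hub (illustrative
# sketch; the local path is an assumption and `hub` means tensorflow_hub):
#   encoder = hub.KerasLayer('/tmp/labse_model', trainable=False)
#   outputs = encoder([input_word_ids, input_mask, input_type_ids])
#   embeddings = outputs['pooled_output']  # L2-normalized if normalize=True.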
def main(_):
do_lower_case = export_tfhub_lib.get_do_lower_case(FLAGS.do_lower_case,
FLAGS.vocab_file)
if FLAGS.export_type == "model":
if FLAGS.bert_tfhub_module:
bert_config = None
else:
bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
export_labse_model(FLAGS.bert_tfhub_module, bert_config,
FLAGS.model_checkpoint_path, FLAGS.export_path,
FLAGS.vocab_file, do_lower_case, FLAGS.normalize)
elif FLAGS.export_type == "preprocessing":
# LaBSE is still a BERT model, reuse the export_bert_preprocessing here.
export_tfhub_lib.export_bert_preprocessing(
FLAGS.export_path, FLAGS.vocab_file, do_lower_case,
FLAGS.default_seq_length, FLAGS.tokenize_with_offsets)
else:
raise app.UsageError("Unknown value '%s' for flag --export_type")
if __name__ == "__main__":
app.run(main)
| 6,473 | 38.962963 | 87 | py |
models | models-master/official/projects/labse/export_tfhub_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests LaBSE's export_tfhub."""
import os
# Import libraries
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from official.legacy.bert import configs
from official.projects.labse import export_tfhub
class ExportModelTest(tf.test.TestCase):
def test_export_model(self):
# Exports a savedmodel for TF-Hub
hidden_size = 16
bert_config = configs.BertConfig(
vocab_size=100,
hidden_size=hidden_size,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_hidden_layers=1)
labse_model, encoder = export_tfhub.create_labse_model(
None, bert_config, normalize=True)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = tf.train.Checkpoint(encoder=encoder)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt")
with tf.io.gfile.GFile(vocab_file, "w") as f:
f.write("dummy content")
hub_destination = os.path.join(self.get_temp_dir(), "hub")
export_tfhub.export_labse_model(
None, # bert_tfhub_module
bert_config,
model_checkpoint_path,
hub_destination,
vocab_file,
do_lower_case=True,
normalize=True)
# Restores a hub KerasLayer.
hub_layer = hub.KerasLayer(hub_destination, trainable=True)
if hasattr(hub_layer, "resolved_object"):
# Checks meta attributes.
self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())
with tf.io.gfile.GFile(
hub_layer.resolved_object.vocab_file.asset_path.numpy()) as f:
self.assertEqual("dummy content", f.read())
# Checks the hub KerasLayer.
for source_weight, hub_weight in zip(labse_model.trainable_weights,
hub_layer.trainable_weights):
self.assertAllClose(source_weight.numpy(), hub_weight.numpy())
seq_length = 10
dummy_ids = np.zeros((2, seq_length), dtype=np.int32)
hub_outputs = hub_layer([dummy_ids, dummy_ids, dummy_ids])
source_outputs = labse_model([dummy_ids, dummy_ids, dummy_ids])
self.assertEqual(hub_outputs["pooled_output"].shape, (2, hidden_size))
self.assertEqual(hub_outputs["sequence_output"].shape,
(2, seq_length, hidden_size))
for output_name in source_outputs:
      self.assertAllClose(source_outputs[output_name].numpy(),
                          hub_outputs[output_name].numpy())
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
inputs = [input_ids, np.ones_like(input_ids), np.zeros_like(input_ids)]
outputs = np.concatenate([
hub_layer(inputs, training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)
# Test propagation of seq_length in shape inference.
input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
outputs = hub_layer([input_word_ids, input_mask, input_type_ids])
self.assertEqual(outputs["pooled_output"].shape.as_list(),
[None, hidden_size])
self.assertEqual(outputs["sequence_output"].shape.as_list(),
[None, seq_length, hidden_size])
if __name__ == "__main__":
tf.test.main()
| 4,422 | 38.491071 | 79 | py |
models | models-master/official/projects/labse/config_labse.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-return-or-yield,line-too-long
"""LaBSE configurations."""
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.data import dual_encoder_dataloader
from official.nlp.tasks import dual_encoder
AdamWeightDecay = optimization.AdamWeightDecayConfig
PolynomialLr = optimization.PolynomialLrConfig
PolynomialWarmupConfig = optimization.PolynomialWarmupConfig
@dataclasses.dataclass
class LaBSEOptimizationConfig(optimization.OptimizationConfig):
"""Bert optimization config."""
optimizer: optimization.OptimizerConfig = dataclasses.field(
default_factory=lambda: optimization.OptimizerConfig( # pylint: disable=g-long-lambda
type="adamw", adamw=AdamWeightDecay()
)
)
learning_rate: optimization.LrConfig = dataclasses.field(
default_factory=lambda: optimization.LrConfig( # pylint: disable=g-long-lambda
type="polynomial",
polynomial=PolynomialLr(
initial_learning_rate=1e-4,
decay_steps=1000000,
end_learning_rate=0.0,
),
)
)
warmup: optimization.WarmupConfig = dataclasses.field(
default_factory=lambda: optimization.WarmupConfig( # pylint: disable=g-long-lambda
type="polynomial",
polynomial=PolynomialWarmupConfig(warmup_steps=10000),
)
)
@exp_factory.register_config_factory("labse/train")
def labse_train() -> cfg.ExperimentConfig:
r"""Language-agnostic bert sentence embedding.
*Note*: this experiment does not use cross-accelerator global softmax so it
does not reproduce the exact LABSE training.
"""
config = cfg.ExperimentConfig(
task=dual_encoder.DualEncoderConfig(
train_data=dual_encoder_dataloader.DualEncoderDataConfig(),
validation_data=dual_encoder_dataloader.DualEncoderDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=LaBSEOptimizationConfig(
learning_rate=optimization.LrConfig(
type="polynomial",
polynomial=PolynomialLr(
initial_learning_rate=3e-5, end_learning_rate=0.0)),
warmup=optimization.WarmupConfig(
type="polynomial", polynomial=PolynomialWarmupConfig()))),
restrictions=[
"task.train_data.is_training != None",
"task.validation_data.is_training != None"
])
return config
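# Example (illustrative; the input path is an assumption): retrieving and
# overriding the registered experiment.
#   config = exp_factory.get_exp_config('labse/train')
#   config.task.train_data.input_path = '/path/to/train.tfrecord'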
| 3,162 | 38.5375 | 92 | py |
models | models-master/official/projects/labse/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Labse training driver, register labse configs."""
# pylint: disable=unused-import
from absl import app
from official.common import flags as tfm_flags
from official.nlp import tasks
from official.nlp import train
from official.projects.labse import config_labse
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 976 | 33.892857 | 76 | py |
models | models-master/official/projects/pruning/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration on pruning project."""
# pylint: disable=unused-import
from official.projects.pruning import configs
from official.projects.pruning.tasks import image_classification
| 817 | 42.052632 | 74 | py |
models | models-master/official/projects/pruning/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver, including Pruning configs.."""
from absl import app
from official.common import flags as tfm_flags
# To build up a connection with the training binary for pruning, the custom
# configs & tasks are imported while unused.
from official.projects.pruning import configs # pylint: disable=unused-import
from official.projects.pruning.tasks import image_classification # pylint: disable=unused-import
from official.vision import train
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 1,171 | 38.066667 | 97 | py |
models | models-master/official/projects/pruning/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.projects.pruning.configs import image_classification
| 711 | 38.555556 | 74 | py |
models | models-master/official/projects/pruning/configs/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_classification."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.pruning.configs import image_classification as pruning_exp_cfg
from official.vision.configs import image_classification as exp_cfg
class ImageClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('resnet_imagenet_pruning',),
('mobilenet_imagenet_pruning'),
)
def test_image_classification_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.ImageClassificationTask)
self.assertIsInstance(config.task, pruning_exp_cfg.ImageClassificationTask)
self.assertIsInstance(config.task.pruning, pruning_exp_cfg.PruningConfig)
self.assertIsInstance(config.task.model, exp_cfg.ImageClassificationModel)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,928 | 38.367347 | 85 | py |
models | models-master/official/projects/pruning/configs/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification configuration definition."""
import dataclasses
from typing import Optional, Tuple
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.vision.configs import image_classification
@dataclasses.dataclass
class PruningConfig(hyperparams.Config):
"""Pruning parameters.
Attributes:
pretrained_original_checkpoint: The pretrained checkpoint location of the
original model.
pruning_schedule: A string that indicates the name of `PruningSchedule`
object that controls pruning rate throughout training. Current available
options are: `PolynomialDecay` and `ConstantSparsity`.
begin_step: Step at which to begin pruning.
end_step: Step at which to end pruning.
initial_sparsity: Sparsity ratio at which pruning begins.
final_sparsity: Sparsity ratio at which pruning ends.
frequency: Number of training steps between sparsity adjustment.
sparsity_m_by_n: Structured sparsity specification. It specifies m zeros
over n consecutive weight elements.
"""
pretrained_original_checkpoint: Optional[str] = None
pruning_schedule: str = 'PolynomialDecay'
begin_step: int = 0
end_step: int = 1000
initial_sparsity: float = 0.0
final_sparsity: float = 0.1
frequency: int = 100
sparsity_m_by_n: Optional[Tuple[int, int]] = None
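# Example (illustrative; the step counts and ratios are assumptions): a
# schedule that ramps unstructured sparsity from 0% to 50% between steps 0 and
# 10000, updating masks every 100 steps.
#   pruning = PruningConfig(
#       pruning_schedule='PolynomialDecay', begin_step=0, end_step=10000,
#       initial_sparsity=0.0, final_sparsity=0.5, frequency=100)
# For 2:4 structured sparsity, additionally set sparsity_m_by_n=(2, 4).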
@dataclasses.dataclass
class ImageClassificationTask(image_classification.ImageClassificationTask):
pruning: Optional[PruningConfig] = None
@exp_factory.register_config_factory('resnet_imagenet_pruning')
def image_classification_imagenet() -> cfg.ExperimentConfig:
"""Builds an image classification config for the resnet with pruning."""
config = image_classification.image_classification_imagenet()
task = ImageClassificationTask.from_args(
pruning=PruningConfig(), **config.task.as_dict())
config.task = task
runtime = cfg.RuntimeConfig(enable_xla=False)
config.runtime = runtime
return config
@exp_factory.register_config_factory('mobilenet_imagenet_pruning')
def image_classification_imagenet_mobilenet() -> cfg.ExperimentConfig:
"""Builds an image classification config for the mobilenetV2 with pruning."""
config = image_classification.image_classification_imagenet_mobilenet()
task = ImageClassificationTask.from_args(
pruning=PruningConfig(), **config.task.as_dict())
config.task = task
return config
| 3,071 | 36.925926 | 79 | py |
models | models-master/official/projects/pruning/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modeling package definition."""
from official.projects.pruning.tasks import image_classification
| 710 | 38.5 | 74 | py |
models | models-master/official/projects/pruning/tasks/image_classification_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image classification task."""
# pylint: disable=unused-import
import os
import tempfile
from absl.testing import parameterized
import numpy as np
import orbit
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official import vision
from official.core import actions
from official.core import exp_factory
from official.modeling import optimization
from official.projects.pruning.tasks import image_classification as img_cls_task
from official.vision.dataloaders import tfexample_utils
class ImageClassificationTaskTest(tf.test.TestCase, parameterized.TestCase):
def _validate_model_pruned(self, model, config_name):
pruning_weight_names = []
prunable_layers = img_cls_task.collect_prunable_layers(model)
for layer in prunable_layers:
for weight, _, _ in layer.pruning_vars:
pruning_weight_names.append(weight.name)
if config_name == 'resnet_imagenet_pruning':
# Conv2D : 1
# BottleneckBlockGroup : 4+3+3 = 10
# BottleneckBlockGroup1 : 4+3+3+3 = 13
# BottleneckBlockGroup2 : 4+3+3+3+3+3 = 19
# BottleneckBlockGroup3 : 4+3+3 = 10
# FullyConnected : 1
# Total : 54
self.assertLen(pruning_weight_names, 54)
elif config_name == 'mobilenet_imagenet_pruning':
# Conv2DBN = 1
# InvertedBottleneckBlockGroup = 2
# InvertedBottleneckBlockGroup1~16 = 48
# Conv2DBN = 1
# FullyConnected : 1
# Total : 53
self.assertLen(pruning_weight_names, 53)
def _check_2x4_sparsity(self, model):
def _is_pruned_2_by_4(weights):
if weights.shape.rank == 2:
prepared_weights = tf.transpose(weights)
elif weights.shape.rank == 4:
perm_weights = tf.transpose(weights, perm=[3, 0, 1, 2])
prepared_weights = tf.reshape(perm_weights,
[-1, perm_weights.shape[-1]])
prepared_weights_np = prepared_weights.numpy()
for row in range(0, prepared_weights_np.shape[0]):
for col in range(0, prepared_weights_np.shape[1], 4):
if np.count_nonzero(prepared_weights_np[row, col:col + 4]) > 2:
return False
return True
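    # Illustrative: a group like [0.3, 0.0, -0.7, 0.0] satisfies the 2:4
    # constraint (at most two nonzeros per four consecutive weights), while
    # [0.3, 0.1, -0.7, 0.0] violates it.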
prunable_layers = img_cls_task.collect_prunable_layers(model)
for layer in prunable_layers:
for weight, _, _ in layer.pruning_vars:
if weight.shape[-2] % 4 == 0:
self.assertTrue(_is_pruned_2_by_4(weight))
def _validate_metrics(self, logs, metrics):
for metric in metrics:
logs[metric.name] = metric.result()
self.assertIn('loss', logs)
self.assertIn('accuracy', logs)
self.assertIn('top_5_accuracy', logs)
def _create_test_tfrecord(self, test_tfrecord_file, num_samples,
input_image_size):
example = tf.train.Example.FromString(
tfexample_utils.create_classification_example(
image_height=input_image_size[0], image_width=input_image_size[1]))
examples = [example] * num_samples
tfexample_utils.dump_to_tfrecord(
record_file=test_tfrecord_file, tf_examples=examples)
@parameterized.parameters(('resnet_imagenet_pruning'),
('mobilenet_imagenet_pruning'))
def testTaskWithUnstructuredSparsity(self, config_name):
test_tfrecord_file = os.path.join(self.get_temp_dir(), 'cls_test.tfrecord')
self._create_test_tfrecord(
test_tfrecord_file=test_tfrecord_file,
num_samples=10,
input_image_size=[224, 224])
config = exp_factory.get_exp_config(config_name)
config.task.train_data.global_batch_size = 2
config.task.validation_data.input_path = test_tfrecord_file
config.task.train_data.input_path = test_tfrecord_file
task = img_cls_task.ImageClassificationTask(config.task)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
config.task.train_data)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if isinstance(optimizer, optimization.ExponentialMovingAverage
) and not optimizer.has_shadow_copy:
optimizer.shadow_copy(model)
if config.task.pruning:
      # This is an auxiliary initialization required to prune a model, which
      # is originally done in the train library.
actions.PruningAction(
export_dir=tempfile.gettempdir(), model=model, optimizer=optimizer)
# Check all layers and target weights are successfully pruned.
self._validate_model_pruned(model, config_name)
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
self._validate_metrics(logs, metrics)
logs = task.validation_step(next(iterator), model, metrics=metrics)
self._validate_metrics(logs, metrics)
@parameterized.parameters(('resnet_imagenet_pruning'),
('mobilenet_imagenet_pruning'))
def testTaskWithStructuredSparsity(self, config_name):
test_tfrecord_file = os.path.join(self.get_temp_dir(), 'cls_test.tfrecord')
self._create_test_tfrecord(
test_tfrecord_file=test_tfrecord_file,
num_samples=10,
input_image_size=[224, 224])
config = exp_factory.get_exp_config(config_name)
config.task.train_data.global_batch_size = 2
config.task.validation_data.input_path = test_tfrecord_file
config.task.train_data.input_path = test_tfrecord_file
# Add structured sparsity
config.task.pruning.sparsity_m_by_n = (2, 4)
config.task.pruning.frequency = 1
task = img_cls_task.ImageClassificationTask(config.task)
model = task.build_model()
metrics = task.build_metrics()
strategy = tf.distribute.get_strategy()
dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
config.task.train_data)
iterator = iter(dataset)
opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
if isinstance(optimizer, optimization.ExponentialMovingAverage
) and not optimizer.has_shadow_copy:
optimizer.shadow_copy(model)
    # This is an auxiliary initialization required to prune a model, which is
    # originally done in the train library.
pruning_actions = actions.PruningAction(
export_dir=tempfile.gettempdir(), model=model, optimizer=optimizer)
# Check all layers and target weights are successfully pruned.
self._validate_model_pruned(model, config_name)
logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
self._validate_metrics(logs, metrics)
logs = task.validation_step(next(iterator), model, metrics=metrics)
self._validate_metrics(logs, metrics)
pruning_actions.update_pruning_step.on_epoch_end(batch=None)
# Check whether the weights are pruned in 2x4 pattern.
self._check_2x4_sparsity(model)
if __name__ == '__main__':
tf.test.main()
| 7,814 | 37.688119 | 80 | py |
models | models-master/official/projects/pruning/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
from absl import logging
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from official.core import task_factory
from official.projects.pruning.configs import image_classification as exp_cfg
from official.vision.modeling.backbones import mobilenet
from official.vision.modeling.layers import nn_blocks
from official.vision.tasks import image_classification
@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)
class ImageClassificationTask(image_classification.ImageClassificationTask):
"""A task for image classification with pruning."""
_BLOCK_LAYER_SUFFIX_MAP = {
mobilenet.Conv2DBNBlock: ('conv2d/kernel:0',),
nn_blocks.BottleneckBlock: (
'conv2d/kernel:0',
'conv2d_1/kernel:0',
'conv2d_2/kernel:0',
'conv2d_3/kernel:0',
),
nn_blocks.InvertedBottleneckBlock: (
'conv2d/kernel:0',
'conv2d_1/kernel:0',
'conv2d_2/kernel:0',
'conv2d_3/kernel:0',
'depthwise_conv2d/depthwise_kernel:0',
),
nn_blocks.ResidualBlock: (
'conv2d/kernel:0',
'conv2d_1/kernel:0',
'conv2d_2/kernel:0',
),
}
def build_model(self) -> tf.keras.Model:
"""Builds classification model with pruning."""
model = super(ImageClassificationTask, self).build_model()
if self.task_config.pruning is None:
return model
pruning_cfg = self.task_config.pruning
prunable_model = tf.keras.models.clone_model(
model,
clone_function=self._make_block_prunable,
)
original_checkpoint = pruning_cfg.pretrained_original_checkpoint
if original_checkpoint is not None:
ckpt = tf.train.Checkpoint(model=prunable_model, **model.checkpoint_items)
status = ckpt.read(original_checkpoint)
status.expect_partial().assert_existing_objects_matched()
pruning_params = {}
if pruning_cfg.sparsity_m_by_n is not None:
pruning_params['sparsity_m_by_n'] = pruning_cfg.sparsity_m_by_n
if pruning_cfg.pruning_schedule == 'PolynomialDecay':
pruning_params['pruning_schedule'] = tfmot.sparsity.keras.PolynomialDecay(
initial_sparsity=pruning_cfg.initial_sparsity,
final_sparsity=pruning_cfg.final_sparsity,
begin_step=pruning_cfg.begin_step,
end_step=pruning_cfg.end_step,
frequency=pruning_cfg.frequency)
elif pruning_cfg.pruning_schedule == 'ConstantSparsity':
pruning_params[
'pruning_schedule'] = tfmot.sparsity.keras.ConstantSparsity(
target_sparsity=pruning_cfg.final_sparsity,
begin_step=pruning_cfg.begin_step,
frequency=pruning_cfg.frequency)
else:
raise NotImplementedError(
          'Only PolynomialDecay and ConstantSparsity are currently '
          'supported; got %s' % pruning_cfg.pruning_schedule)
pruned_model = tfmot.sparsity.keras.prune_low_magnitude(
prunable_model, **pruning_params)
    # Print out prunable weights for debugging purposes.
prunable_layers = collect_prunable_layers(pruned_model)
pruned_weights = []
for layer in prunable_layers:
pruned_weights += [weight.name for weight, _, _ in layer.pruning_vars]
unpruned_weights = [
weight.name
for weight in pruned_model.weights
if weight.name not in pruned_weights
]
logging.info(
'%d / %d weights are pruned.\nPruned weights: [ \n%s \n],\n'
'Unpruned weights: [ \n%s \n],',
len(pruned_weights), len(model.weights), ', '.join(pruned_weights),
', '.join(unpruned_weights))
return pruned_model
def _make_block_prunable(
self, layer: tf.keras.layers.Layer) -> tf.keras.layers.Layer:
if isinstance(layer, tf.keras.Model):
return tf.keras.models.clone_model(
layer, input_tensors=None, clone_function=self._make_block_prunable)
if layer.__class__ not in self._BLOCK_LAYER_SUFFIX_MAP:
return layer
prunable_weights = []
for layer_suffix in self._BLOCK_LAYER_SUFFIX_MAP[layer.__class__]:
for weight in layer.weights:
if weight.name.endswith(layer_suffix):
prunable_weights.append(weight)
def get_prunable_weights():
return prunable_weights
layer.get_prunable_weights = get_prunable_weights
return layer
def collect_prunable_layers(model):
"""Recursively collect the prunable layers in the model."""
prunable_layers = []
for layer in model.layers:
if isinstance(layer, tf.keras.Model):
prunable_layers += collect_prunable_layers(layer)
if layer.__class__.__name__ == 'PruneLowMagnitude':
prunable_layers.append(layer)
return prunable_layers
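# Minimal usage sketch (illustrative; names are placeholders):
#   task = ImageClassificationTask(task_config)  # a config with `pruning` set
#   pruned_model = task.build_model()
#   for layer in collect_prunable_layers(pruned_model):
#     print([weight.name for weight, _, _ in layer.pruning_vars])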
| 5,362 | 35.236486 | 93 | py |
models | models-master/official/projects/detr/optimization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized optimizer to match paper results."""
import dataclasses
import tensorflow as tf
from official.modeling import optimization
from official.nlp import optimization as nlp_optimization
@dataclasses.dataclass
class DETRAdamWConfig(optimization.AdamWeightDecayConfig):
pass
@dataclasses.dataclass
class OptimizerConfig(optimization.OptimizerConfig):
detr_adamw: DETRAdamWConfig = dataclasses.field(
default_factory=DETRAdamWConfig
)
@dataclasses.dataclass
class OptimizationConfig(optimization.OptimizationConfig):
"""Configuration for optimizer and learning rate schedule.
Attributes:
optimizer: optimizer oneof config.
ema: optional exponential moving average optimizer config, if specified, ema
optimizer will be used.
learning_rate: learning rate oneof config.
warmup: warmup oneof config.
"""
optimizer: OptimizerConfig = dataclasses.field(
default_factory=OptimizerConfig
)
# TODO(frederickliu): figure out how to make this configurable.
# TODO(frederickliu): Study if this is needed.
class _DETRAdamW(nlp_optimization.AdamWeightDecay):
"""Custom AdamW to support different lr scaling for backbone.
  The code is copied from AdamWeightDecay and Adam with learning rate scaling.
"""
def _resource_apply_dense(self, grad, var, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
apply_state = kwargs['apply_state']
if 'detr' not in var.name:
lr_t *= 0.1
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
      lr = (coefficients['lr_t'] * 0.1
            if 'detr' not in var.name else coefficients['lr_t'])
if not self.amsgrad:
return tf.raw_ops.ResourceApplyAdam(
var=var.handle,
m=m.handle,
v=v.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=lr,
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
else:
vhat = self.get_slot(var, 'vhat')
return tf.raw_ops.ResourceApplyAdamWithAmsgrad(
var=var.handle,
m=m.handle,
v=v.handle,
vhat=vhat.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=lr,
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
apply_state = kwargs['apply_state']
if 'detr' not in var.name:
lr_t *= 0.1
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, 'm')
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m_t = tf.compat.v1.assign(m, m * coefficients['beta_1_t'],
use_locking=self._use_locking)
with tf.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, 'v')
v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
v_t = tf.compat.v1.assign(v, v * coefficients['beta_2_t'],
use_locking=self._use_locking)
with tf.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
      lr = (coefficients['lr_t'] * 0.1
            if 'detr' not in var.name else coefficients['lr_t'])
if not self.amsgrad:
v_sqrt = tf.sqrt(v_t)
var_update = tf.compat.v1.assign_sub(
var, lr * m_t / (v_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return tf.group(*[var_update, m_t, v_t])
else:
v_hat = self.get_slot(var, 'vhat')
v_hat_t = tf.maximum(v_hat, v_t)
with tf.control_dependencies([v_hat_t]):
v_hat_t = tf.compat.v1.assign(
v_hat, v_hat_t, use_locking=self._use_locking)
v_hat_sqrt = tf.sqrt(v_hat_t)
var_update = tf.compat.v1.assign_sub(
var,
lr* m_t / (v_hat_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return tf.group(*[var_update, m_t, v_t, v_hat_t])
optimization.register_optimizer_cls('detr_adamw', _DETRAdamW)
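# Once registered, configs can select this optimizer by name, e.g.
# OptimizationConfig({'optimizer': {'type': 'detr_adamw', ...}}) as done in
# official/projects/detr/configs/detr.py.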
| 5,882 | 37.703947 | 80 | py |
models | models-master/official/projects/detr/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
# pylint: disable=unused-import
from official.projects.detr.configs import detr
from official.projects.detr.tasks import detection
# pylint: enable=unused-import
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can have a significant impact on model speed by utilizing float16 in the
  # case of GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect
  # only when dtype is float16.
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
app.run(main)
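# Example invocation (paths and experiment name are placeholders):
#   python3 -m official.projects.detr.train \
#     --experiment=detr_coco_tfrecord \
#     --mode=train_and_eval \
#     --model_dir=/tmp/detr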
| 2,652 | 36.366197 | 80 | py |
models | models-master/official/projects/detr/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Vision models export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
EXPERIMENT_TYPE = XX
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
export_saved_model --experiment=${EXPERIMENT_TYPE} \
--export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.detr.configs import detr as exp_cfg # pylint: disable=unused-import
from official.projects.detr.serving import export_module
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
_EXPERIMENT = flags.DEFINE_string('experiment', None,
'experiment type, e.g. detr_coco')
_EXPORT_DIR = flags.DEFINE_string('export_dir', None, 'The export directory.')
_CHECKPOINT_PATH = flags.DEFINE_string('checkpoint_path', None,
'Checkpoint path.')
_CONFIG_FILE = flags.DEFINE_multi_string(
'config_file',
default=None,
    help='YAML/JSON files which specify overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', None, 'The batch size.')
_IMAGE_TYPE = flags.DEFINE_string(
'input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example` and `tflite`.')
_INPUT_IMAGE_SIZE = flags.DEFINE_string(
'input_image_size', '224,224',
    'The comma-separated string of two integers representing the height, width '
'of the input to the model.')
def main(_):
params = exp_factory.get_exp_config(_EXPERIMENT.value)
for config_file in _CONFIG_FILE.value or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=False)
if _PARAMS_OVERRIDE.value:
params = hyperparams.override_params_dict(
params, _PARAMS_OVERRIDE.value, is_strict=False)
params.validate()
params.lock()
input_image_size = [int(x) for x in _INPUT_IMAGE_SIZE.value.split(',')]
module = export_module.DETRModule(
params=params,
batch_size=_BATCH_SIZE.value,
input_image_size=input_image_size,
input_type=_IMAGE_TYPE.value,
num_channels=3)
export_saved_model_lib.export_inference_graph(
input_type=_IMAGE_TYPE.value,
batch_size=_BATCH_SIZE.value,
input_image_size=input_image_size,
params=params,
checkpoint_path=_CHECKPOINT_PATH.value,
export_dir=_EXPORT_DIR.value,
export_module=module)
if __name__ == '__main__':
app.run(main)
| 3,995 | 35.327273 | 91 | py |
models | models-master/official/projects/detr/serving/export_module_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for DETR export module."""
import io
import os
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.core import exp_factory
from official.projects.detr.configs import detr as exp_cfg # pylint: disable=unused-import
from official.projects.detr.serving import export_module
class ExportModuleTest(tf.test.TestCase, parameterized.TestCase):
def _get_module(self, input_type):
params = exp_factory.get_exp_config('detr_coco')
return export_module.DETRModule(
params,
batch_size=1,
input_image_size=[384, 384],
input_type=input_type)
def _export_from_module(self, module, input_type, save_directory):
signatures = module.get_inference_signatures(
{input_type: 'serving_default'})
tf.saved_model.save(module, save_directory, signatures=signatures)
def _get_dummy_input(self, input_type):
"""Gets dummy input for the given input type."""
if input_type == 'image_tensor':
return tf.zeros((1, 384, 384, 3), dtype=np.uint8)
elif input_type == 'image_bytes':
image = Image.fromarray(np.zeros((384, 384, 3), dtype=np.uint8))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((384, 384, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
})).SerializeToString()
return [example]
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'image_bytes'},
{'input_type': 'tf_example'},
)
def test_export(self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
module = self._get_module(input_type)
self._export_from_module(module, input_type, tmp_dir)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(tmp_dir)
predict_fn = imported.signatures['serving_default']
images = self._get_dummy_input(input_type)
outputs = predict_fn(tf.constant(images))
self.assertNotEmpty(outputs['detection_boxes'])
self.assertNotEmpty(outputs['detection_classes'])
self.assertNotEmpty(outputs['detection_scores'])
self.assertNotEmpty(outputs['num_detections'])
if __name__ == '__main__':
tf.test.main()
| 3,487 | 34.232323 | 91 | py |
models | models-master/official/projects/detr/serving/export_module.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export module for DETR model."""
import tensorflow as tf
from official.projects.detr.modeling import detr
from official.vision.modeling import backbones
from official.vision.ops import preprocess_ops
from official.vision.serving import detection
class DETRModule(detection.DetectionModule):
"""DETR detection module."""
def _build_model(self) -> tf.keras.Model:
input_specs = tf.keras.layers.InputSpec(shape=[self._batch_size] +
self._input_image_size +
[self._num_channels])
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=self.params.task.model.backbone,
norm_activation_config=self.params.task.model.norm_activation)
model = detr.DETR(backbone, self.params.task.model.backbone_endpoint_name,
self.params.task.model.num_queries,
self.params.task.model.hidden_size,
self.params.task.model.num_classes,
self.params.task.model.num_encoder_layers,
self.params.task.model.num_decoder_layers)
model(tf.keras.Input(input_specs.shape[1:]))
return model
def _build_inputs(self, image: tf.Tensor) -> tuple[tf.Tensor, tf.Tensor]:
"""Builds detection model inputs for serving."""
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(
image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
image, image_info = preprocess_ops.resize_image(
image, size=self._input_image_size)
return image, image_info
def serve(self, images: tf.Tensor) -> dict[str, tf.Tensor]:
"""Cast image to float and run inference.
Args:
images: uint8 Tensor of shape [batch_size, None, None, 3]
Returns:
Tensor holding classification output logits.
"""
# Skip image preprocessing when input_type is tflite so it is compatible
# with TFLite quantization.
image_info = None
if self._input_type != 'tflite':
with tf.device('cpu:0'):
images = tf.cast(images, dtype=tf.float32)
images_spec = tf.TensorSpec(
shape=self._input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
self._build_inputs,
elems=images,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
outputs = self.inference_step(images)[-1]
outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections']
}
if image_info is not None:
outputs['detection_boxes'] = outputs['detection_boxes'] * tf.expand_dims(
tf.concat([
image_info[:, 1:2, 0], image_info[:, 1:2, 1],
image_info[:, 1:2, 0], image_info[:, 1:2, 1]
],
axis=1),
axis=1)
outputs.update({'image_info': image_info})
return outputs
| 3,919 | 36.692308 | 79 | py |
models | models-master/official/projects/detr/configs/detr.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DETR configurations."""
import dataclasses
import os
from typing import List, Optional, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.detr import optimization
from official.projects.detr.dataloaders import coco
from official.vision.configs import backbones
from official.vision.configs import common
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
input_path: str = ''
tfds_name: str = ''
tfds_split: str = 'train'
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'bfloat16'
  decoder: common.DataDecoder = dataclasses.field(
      default_factory=common.DataDecoder)
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
drop_remainder: bool = True
@dataclasses.dataclass
class Losses(hyperparams.Config):
class_offset: int = 0
lambda_cls: float = 1.0
lambda_box: float = 5.0
lambda_giou: float = 2.0
background_cls_weight: float = 0.1
l2_weight_decay: float = 1e-4
@dataclasses.dataclass
class Detr(hyperparams.Config):
"""Detr model definations."""
num_queries: int = 100
hidden_size: int = 256
num_classes: int = 91 # 0: background
num_encoder_layers: int = 6
num_decoder_layers: int = 6
input_size: List[int] = dataclasses.field(default_factory=list)
  backbone: backbones.Backbone = dataclasses.field(
      default_factory=lambda: backbones.Backbone(
          type='resnet',
          resnet=backbones.ResNet(model_id=50, bn_trainable=False)))
  norm_activation: common.NormActivation = dataclasses.field(
      default_factory=common.NormActivation)
backbone_endpoint_name: str = '5'
@dataclasses.dataclass
class DetrTask(cfg.TaskConfig):
model: Detr = dataclasses.field(default_factory=Detr)
train_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
validation_data: cfg.DataConfig = dataclasses.field(default_factory=cfg.DataConfig)
losses: Losses = dataclasses.field(default_factory=Losses)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[str, List[str]] = 'all' # all, backbone
annotation_file: Optional[str] = None
per_category_metrics: bool = False
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('detr_coco')
def detr_coco() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 64
eval_batch_size = 64
num_train_data = COCO_TRAIN_EXAMPLES
num_steps_per_epoch = num_train_data // train_batch_size
train_steps = 500 * num_steps_per_epoch # 500 epochs
decay_at = train_steps - 100 * num_steps_per_epoch # 400 epochs
config = cfg.ExperimentConfig(
task=DetrTask(
init_checkpoint='',
init_checkpoint_modules='backbone',
model=Detr(
num_classes=81,
input_size=[1333, 1333, 3],
norm_activation=common.NormActivation()),
losses=Losses(),
train_data=coco.COCODataConfig(
tfds_name='coco/2017',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=1000,
),
validation_data=coco.COCODataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=-1,
steps_per_loop=10000,
summary_interval=10000,
checkpoint_interval=10000,
validation_interval=10000,
max_to_keep=1,
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='AP',
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'detr_adamw',
'detr_adamw': {
'weight_decay_rate': 1e-4,
'global_clipnorm': 0.1,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [decay_at],
'values': [0.0001, 1.0e-05]
}
},
})),
restrictions=[
'task.train_data.is_training != None',
])
return config
@exp_factory.register_config_factory('detr_coco_tfrecord')
def detr_coco_tfrecord() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 64
eval_batch_size = 64
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
train_steps = 300 * steps_per_epoch # 300 epochs
decay_at = train_steps - 100 * steps_per_epoch # 200 epochs
config = cfg.ExperimentConfig(
task=DetrTask(
init_checkpoint='',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=Detr(
input_size=[1333, 1333, 3],
norm_activation=common.NormActivation()),
losses=Losses(),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=1000,
),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
)),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
validation_interval=5 * steps_per_epoch,
max_to_keep=1,
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='AP',
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'detr_adamw',
'detr_adamw': {
'weight_decay_rate': 1e-4,
'global_clipnorm': 0.1,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [decay_at],
'values': [0.0001, 1.0e-05]
}
},
})),
restrictions=[
'task.train_data.is_training != None',
])
return config
@exp_factory.register_config_factory('detr_coco_tfds')
def detr_coco_tfds() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 64
eval_batch_size = 64
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
train_steps = 300 * steps_per_epoch # 300 epochs
decay_at = train_steps - 100 * steps_per_epoch # 200 epochs
config = cfg.ExperimentConfig(
task=DetrTask(
init_checkpoint='',
init_checkpoint_modules='backbone',
model=Detr(
num_classes=81,
input_size=[1333, 1333, 3],
norm_activation=common.NormActivation()),
losses=Losses(class_offset=1),
train_data=DataConfig(
tfds_name='coco/2017',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=1000,
),
validation_data=DataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False)),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
validation_interval=5 * steps_per_epoch,
max_to_keep=1,
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='AP',
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'detr_adamw',
'detr_adamw': {
'weight_decay_rate': 1e-4,
'global_clipnorm': 0.1,
# Avoid AdamW legacy behavior.
'gradient_clip_norm': 0.0
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [decay_at],
'values': [0.0001, 1.0e-05]
}
},
})),
restrictions=[
'task.train_data.is_training != None',
])
return config
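# Each factory above is registered with exp_factory, so a config can be
# retrieved by name (see configs/detr_test.py):
#   config = exp_factory.get_exp_config('detr_coco_tfrecord')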
| 10,054 | 35.169065 | 99 | py |
models | models-master/official/projects/detr/configs/detr_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for detr."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.detr.configs import detr as exp_cfg
from official.projects.detr.dataloaders import coco
class DetrTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('detr_coco',))
def test_detr_configs_tfds(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.DetrTask)
self.assertIsInstance(config.task.train_data, coco.COCODataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
@parameterized.parameters(('detr_coco_tfrecord'), ('detr_coco_tfds'))
def test_detr_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, exp_cfg.DetrTask)
self.assertIsInstance(config.task.train_data, cfg.DataConfig)
config.task.train_data.is_training = None
with self.assertRaises(KeyError):
config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,931 | 36.153846 | 74 | py |
models | models-master/official/projects/detr/dataloaders/detr_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO data loader for DETR."""
from typing import Tuple
import tensorflow as tf
from official.vision.dataloaders import parser
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
RESIZE_SCALES = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
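# Short-side sizes randomly sampled for multi-scale training; evaluation uses
# the largest scale (800).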
class Parser(parser.Parser):
"""Parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
class_offset: int = 0,
output_size: Tuple[int, int] = (1333, 1333),
max_num_boxes: int = 100,
resize_scales: Tuple[int, ...] = RESIZE_SCALES,
aug_rand_hflip=True):
self._class_offset = class_offset
self._output_size = output_size
self._max_num_boxes = max_num_boxes
self._resize_scales = resize_scales
self._aug_rand_hflip = aug_rand_hflip
def _parse_train_data(self, data):
"""Parses data for training and evaluation."""
classes = data['groundtruth_classes'] + self._class_offset
boxes = data['groundtruth_boxes']
is_crowd = data['groundtruth_is_crowd']
# Gets original image.
image = data['image']
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image)
image, boxes, _ = preprocess_ops.random_horizontal_flip(image, boxes)
do_crop = tf.greater(tf.random.uniform([]), 0.5)
if do_crop:
# Rescale
boxes = box_ops.denormalize_boxes(boxes, tf.shape(image)[:2])
index = tf.random.categorical(tf.zeros([1, 3]), 1)[0]
scales = tf.gather([400.0, 500.0, 600.0], index, axis=0)
short_side = scales[0]
image, image_info = preprocess_ops.resize_image(image, short_side)
boxes = preprocess_ops.resize_and_crop_boxes(boxes, image_info[2, :],
image_info[1, :],
image_info[3, :])
boxes = box_ops.normalize_boxes(boxes, image_info[1, :])
      # Do cropping
shape = tf.cast(image_info[1], dtype=tf.int32)
h = tf.random.uniform([],
384,
tf.math.minimum(shape[0], 600),
dtype=tf.int32)
w = tf.random.uniform([],
384,
tf.math.minimum(shape[1], 600),
dtype=tf.int32)
i = tf.random.uniform([], 0, shape[0] - h + 1, dtype=tf.int32)
j = tf.random.uniform([], 0, shape[1] - w + 1, dtype=tf.int32)
image = tf.image.crop_to_bounding_box(image, i, j, h, w)
boxes = tf.clip_by_value(
(boxes[..., :] * tf.cast(
tf.stack([shape[0], shape[1], shape[0], shape[1]]),
dtype=tf.float32) -
tf.cast(tf.stack([i, j, i, j]), dtype=tf.float32)) /
tf.cast(tf.stack([h, w, h, w]), dtype=tf.float32), 0.0, 1.0)
scales = tf.constant(self._resize_scales, dtype=tf.float32)
index = tf.random.categorical(tf.zeros([1, 11]), 1)[0]
scales = tf.gather(scales, index, axis=0)
image_shape = tf.shape(image)[:2]
boxes = box_ops.denormalize_boxes(boxes, image_shape)
short_side = scales[0]
image, image_info = preprocess_ops.resize_image(image, short_side,
max(self._output_size))
boxes = preprocess_ops.resize_and_crop_boxes(boxes, image_info[2, :],
image_info[1, :],
image_info[3, :])
boxes = box_ops.normalize_boxes(boxes, image_info[1, :])
# Filters out ground truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
is_crowd = tf.gather(is_crowd, indices)
boxes = box_ops.yxyx_to_cycxhw(boxes)
image = tf.image.pad_to_bounding_box(image, 0, 0, self._output_size[0],
self._output_size[1])
labels = {
'classes':
preprocess_ops.clip_or_pad_to_fixed_size(classes,
self._max_num_boxes),
'boxes':
preprocess_ops.clip_or_pad_to_fixed_size(boxes, self._max_num_boxes)
}
return image, labels
def _parse_eval_data(self, data):
"""Parses data for training and evaluation."""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
is_crowd = data['groundtruth_is_crowd']
# Gets original image and its size.
image = data['image']
# Normalizes image with mean and std pixel values.
image = preprocess_ops.normalize_image(image)
scales = tf.constant([self._resize_scales[-1]], tf.float32)
image_shape = tf.shape(image)[:2]
boxes = box_ops.denormalize_boxes(boxes, image_shape)
gt_boxes = boxes
short_side = scales[0]
image, image_info = preprocess_ops.resize_image(image, short_side,
max(self._output_size))
boxes = preprocess_ops.resize_and_crop_boxes(boxes, image_info[2, :],
image_info[1, :],
image_info[3, :])
boxes = box_ops.normalize_boxes(boxes, image_info[1, :])
# Filters out ground truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
is_crowd = tf.gather(is_crowd, indices)
boxes = box_ops.yxyx_to_cycxhw(boxes)
image = tf.image.pad_to_bounding_box(image, 0, 0, self._output_size[0],
self._output_size[1])
labels = {
'classes':
preprocess_ops.clip_or_pad_to_fixed_size(classes,
self._max_num_boxes),
'boxes':
preprocess_ops.clip_or_pad_to_fixed_size(boxes, self._max_num_boxes)
}
labels.update({
'id':
int(data['source_id']),
'image_info':
image_info,
'is_crowd':
preprocess_ops.clip_or_pad_to_fixed_size(is_crowd,
self._max_num_boxes),
'gt_boxes':
preprocess_ops.clip_or_pad_to_fixed_size(gt_boxes,
self._max_num_boxes),
})
return image, labels
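# Usage sketch (illustrative): the base vision parser dispatches to the
# methods above via parse_fn, e.g.
#   parser_fn = Parser(output_size=(1333, 1333)).parse_fn(is_training=True)
#   image, labels = parser_fn(decoded_example)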
| 7,100 | 39.346591 | 80 | py |
models | models-master/official/projects/detr/dataloaders/coco_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_models.official.projects.detr.dataloaders.coco."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from official.projects.detr.dataloaders import coco
def _gen_fn():
h = np.random.randint(0, 300)
w = np.random.randint(0, 300)
num_boxes = np.random.randint(0, 50)
return {
'image': np.ones(shape=(h, w, 3), dtype=np.uint8),
'image/id': np.random.randint(0, 100),
'image/filename': 'test',
'objects': {
'is_crowd': np.ones(shape=(num_boxes), dtype=bool),
'bbox': np.ones(shape=(num_boxes, 4), dtype=np.float32),
'label': np.ones(shape=(num_boxes), dtype=np.int64),
'id': np.ones(shape=(num_boxes), dtype=np.int64),
'area': np.ones(shape=(num_boxes), dtype=np.int64),
}
}
class CocoDataloaderTest(tf.test.TestCase, parameterized.TestCase):
def test_load_dataset(self):
output_size = 1280
max_num_boxes = 100
batch_size = 2
data_config = coco.COCODataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=False,
global_batch_size=batch_size,
output_size=(output_size, output_size),
max_num_boxes=max_num_boxes,
)
num_examples = 10
def as_dataset(self, *args, **kwargs):
del args
del kwargs
return tf.data.Dataset.from_generator(
lambda: (_gen_fn() for i in range(num_examples)),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
with tfds.testing.mock_data(num_examples=num_examples,
as_dataset_fn=as_dataset):
dataset = coco.COCODataLoader(data_config).load()
dataset_iter = iter(dataset)
images, labels = next(dataset_iter)
self.assertEqual(images.shape, (batch_size, output_size, output_size, 3))
self.assertEqual(labels['classes'].shape, (batch_size, max_num_boxes))
self.assertEqual(labels['boxes'].shape, (batch_size, max_num_boxes, 4))
self.assertEqual(labels['id'].shape, (batch_size,))
self.assertEqual(
labels['image_info'].shape, (batch_size, 4, 2))
self.assertEqual(labels['is_crowd'].shape, (batch_size, max_num_boxes))
@parameterized.named_parameters(
('training', True),
('validation', False))
def test_preprocess(self, is_training):
output_size = 1280
max_num_boxes = 100
batch_size = 2
data_config = coco.COCODataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=is_training,
global_batch_size=batch_size,
output_size=(output_size, output_size),
max_num_boxes=max_num_boxes,
)
dl = coco.COCODataLoader(data_config)
inputs = _gen_fn()
image, label = dl.preprocess(inputs)
self.assertEqual(image.shape, (output_size, output_size, 3))
self.assertEqual(label['classes'].shape, (max_num_boxes))
self.assertEqual(label['boxes'].shape, (max_num_boxes, 4))
if not is_training:
self.assertDTypeEqual(label['id'], int)
self.assertEqual(
label['image_info'].shape, (4, 2))
self.assertEqual(label['is_crowd'].shape, (max_num_boxes))
if __name__ == '__main__':
tf.test.main()
| 3,926 | 34.0625 | 79 | py |
models | models-master/official/projects/detr/dataloaders/coco.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO data loader for DETR."""
import dataclasses
from typing import Optional, Tuple
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import input_reader
from official.vision.ops import box_ops
from official.vision.ops import preprocess_ops
@dataclasses.dataclass
class COCODataConfig(cfg.DataConfig):
"""Data config for COCO."""
output_size: Tuple[int, int] = (1333, 1333)
max_num_boxes: int = 100
resize_scales: Tuple[int, ...] = (
480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
class COCODataLoader:
"""A class to load dataset for COCO detection task."""
def __init__(self, params: COCODataConfig):
self._params = params
def preprocess(self, inputs):
"""Preprocess COCO for DETR."""
image = inputs['image']
boxes = inputs['objects']['bbox']
classes = inputs['objects']['label'] + 1
is_crowd = inputs['objects']['is_crowd']
image = preprocess_ops.normalize_image(image)
if self._params.is_training:
image, boxes, _ = preprocess_ops.random_horizontal_flip(image, boxes)
do_crop = tf.greater(tf.random.uniform([]), 0.5)
if do_crop:
# Rescale
boxes = box_ops.denormalize_boxes(boxes, tf.shape(image)[:2])
index = tf.random.categorical(tf.zeros([1, 3]), 1)[0]
scales = tf.gather([400.0, 500.0, 600.0], index, axis=0)
short_side = scales[0]
image, image_info = preprocess_ops.resize_image(image, short_side)
boxes = preprocess_ops.resize_and_crop_boxes(boxes,
image_info[2, :],
image_info[1, :],
image_info[3, :])
boxes = box_ops.normalize_boxes(boxes, image_info[1, :])
        # Do cropping
shape = tf.cast(image_info[1], dtype=tf.int32)
h = tf.random.uniform(
[], 384, tf.math.minimum(shape[0], 600), dtype=tf.int32)
w = tf.random.uniform(
[], 384, tf.math.minimum(shape[1], 600), dtype=tf.int32)
i = tf.random.uniform([], 0, shape[0] - h + 1, dtype=tf.int32)
j = tf.random.uniform([], 0, shape[1] - w + 1, dtype=tf.int32)
image = tf.image.crop_to_bounding_box(image, i, j, h, w)
boxes = tf.clip_by_value(
(boxes[..., :] * tf.cast(
tf.stack([shape[0], shape[1], shape[0], shape[1]]),
dtype=tf.float32) -
tf.cast(tf.stack([i, j, i, j]), dtype=tf.float32)) /
tf.cast(tf.stack([h, w, h, w]), dtype=tf.float32), 0.0, 1.0)
scales = tf.constant(
self._params.resize_scales,
dtype=tf.float32)
index = tf.random.categorical(tf.zeros([1, 11]), 1)[0]
scales = tf.gather(scales, index, axis=0)
else:
scales = tf.constant([self._params.resize_scales[-1]], tf.float32)
image_shape = tf.shape(image)[:2]
boxes = box_ops.denormalize_boxes(boxes, image_shape)
gt_boxes = boxes
short_side = scales[0]
image, image_info = preprocess_ops.resize_image(
image,
short_side,
max(self._params.output_size))
boxes = preprocess_ops.resize_and_crop_boxes(boxes,
image_info[2, :],
image_info[1, :],
image_info[3, :])
boxes = box_ops.normalize_boxes(boxes, image_info[1, :])
# Filters out ground truth boxes that are all zeros.
indices = box_ops.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
is_crowd = tf.gather(is_crowd, indices)
boxes = box_ops.yxyx_to_cycxhw(boxes)
image = tf.image.pad_to_bounding_box(
image, 0, 0, self._params.output_size[0], self._params.output_size[1])
labels = {
'classes':
preprocess_ops.clip_or_pad_to_fixed_size(
classes, self._params.max_num_boxes),
'boxes':
preprocess_ops.clip_or_pad_to_fixed_size(
boxes, self._params.max_num_boxes)
}
if not self._params.is_training:
labels.update({
'id':
inputs['image/id'],
'image_info':
image_info,
'is_crowd':
preprocess_ops.clip_or_pad_to_fixed_size(
is_crowd, self._params.max_num_boxes),
'gt_boxes':
preprocess_ops.clip_or_pad_to_fixed_size(
gt_boxes, self._params.max_num_boxes),
})
return image, labels
def _transform_and_batch_fn(
self,
dataset,
input_context: Optional[tf.distribute.InputContext] = None):
"""Preprocess and batch."""
dataset = dataset.map(
self.preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
per_replica_batch_size = input_context.get_per_replica_batch_size(
self._params.global_batch_size
) if input_context else self._params.global_batch_size
dataset = dataset.batch(
per_replica_batch_size, drop_remainder=self._params.drop_remainder)
return dataset
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
params=self._params,
decoder_fn=None,
transform_and_batch_fn=self._transform_and_batch_fn)
return reader.read(input_context)
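# Minimal usage sketch (illustrative; mirrors dataloaders/coco_test.py):
#   params = COCODataConfig(tfds_name='coco/2017', tfds_split='validation',
#                           is_training=False, global_batch_size=2)
#   dataset = COCODataLoader(params).load()
#   images, labels = next(iter(dataset))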
| 6,112 | 37.689873 | 78 | py |
models | models-master/official/projects/detr/modeling/transformer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transformer."""
import tensorflow as tf
from official.projects.detr.modeling import transformer
class TransformerTest(tf.test.TestCase):
def test_transformer_encoder_block(self):
batch_size = 2
sequence_length = 100
feature_size = 256
num_attention_heads = 2
inner_dim = 256
inner_activation = 'relu'
model = transformer.TransformerEncoderBlock(num_attention_heads, inner_dim,
inner_activation)
input_tensor = tf.ones((batch_size, sequence_length, feature_size))
attention_mask = tf.ones((batch_size, sequence_length, sequence_length),
dtype=tf.int64)
pos_embed = tf.ones((batch_size, sequence_length, feature_size))
out = model([input_tensor, attention_mask, pos_embed])
self.assertAllEqual(
tf.shape(out), (batch_size, sequence_length, feature_size))
def test_transformer_encoder_block_get_config(self):
num_attention_heads = 2
inner_dim = 256
inner_activation = 'relu'
model = transformer.TransformerEncoderBlock(num_attention_heads, inner_dim,
inner_activation)
config = model.get_config()
expected_config = {
'name': 'transformer_encoder_block',
'trainable': True,
'dtype': 'float32',
'num_attention_heads': 2,
'inner_dim': 256,
'inner_activation': 'relu',
'output_dropout': 0.0,
'attention_dropout': 0.0,
'output_range': None,
'kernel_initializer': {
'class_name': 'GlorotUniform',
'config': {
'seed': None}
},
'bias_initializer': {
'class_name': 'Zeros',
'config': {}
},
'kernel_regularizer': None,
'bias_regularizer': None,
'activity_regularizer': None,
'kernel_constraint': None,
'bias_constraint': None,
'use_bias': True,
'norm_first': False,
'norm_epsilon': 1e-12,
'inner_dropout': 0.0,
'attention_initializer': {
'class_name': 'GlorotUniform',
'config': {'seed': None}
},
'attention_axes': None}
self.assertAllEqual(expected_config, config)
def test_transformer_encoder(self):
batch_size = 2
sequence_length = 100
feature_size = 256
num_layers = 2
num_attention_heads = 2
intermediate_size = 256
model = transformer.TransformerEncoder(
num_layers=num_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size)
input_tensor = tf.ones((batch_size, sequence_length, feature_size))
attention_mask = tf.ones((batch_size, sequence_length, sequence_length),
dtype=tf.int64)
pos_embed = tf.ones((batch_size, sequence_length, feature_size))
out = model(input_tensor, attention_mask, pos_embed)
self.assertAllEqual(
tf.shape(out), (batch_size, sequence_length, feature_size))
def test_transformer_encoder_get_config(self):
num_layers = 2
num_attention_heads = 2
intermediate_size = 256
model = transformer.TransformerEncoder(
num_layers=num_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size)
config = model.get_config()
expected_config = {
'name': 'transformer_encoder',
'trainable': True,
'dtype': 'float32',
'num_layers': 2,
'num_attention_heads': 2,
'intermediate_size': 256,
'activation': 'relu',
'dropout_rate': 0.0,
'attention_dropout_rate': 0.0,
'use_bias': False,
'norm_first': True,
'norm_epsilon': 1e-06,
'intermediate_dropout': 0.0
}
self.assertAllEqual(expected_config, config)
def test_transformer_decoder_block(self):
batch_size = 2
sequence_length = 100
memory_length = 200
feature_size = 256
num_attention_heads = 2
intermediate_size = 256
intermediate_activation = 'relu'
model = transformer.TransformerDecoderBlock(num_attention_heads,
intermediate_size,
intermediate_activation)
input_tensor = tf.ones((batch_size, sequence_length, feature_size))
memory = tf.ones((batch_size, memory_length, feature_size))
attention_mask = tf.ones((batch_size, sequence_length, memory_length),
dtype=tf.int64)
self_attention_mask = tf.ones(
(batch_size, sequence_length, sequence_length), dtype=tf.int64)
input_pos_embed = tf.ones((batch_size, sequence_length, feature_size))
memory_pos_embed = tf.ones((batch_size, memory_length, feature_size))
out, _ = model([
input_tensor, memory, attention_mask, self_attention_mask,
input_pos_embed, memory_pos_embed
])
self.assertAllEqual(
tf.shape(out), (batch_size, sequence_length, feature_size))
def test_transformer_decoder_block_get_config(self):
num_attention_heads = 2
intermediate_size = 256
intermediate_activation = 'relu'
model = transformer.TransformerDecoderBlock(num_attention_heads,
intermediate_size,
intermediate_activation)
config = model.get_config()
expected_config = {
'name': 'transformer_decoder_block',
'trainable': True,
'dtype': 'float32',
'num_attention_heads': 2,
'intermediate_size': 256,
'intermediate_activation': 'relu',
'dropout_rate': 0.0,
'attention_dropout_rate': 0.0,
'kernel_initializer': {
'class_name': 'GlorotUniform',
'config': {
'seed': None
}
},
'bias_initializer': {
'class_name': 'Zeros',
'config': {}
},
'kernel_regularizer': None,
'bias_regularizer': None,
'activity_regularizer': None,
'kernel_constraint': None,
'bias_constraint': None,
'use_bias': True,
'norm_first': False,
'norm_epsilon': 1e-12,
'intermediate_dropout': 0.0,
'attention_initializer': {
'class_name': 'GlorotUniform',
'config': {
'seed': None
}
}
}
self.assertAllEqual(expected_config, config)
def test_transformer_decoder(self):
batch_size = 2
sequence_length = 100
memory_length = 200
feature_size = 256
num_layers = 2
num_attention_heads = 2
intermediate_size = 256
model = transformer.TransformerDecoder(
num_layers=num_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size)
input_tensor = tf.ones((batch_size, sequence_length, feature_size))
memory = tf.ones((batch_size, memory_length, feature_size))
attention_mask = tf.ones((batch_size, sequence_length, memory_length),
dtype=tf.int64)
self_attention_mask = tf.ones(
(batch_size, sequence_length, sequence_length), dtype=tf.int64)
input_pos_embed = tf.ones((batch_size, sequence_length, feature_size))
memory_pos_embed = tf.ones((batch_size, memory_length, feature_size))
outs = model(
input_tensor,
memory,
self_attention_mask,
attention_mask,
return_all_decoder_outputs=True,
input_pos_embed=input_pos_embed,
memory_pos_embed=memory_pos_embed)
    self.assertLen(outs, 2)  # Intermediate decoded outputs.
for out in outs:
self.assertAllEqual(
tf.shape(out), (batch_size, sequence_length, feature_size))
def test_transformer_decoder_get_config(self):
num_layers = 2
num_attention_heads = 2
intermediate_size = 256
model = transformer.TransformerDecoder(
num_layers=num_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size)
config = model.get_config()
expected_config = {
'name': 'transformer_decoder',
'trainable': True,
'dtype': 'float32',
'num_layers': 2,
'num_attention_heads': 2,
'intermediate_size': 256,
'activation': 'relu',
'dropout_rate': 0.0,
'attention_dropout_rate': 0.0,
'use_bias': False,
'norm_first': True,
'norm_epsilon': 1e-06,
'intermediate_dropout': 0.0
}
self.assertAllEqual(expected_config, config)
if __name__ == '__main__':
tf.test.main()
| 9,256 | 34.064394 | 79 | py |
models | models-master/official/projects/detr/modeling/detr.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements End-to-End Object Detection with Transformers.
Model paper: https://arxiv.org/abs/2005.12872
This module does not support Keras de/serialization. Please use
tf.train.Checkpoint for object-based saving and loading and tf.saved_model.save
for graph serialization.
"""
import math
from typing import Any, List
import tensorflow as tf
from official.modeling import tf_utils
from official.projects.detr.modeling import transformer
from official.vision.ops import box_ops
def position_embedding_sine(attention_mask,
num_pos_features=256,
temperature=10000.,
normalize=True,
scale=2 * math.pi):
"""Sine-based positional embeddings for 2D images.
Args:
attention_mask: a `bool` Tensor specifying the size of the input image to
the Transformer and which elements are padded, of size [batch_size,
height, width]
    num_pos_features: an `int` specifying the number of positional features,
      should be equal to the hidden size of the Transformer network.
temperature: a `float` specifying the temperature of the positional
embedding. Any type that is converted to a `float` can also be accepted.
normalize: a `bool` determining whether the positional embeddings should be
normalized between [0, scale] before application of the sine and cos
functions.
    scale: a `float` that, when normalize is True, scales the embeddings
      before the sine and cosine functions are applied.
Returns:
embeddings: a `float` tensor of the same shape as input_tensor specifying
the positional embeddings based on sine features.
"""
if num_pos_features % 2 != 0:
raise ValueError(
"Number of embedding features (num_pos_features) must be even when "
"column and row embeddings are concatenated.")
num_pos_features = num_pos_features // 2
# Produce row and column embeddings based on total size of the image
# <tf.float>[batch_size, height, width]
attention_mask = tf.cast(attention_mask, tf.float32)
row_embedding = tf.cumsum(attention_mask, 1)
col_embedding = tf.cumsum(attention_mask, 2)
if normalize:
eps = 1e-6
row_embedding = row_embedding / (row_embedding[:, -1:, :] + eps) * scale
col_embedding = col_embedding / (col_embedding[:, :, -1:] + eps) * scale
dim_t = tf.range(num_pos_features, dtype=row_embedding.dtype)
dim_t = tf.pow(temperature, 2 * (dim_t // 2) / num_pos_features)
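  # dim_t[i] = temperature ** (2 * (i // 2) / num_pos_features): each (sin,
  # cos) feature pair shares a frequency that decays geometrically with i.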
# Creates positional embeddings for each row and column position
# <tf.float>[batch_size, height, width, num_pos_features]
pos_row = tf.expand_dims(row_embedding, -1) / dim_t
pos_col = tf.expand_dims(col_embedding, -1) / dim_t
pos_row = tf.stack(
[tf.sin(pos_row[:, :, :, 0::2]),
tf.cos(pos_row[:, :, :, 1::2])], axis=4)
pos_col = tf.stack(
[tf.sin(pos_col[:, :, :, 0::2]),
tf.cos(pos_col[:, :, :, 1::2])], axis=4)
final_shape = tf_utils.get_shape_list(pos_row)[:3] + [-1]
pos_row = tf.reshape(pos_row, final_shape)
pos_col = tf.reshape(pos_col, final_shape)
output = tf.concat([pos_row, pos_col], -1)
embeddings = tf.cast(output, tf.float32)
return embeddings
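# A minimal usage sketch of position_embedding_sine. Illustrative only: the
# all-valid mask and the shapes below are assumptions, not requirements.
def _position_embedding_sine_example():
  """Returns sine embeddings for a batch of fully-valid 7x7 feature maps."""
  attention_mask = tf.ones([2, 7, 7], dtype=tf.bool)  # no padded pixels.
  embeddings = position_embedding_sine(attention_mask, num_pos_features=256)
  # embeddings: [2, 7, 7, 256] -- 128 row features concatenated with 128
  # column features along the channel axis.
  return embeddings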
def postprocess(outputs: dict[str, tf.Tensor]) -> dict[str, tf.Tensor]:
"""Performs post-processing on model output.
Args:
outputs: The raw model output.
Returns:
Postprocessed model output.
"""
predictions = {
"detection_boxes": # Box coordinates are relative values here.
box_ops.cycxhw_to_yxyx(outputs["box_outputs"]),
"detection_scores":
tf.math.reduce_max(
tf.nn.softmax(outputs["cls_outputs"])[:, :, 1:], axis=-1),
"detection_classes":
tf.math.argmax(outputs["cls_outputs"][:, :, 1:], axis=-1) + 1,
      # TODO: fix this heuristic; num_detections is not used at the moment.
"num_detections":
tf.reduce_sum(
tf.cast(
tf.math.greater(
tf.math.reduce_max(outputs["cls_outputs"], axis=-1), 0),
tf.int32),
axis=-1)
}
return predictions
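# A hedged sketch of calling postprocess on raw model outputs. The random
# tensors below are placeholders; 100 queries and 92 classes are assumed.
def _postprocess_example():
  """Converts raw cls/box outputs into detection-style predictions."""
  outputs = {
      "cls_outputs": tf.random.normal([2, 100, 92]),  # logits, class 0 = bg.
      "box_outputs": tf.random.uniform([2, 100, 4]),  # (cy, cx, h, w).
  }
  predictions = postprocess(outputs)
  # predictions["detection_boxes"]: [2, 100, 4] in (ymin, xmin, ymax, xmax),
  # still in relative coordinates.
  return predictions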
class DETR(tf.keras.Model):
"""DETR model with Keras.
DETR consists of backbone, query embedding, DETRTransformer,
class and box heads.
"""
def __init__(self,
backbone,
backbone_endpoint_name,
num_queries,
hidden_size,
num_classes,
num_encoder_layers=6,
num_decoder_layers=6,
dropout_rate=0.1,
**kwargs):
super().__init__(**kwargs)
self._num_queries = num_queries
self._hidden_size = hidden_size
self._num_classes = num_classes
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
self._dropout_rate = dropout_rate
if hidden_size % 2 != 0:
raise ValueError("hidden_size must be a multiple of 2.")
self._backbone = backbone
self._backbone_endpoint_name = backbone_endpoint_name
def build(self, input_shape=None):
self._input_proj = tf.keras.layers.Conv2D(
self._hidden_size, 1, name="detr/conv2d")
self._build_detection_decoder()
super().build(input_shape)
def _build_detection_decoder(self):
"""Builds detection decoder."""
self._transformer = DETRTransformer(
num_encoder_layers=self._num_encoder_layers,
num_decoder_layers=self._num_decoder_layers,
dropout_rate=self._dropout_rate)
self._query_embeddings = self.add_weight(
"detr/query_embeddings",
shape=[self._num_queries, self._hidden_size],
initializer=tf.keras.initializers.RandomNormal(mean=0., stddev=1.),
dtype=tf.float32)
sqrt_k = math.sqrt(1.0 / self._hidden_size)
self._class_embed = tf.keras.layers.Dense(
self._num_classes,
kernel_initializer=tf.keras.initializers.RandomUniform(-sqrt_k, sqrt_k),
name="detr/cls_dense")
self._bbox_embed = [
tf.keras.layers.Dense(
self._hidden_size, activation="relu",
kernel_initializer=tf.keras.initializers.RandomUniform(
-sqrt_k, sqrt_k),
name="detr/box_dense_0"),
tf.keras.layers.Dense(
self._hidden_size, activation="relu",
kernel_initializer=tf.keras.initializers.RandomUniform(
-sqrt_k, sqrt_k),
name="detr/box_dense_1"),
tf.keras.layers.Dense(
4, kernel_initializer=tf.keras.initializers.RandomUniform(
-sqrt_k, sqrt_k),
name="detr/box_dense_2")]
self._sigmoid = tf.keras.layers.Activation("sigmoid")
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
def get_config(self):
return {
"backbone": self._backbone,
"backbone_endpoint_name": self._backbone_endpoint_name,
"num_queries": self._num_queries,
"hidden_size": self._hidden_size,
"num_classes": self._num_classes,
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"dropout_rate": self._dropout_rate,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def _generate_image_mask(self, inputs: tf.Tensor,
target_shape: tf.Tensor) -> tf.Tensor:
"""Generates image mask from input image."""
mask = tf.expand_dims(
tf.cast(tf.not_equal(tf.reduce_sum(inputs, axis=-1), 0), inputs.dtype),
axis=-1)
mask = tf.image.resize(
mask, target_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return mask
def call(self, inputs: tf.Tensor, training: bool = None) -> List[Any]: # pytype: disable=signature-mismatch # overriding-parameter-count-checks
batch_size = tf.shape(inputs)[0]
features = self._backbone(inputs)[self._backbone_endpoint_name]
shape = tf.shape(features)
mask = self._generate_image_mask(inputs, shape[1: 3])
pos_embed = position_embedding_sine(
mask[:, :, :, 0], num_pos_features=self._hidden_size)
pos_embed = tf.reshape(pos_embed, [batch_size, -1, self._hidden_size])
features = tf.reshape(
self._input_proj(features), [batch_size, -1, self._hidden_size])
mask = tf.reshape(mask, [batch_size, -1])
decoded_list = self._transformer({
"inputs":
features,
"targets":
tf.tile(
tf.expand_dims(self._query_embeddings, axis=0),
(batch_size, 1, 1)),
"pos_embed": pos_embed,
"mask": mask,
})
out_list = []
for decoded in decoded_list:
decoded = tf.stack(decoded)
output_class = self._class_embed(decoded)
box_out = decoded
for layer in self._bbox_embed:
box_out = layer(box_out)
output_coord = self._sigmoid(box_out)
out = {"cls_outputs": output_class, "box_outputs": output_coord}
if not training:
out.update(postprocess(out))
out_list.append(out)
return out_list
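# An end-to-end forward-pass sketch mirroring detr_test.py. Illustrative only:
# the ResNet-50 backbone, endpoint "5", and all sizes are assumptions.
def _detr_forward_example():
  """Builds a DETR model and runs one inference-mode forward pass."""
  from official.vision.modeling.backbones import resnet  # pylint: disable=g-import-not-at-top
  backbone = resnet.ResNet(50, bn_trainable=False)
  model = DETR(backbone, backbone_endpoint_name="5", num_queries=100,
               hidden_size=256, num_classes=92)
  outs = model(tf.ones([2, 640, 640, 3]), training=False)
  # One dict per decoder layer (6 by default); each holds "cls_outputs"
  # [2, 100, 92], "box_outputs" [2, 100, 4], and postprocessed detections.
  return outs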
class DETRTransformer(tf.keras.layers.Layer):
"""Encoder and Decoder of DETR."""
def __init__(self, num_encoder_layers=6, num_decoder_layers=6,
dropout_rate=0.1, **kwargs):
super().__init__(**kwargs)
self._dropout_rate = dropout_rate
self._num_encoder_layers = num_encoder_layers
self._num_decoder_layers = num_decoder_layers
def build(self, input_shape=None):
if self._num_encoder_layers > 0:
self._encoder = transformer.TransformerEncoder(
attention_dropout_rate=self._dropout_rate,
dropout_rate=self._dropout_rate,
intermediate_dropout=self._dropout_rate,
norm_first=False,
num_layers=self._num_encoder_layers)
else:
self._encoder = None
self._decoder = transformer.TransformerDecoder(
attention_dropout_rate=self._dropout_rate,
dropout_rate=self._dropout_rate,
intermediate_dropout=self._dropout_rate,
norm_first=False,
num_layers=self._num_decoder_layers)
super().build(input_shape)
def get_config(self):
return {
"num_encoder_layers": self._num_encoder_layers,
"num_decoder_layers": self._num_decoder_layers,
"dropout_rate": self._dropout_rate,
}
def call(self, inputs):
sources = inputs["inputs"]
targets = inputs["targets"]
pos_embed = inputs["pos_embed"]
mask = inputs["mask"]
input_shape = tf_utils.get_shape_list(sources)
source_attention_mask = tf.tile(
tf.expand_dims(mask, axis=1), [1, input_shape[1], 1])
if self._encoder is not None:
memory = self._encoder(
sources, attention_mask=source_attention_mask, pos_embed=pos_embed)
else:
memory = sources
target_shape = tf_utils.get_shape_list(targets)
cross_attention_mask = tf.tile(
tf.expand_dims(mask, axis=1), [1, target_shape[1], 1])
target_shape = tf.shape(targets)
decoded = self._decoder(
tf.zeros_like(targets),
memory,
# TODO(b/199545430): self_attention_mask could be set to None when this
# bug is resolved. Passing ones for now.
self_attention_mask=tf.ones(
(target_shape[0], target_shape[1], target_shape[1])),
cross_attention_mask=cross_attention_mask,
return_all_decoder_outputs=True,
input_pos_embed=targets,
memory_pos_embed=pos_embed)
return decoded
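# A shape-level sketch of the DETRTransformer input contract. Illustrative
# only: batch size, sequence lengths, and hidden size are arbitrary.
def _detr_transformer_example():
  """Runs DETRTransformer on dummy flattened features and queries."""
  layer = DETRTransformer(num_encoder_layers=1, num_decoder_layers=2)
  batch, source_len, target_len, hidden = 2, 49, 100, 256
  decoded_list = layer({
      "inputs": tf.ones([batch, source_len, hidden]),     # image features.
      "targets": tf.ones([batch, target_len, hidden]),    # query embeddings.
      "pos_embed": tf.ones([batch, source_len, hidden]),  # sine embeddings.
      "mask": tf.ones([batch, source_len]),               # 1 = valid pixel.
  })
  # A list with one [batch, target_len, hidden] tensor per decoder layer.
  return decoded_list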
| 12,128 | 35.643505 | 147 | py |
models | models-master/official/projects/detr/modeling/detr_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_models.official.projects.detr.detr."""
import tensorflow as tf
from official.projects.detr.modeling import detr
from official.vision.modeling.backbones import resnet
class DetrTest(tf.test.TestCase):
def test_forward(self):
num_queries = 10
hidden_size = 128
num_classes = 10
image_size = 640
batch_size = 2
backbone = resnet.ResNet(50, bn_trainable=False)
backbone_endpoint_name = '5'
model = detr.DETR(backbone, backbone_endpoint_name, num_queries,
hidden_size, num_classes)
outs = model(tf.ones((batch_size, image_size, image_size, 3)))
self.assertLen(outs, 6) # intermediate decoded outputs.
for out in outs:
self.assertAllEqual(
tf.shape(out['cls_outputs']), (batch_size, num_queries, num_classes))
self.assertAllEqual(
tf.shape(out['box_outputs']), (batch_size, num_queries, 4))
def test_get_from_config_detr_transformer(self):
config = {
'num_encoder_layers': 1,
'num_decoder_layers': 2,
'dropout_rate': 0.5,
}
detr_model = detr.DETRTransformer.from_config(config)
retrieved_config = detr_model.get_config()
self.assertEqual(config, retrieved_config)
def test_get_from_config_detr(self):
config = {
'backbone': resnet.ResNet(50, bn_trainable=False),
'backbone_endpoint_name': '5',
'num_queries': 2,
'hidden_size': 4,
'num_classes': 10,
'num_encoder_layers': 4,
'num_decoder_layers': 5,
'dropout_rate': 0.5,
}
detr_model = detr.DETR.from_config(config)
retrieved_config = detr_model.get_config()
self.assertEqual(config, retrieved_config)
if __name__ == '__main__':
tf.test.main()
| 2,364 | 32.309859 | 79 | py |
models | models-master/official/projects/detr/modeling/transformer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specialized Transformers for DETR.
The position embeddings are added to the query and key for every self- and
cross-attention layer.
"""
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import models
class TransformerEncoder(tf.keras.layers.Layer):
"""Transformer encoder.
Transformer encoder is made up of N identical layers. Each layer is composed
of the sublayers:
1. Self-attention layer
2. Feedforward network (which is 2 fully-connected layers)
"""
def __init__(self,
num_layers=6,
num_attention_heads=8,
intermediate_size=2048,
activation="relu",
dropout_rate=0.0,
attention_dropout_rate=0.0,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.0,
**kwargs):
"""Initialize a Transformer encoder.
Args:
num_layers: Number of layers.
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate (Feedforward) layer.
activation: Activation for the intermediate layer.
dropout_rate: Dropout probability.
attention_dropout_rate: Dropout probability for attention layers.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super(TransformerEncoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._activation = activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
def build(self, input_shape):
"""Implements build() for the layer."""
self.encoder_layers = []
for i in range(self.num_layers):
self.encoder_layers.append(
TransformerEncoderBlock(
num_attention_heads=self.num_attention_heads,
inner_dim=self._intermediate_size,
inner_activation=self._activation,
output_dropout=self._dropout_rate,
attention_dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
norm_first=self._norm_first,
norm_epsilon=self._norm_epsilon,
inner_dropout=self._intermediate_dropout,
attention_initializer=tf_utils.clone_initializer(
models.seq2seq_transformer.attention_initializer(
input_shape[2])),
name=("layer_%d" % i)))
self.output_normalization = tf.keras.layers.LayerNormalization(
epsilon=self._norm_epsilon, dtype="float32")
super(TransformerEncoder, self).build(input_shape)
def get_config(self):
config = {
"num_layers": self.num_layers,
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self._intermediate_size,
"activation": self._activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout
}
base_config = super(TransformerEncoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, encoder_inputs, attention_mask=None, pos_embed=None):
"""Return the output of the encoder.
Args:
encoder_inputs: A tensor with shape `(batch_size, input_length,
hidden_size)`.
attention_mask: A mask for the encoder self-attention layer with shape
`(batch_size, input_length, input_length)`.
pos_embed: Position embedding to add to every encoder layer.
Returns:
Output of encoder which is a `float32` tensor with shape
`(batch_size, input_length, hidden_size)`.
"""
for layer_idx in range(self.num_layers):
encoder_inputs = self.encoder_layers[layer_idx](
[encoder_inputs, attention_mask, pos_embed])
output_tensor = encoder_inputs
output_tensor = self.output_normalization(output_tensor)
return output_tensor
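# A minimal usage sketch of TransformerEncoder. Illustrative only: the
# all-ones mask and embeddings below are placeholders.
def _transformer_encoder_example():
  """Encodes a dummy [batch, length, hidden] sequence."""
  encoder = TransformerEncoder(num_layers=2, num_attention_heads=8)
  batch, length, hidden = 2, 49, 256
  outputs = encoder(
      tf.ones([batch, length, hidden]),
      attention_mask=tf.ones([batch, length, length]),  # all positions valid.
      pos_embed=tf.ones([batch, length, hidden]))
  # outputs: [batch, length, hidden], layer-normalized at the top.
  return outputs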
class TransformerEncoderBlock(tf.keras.layers.Layer):
"""TransformerEncoderBlock layer.
This layer implements the Transformer Encoder from
"Attention Is All You Need". (https://arxiv.org/abs/1706.03762),
which combines a `tf.keras.layers.MultiHeadAttention` layer with a
two-layer feedforward network. The only difference: position embedding is
added to the query and key of self-attention.
References:
[Attention Is All You Need](https://arxiv.org/abs/1706.03762)
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805)
"""
def __init__(self,
num_attention_heads,
inner_dim,
inner_activation,
output_range=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
output_dropout=0.0,
attention_dropout=0.0,
inner_dropout=0.0,
attention_initializer=None,
attention_axes=None,
**kwargs):
"""Initializes `TransformerEncoderBlock`.
Args:
num_attention_heads: Number of attention heads.
inner_dim: The output dimension of the first Dense layer in a two-layer
feedforward network.
inner_activation: The activation for the first Dense layer in a two-layer
feedforward network.
      output_range: The sequence output range, [0, output_range), for slicing
        the target sequence. `None` means the target sequence is not sliced.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
      bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
output_dropout: Dropout probability for the post-attention and output
dropout.
attention_dropout: Dropout probability for within the attention layer.
inner_dropout: Dropout probability for the first Dense layer in a
two-layer feedforward network.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
attention_axes: axes over which the attention is applied. `None` means
attention over all axes, but batch, heads, and features.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super().__init__(**kwargs)
self._num_heads = num_attention_heads
self._inner_dim = inner_dim
self._inner_activation = inner_activation
self._attention_dropout = attention_dropout
self._attention_dropout_rate = attention_dropout
self._output_dropout = output_dropout
self._output_dropout_rate = output_dropout
self._output_range = output_range
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._inner_dropout = inner_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._attention_axes = attention_axes
def build(self, input_shape):
if isinstance(input_shape, tf.TensorShape):
input_tensor_shape = input_shape
elif isinstance(input_shape, (list, tuple)):
input_tensor_shape = tf.TensorShape(input_shape[0])
else:
raise ValueError(
"The type of input shape argument is not supported, got: %s" %
type(input_shape))
einsum_equation = "abc,cd->abd"
if len(input_tensor_shape.as_list()) > 3:
einsum_equation = "...bc,cd->...bd"
hidden_size = input_tensor_shape[-1]
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._attention_layer = tf.keras.layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._attention_head_size,
dropout=self._attention_dropout,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
attention_axes=self._attention_axes,
name="self_attention",
**common_kwargs)
self._attention_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
# It is probably safe in mixed_float16, but we haven't validated this yet.
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32))
self._intermediate_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, self._inner_dim),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="intermediate",
**common_kwargs)
policy = tf.keras.mixed_precision.global_policy()
if policy.name == "mixed_bfloat16":
# bfloat16 causes BERT with the LAMB optimizer to not converge
# as well, so we use float32.
# TODO(b/154538392): Investigate this.
policy = tf.float32
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._inner_activation, dtype=policy)
self._inner_dropout_layer = tf.keras.layers.Dropout(
rate=self._inner_dropout)
self._output_dense = tf.keras.layers.EinsumDense(
einsum_equation,
output_shape=(None, hidden_size),
bias_axes="d",
name="output",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
**common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype=tf.float32)
super(TransformerEncoderBlock, self).build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self._num_heads,
"inner_dim": self._inner_dim,
"inner_activation": self._inner_activation,
"output_dropout": self._output_dropout_rate,
"attention_dropout": self._attention_dropout_rate,
"output_range": self._output_range,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"inner_dropout": self._inner_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
"attention_axes": self._attention_axes,
}
base_config = super(TransformerEncoderBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Transformer self-attention encoder block call.
Args:
inputs: a single tensor or a list of tensors. `input tensor` as the single
sequence of embeddings. [`input tensor`, `attention mask`] to have the
additional attention mask. [`input tensor`, `attention mask`, `query
embed`] to have an additional position embedding to add.
Returns:
An output tensor with the same dimensions as input/query tensor.
"""
input_tensor, attention_mask, pos_embed = inputs
key_value = None
if self._output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:self._output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:self._output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:self._output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output = self._attention_layer(
query=target_tensor + pos_embed,
key=key_value + pos_embed,
value=key_value,
attention_mask=attention_mask)
attention_output = self._attention_dropout(attention_output)
if self._norm_first:
attention_output = source_tensor + attention_output
else:
attention_output = self._attention_layer_norm(target_tensor +
attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
if self._norm_first:
return source_attention_output + layer_output
# During mixed precision training, layer norm output is always fp32 for now.
# Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
return self._output_layer_norm(layer_output + attention_output)
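# A sketch of the three-element input list this block expects. Illustrative
# only; the shapes are hypothetical.
def _transformer_encoder_block_example():
  """Runs one encoder block on dummy inputs."""
  block = TransformerEncoderBlock(
      num_attention_heads=8, inner_dim=512, inner_activation="relu")
  batch, length, hidden = 2, 49, 256
  x = tf.ones([batch, length, hidden])
  attention_mask = tf.ones([batch, length, length])
  pos_embed = tf.ones([batch, length, hidden])  # added to query and key only.
  return block([x, attention_mask, pos_embed])  # [batch, length, hidden].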
class TransformerDecoder(tf.keras.layers.Layer):
"""Transformer decoder.
Like the encoder, the decoder is made up of N identical layers.
Each layer is composed of the sublayers:
1. Self-attention layer
2. Multi-headed attention layer combining encoder outputs with results from
the previous self-attention layer.
3. Feedforward network (2 fully-connected layers)
"""
def __init__(self,
num_layers=6,
num_attention_heads=8,
intermediate_size=2048,
activation="relu",
dropout_rate=0.0,
attention_dropout_rate=0.0,
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=0.0,
**kwargs):
"""Initialize a Transformer decoder.
Args:
num_layers: Number of layers.
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate (Feedforward) layer.
activation: Activation for the intermediate layer.
dropout_rate: Dropout probability.
attention_dropout_rate: Dropout probability for attention layers.
use_bias: Whether to enable use_bias in attention layer. If set `False`,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set `False`, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super(TransformerDecoder, self).__init__(**kwargs)
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._activation = activation
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
def build(self, input_shape):
"""Implements build() for the layer."""
self.decoder_layers = []
for i in range(self.num_layers):
self.decoder_layers.append(
TransformerDecoderBlock(
num_attention_heads=self.num_attention_heads,
intermediate_size=self._intermediate_size,
intermediate_activation=self._activation,
dropout_rate=self._dropout_rate,
attention_dropout_rate=self._attention_dropout_rate,
use_bias=self._use_bias,
norm_first=self._norm_first,
norm_epsilon=self._norm_epsilon,
intermediate_dropout=self._intermediate_dropout,
attention_initializer=tf_utils.clone_initializer(
models.seq2seq_transformer.attention_initializer(
input_shape[2])),
name=("layer_%d" % i)))
self.output_normalization = tf.keras.layers.LayerNormalization(
epsilon=self._norm_epsilon, dtype="float32")
super(TransformerDecoder, self).build(input_shape)
def get_config(self):
config = {
"num_layers": self.num_layers,
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self._intermediate_size,
"activation": self._activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout
}
base_config = super(TransformerDecoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
target,
memory,
self_attention_mask=None,
cross_attention_mask=None,
cache=None,
decode_loop_step=None,
return_all_decoder_outputs=False,
input_pos_embed=None,
memory_pos_embed=None):
"""Return the output of the decoder layer stacks.
Args:
target: A tensor with shape `(batch_size, target_length, hidden_size)`.
memory: A tensor with shape `(batch_size, input_length, hidden_size)`.
      self_attention_mask: A tensor with shape `(batch_size, target_length,
        target_length)`, the mask for the decoder self-attention layer.
cross_attention_mask: A tensor with shape `(batch_size, target_length,
input_length)` which is the mask for encoder-decoder attention layer.
cache: (Used for fast decoding) A nested dictionary storing previous
decoder self-attention values. The items are:
{layer_n: {"k": A tensor with shape `(batch_size, i, key_channels)`,
"v": A tensor with shape `(batch_size, i, value_channels)`},
...}
decode_loop_step: An integer, the step number of the decoding loop. Used
only for autoregressive inference on TPU.
return_all_decoder_outputs: Return all decoder layer outputs. Note that
the outputs are layer normed. This is useful when introducing per layer
auxiliary loss.
input_pos_embed: A tensor that is added to the query and key of the
self-attention layer.
memory_pos_embed: A tensor that is added to the query and key of the
cross-attention layer.
Returns:
Output of decoder.
      float32 tensor with shape `(batch_size, target_length, hidden_size)`.
"""
output_tensor = target
decoder_outputs = []
for layer_idx in range(self.num_layers):
transformer_inputs = [
output_tensor, memory, cross_attention_mask, self_attention_mask,
input_pos_embed, memory_pos_embed
]
# Gets the cache for decoding.
if cache is None:
output_tensor, _ = self.decoder_layers[layer_idx](transformer_inputs)
else:
cache_layer_idx = str(layer_idx)
output_tensor, cache[cache_layer_idx] = self.decoder_layers[layer_idx](
transformer_inputs,
cache=cache[cache_layer_idx],
decode_loop_step=decode_loop_step)
if return_all_decoder_outputs:
decoder_outputs.append(self.output_normalization(output_tensor))
if return_all_decoder_outputs:
return decoder_outputs
else:
return self.output_normalization(output_tensor)
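# A minimal usage sketch of TransformerDecoder without caching. Illustrative
# only; shapes are arbitrary placeholders.
def _transformer_decoder_example():
  """Decodes dummy targets against a dummy memory."""
  decoder = TransformerDecoder(num_layers=2, num_attention_heads=8)
  batch, target_len, source_len, hidden = 2, 100, 49, 256
  outputs = decoder(
      tf.zeros([batch, target_len, hidden]),
      tf.ones([batch, source_len, hidden]),
      self_attention_mask=tf.ones([batch, target_len, target_len]),
      cross_attention_mask=tf.ones([batch, target_len, source_len]),
      return_all_decoder_outputs=True,
      input_pos_embed=tf.ones([batch, target_len, hidden]),
      memory_pos_embed=tf.ones([batch, source_len, hidden]))
  # A list with one [batch, target_len, hidden] tensor per layer (2 here).
  return outputs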
class TransformerDecoderBlock(tf.keras.layers.Layer):
"""Single transformer layer for decoder.
It has three sub-layers:
(1) a multi-head self-attention mechanism.
  (2) an encoder-decoder attention.
(3) a positionwise fully connected feed-forward network.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
norm_first=False,
norm_epsilon=1e-12,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
"""Initialize a Transformer decoder block.
Args:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output
dropout.
attention_dropout_rate: Dropout probability for within the attention
layer.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
      bias_constraint: Constraint for dense layer biases.
use_bias: Whether to enable use_bias in attention layer. If set False,
use_bias in attention layer is disabled.
norm_first: Whether to normalize inputs to attention and intermediate
dense layers. If set False, output of attention and intermediate dense
layers is normalized.
norm_epsilon: Epsilon value to initialize normalization layers.
intermediate_dropout: Dropout probability for intermediate_dropout_layer.
attention_initializer: Initializer for kernels of attention layers. If set
`None`, attention layers use kernel_initializer as initializer for
kernel.
      **kwargs: keyword arguments passed to tf.keras.layers.Layer.
"""
super().__init__(**kwargs)
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_activation = tf.keras.activations.get(
intermediate_activation)
self.dropout_rate = dropout_rate
self.attention_dropout_rate = attention_dropout_rate
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._norm_first = norm_first
self._norm_epsilon = norm_epsilon
self._intermediate_dropout = intermediate_dropout
if attention_initializer:
self._attention_initializer = tf.keras.initializers.get(
attention_initializer)
else:
self._attention_initializer = tf_utils.clone_initializer(
self._kernel_initializer)
self._cross_attention_cls = layers.attention.MultiHeadAttention
def build(self, input_shape):
target_tensor_shape = tf.TensorShape(input_shape[0])
if len(target_tensor_shape.as_list()) != 3:
raise ValueError("TransformerLayer expects a three-dimensional input of "
"shape [batch, sequence, width].")
hidden_size = target_tensor_shape[2]
if hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self.num_attention_heads))
self.attention_head_size = int(hidden_size) // self.num_attention_heads
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
# Self attention.
self.self_attention = layers.attention.CachedAttention(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
name="self_attention",
**common_kwargs)
self.self_attention_output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="output",
**common_kwargs)
self.self_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.self_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Encoder-decoder attention.
self.encdec_attention = self._cross_attention_cls(
num_heads=self.num_attention_heads,
key_dim=self.attention_head_size,
dropout=self.attention_dropout_rate,
output_shape=hidden_size,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
name="attention/encdec",
**common_kwargs)
self.encdec_attention_dropout = tf.keras.layers.Dropout(
rate=self.dropout_rate)
self.encdec_attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="attention/encdec_output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32"))
# Feed-forward projection.
self.intermediate_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, self.intermediate_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="intermediate",
**common_kwargs)
self.intermediate_activation_layer = tf.keras.layers.Activation(
self.intermediate_activation)
self._intermediate_dropout_layer = tf.keras.layers.Dropout(
rate=self._intermediate_dropout)
self.output_dense = tf.keras.layers.EinsumDense(
"abc,cd->abd",
output_shape=(None, hidden_size),
bias_axes="d",
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
name="output",
**common_kwargs)
self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
self.output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm",
axis=-1,
epsilon=self._norm_epsilon,
dtype="float32")
super().build(input_shape)
def get_config(self):
config = {
"num_attention_heads": self.num_attention_heads,
"intermediate_size": self.intermediate_size,
"intermediate_activation": tf_utils.serialize_activation(
self.intermediate_activation, use_legacy_format=True
),
"dropout_rate": self.dropout_rate,
"attention_dropout_rate": self.attention_dropout_rate,
"kernel_initializer": tf_utils.serialize_initializer(
self._kernel_initializer, use_legacy_format=True
),
"bias_initializer": tf_utils.serialize_initializer(
self._bias_initializer, use_legacy_format=True
),
"kernel_regularizer": tf_utils.serialize_regularizer(
self._kernel_regularizer, use_legacy_format=True
),
"bias_regularizer": tf_utils.serialize_regularizer(
self._bias_regularizer, use_legacy_format=True
),
"activity_regularizer": tf_utils.serialize_regularizer(
self._activity_regularizer, use_legacy_format=True
),
"kernel_constraint": tf_utils.serialize_constraint(
self._kernel_constraint, use_legacy_format=True
),
"bias_constraint": tf_utils.serialize_constraint(
self._bias_constraint, use_legacy_format=True
),
"use_bias": self._use_bias,
"norm_first": self._norm_first,
"norm_epsilon": self._norm_epsilon,
"intermediate_dropout": self._intermediate_dropout,
"attention_initializer": tf_utils.serialize_initializer(
self._attention_initializer, use_legacy_format=True
),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def common_layers_with_encoder(self):
"""Gets layer objects that can make a Transformer encoder block."""
return [
self.self_attention, self.self_attention_layer_norm,
self.intermediate_dense, self.output_dense, self.output_layer_norm
]
def call(self, inputs, cache=None, decode_loop_step=None):
    (input_tensor, memory, attention_mask, self_attention_mask,
     input_pos_embed, memory_pos_embed) = inputs
source_tensor = input_tensor
if self._norm_first:
input_tensor = self.self_attention_layer_norm(input_tensor)
self_attention_output, cache = self.self_attention(
query=input_tensor + input_pos_embed,
key=input_tensor + input_pos_embed,
value=input_tensor,
attention_mask=self_attention_mask,
cache=cache,
decode_loop_step=decode_loop_step)
self_attention_output = self.self_attention_dropout(self_attention_output)
if self._norm_first:
self_attention_output = source_tensor + self_attention_output
else:
self_attention_output = self.self_attention_layer_norm(
input_tensor + self_attention_output)
if self._norm_first:
source_self_attention_output = self_attention_output
self_attention_output = self.encdec_attention_layer_norm(
self_attention_output)
cross_attn_inputs = dict(
query=self_attention_output + input_pos_embed,
key=memory + memory_pos_embed,
value=memory,
attention_mask=attention_mask)
attention_output = self.encdec_attention(**cross_attn_inputs)
attention_output = self.encdec_attention_dropout(attention_output)
if self._norm_first:
attention_output = source_self_attention_output + attention_output
else:
attention_output = self.encdec_attention_layer_norm(
self_attention_output + attention_output)
if self._norm_first:
source_attention_output = attention_output
attention_output = self.output_layer_norm(attention_output)
intermediate_output = self.intermediate_dense(attention_output)
intermediate_output = self.intermediate_activation_layer(
intermediate_output)
intermediate_output = self._intermediate_dropout_layer(intermediate_output)
layer_output = self.output_dense(intermediate_output)
layer_output = self.output_dropout(layer_output)
if self._norm_first:
layer_output = source_attention_output + layer_output
else:
layer_output = self.output_layer_norm(layer_output + attention_output)
return layer_output, cache
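# A sketch of the six-element input list TransformerDecoderBlock consumes.
# Illustrative only; shapes are hypothetical and no cache is used.
def _transformer_decoder_block_example():
  """Runs one decoder block on dummy inputs."""
  block = TransformerDecoderBlock(
      num_attention_heads=8, intermediate_size=512,
      intermediate_activation="relu")
  batch, target_len, source_len, hidden = 2, 100, 49, 256
  inputs = [
      tf.zeros([batch, target_len, hidden]),      # decoder input.
      tf.ones([batch, source_len, hidden]),       # encoder memory.
      tf.ones([batch, target_len, source_len]),   # cross-attention mask.
      tf.ones([batch, target_len, target_len]),   # self-attention mask.
      tf.ones([batch, target_len, hidden]),       # input_pos_embed.
      tf.ones([batch, source_len, hidden]),       # memory_pos_embed.
  ]
  layer_output, cache = block(inputs)  # cache stays None without decoding.
  return layer_output, cache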
| 36,237 | 41.632941 | 105 | py |
models | models-master/official/projects/detr/tasks/detection_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for detection."""
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from official.projects.detr import optimization
from official.projects.detr.configs import detr as detr_cfg
from official.projects.detr.dataloaders import coco
from official.projects.detr.tasks import detection
from official.vision.configs import backbones
_NUM_EXAMPLES = 10
def _gen_fn():
h = np.random.randint(0, 300)
w = np.random.randint(0, 300)
num_boxes = np.random.randint(0, 50)
return {
'image': np.ones(shape=(h, w, 3), dtype=np.uint8),
'image/id': np.random.randint(0, 100),
'image/filename': 'test',
'objects': {
'is_crowd': np.ones(shape=(num_boxes), dtype=bool),
'bbox': np.ones(shape=(num_boxes, 4), dtype=np.float32),
'label': np.ones(shape=(num_boxes), dtype=np.int64),
'id': np.ones(shape=(num_boxes), dtype=np.int64),
'area': np.ones(shape=(num_boxes), dtype=np.int64),
}
}
def _as_dataset(self, *args, **kwargs):
del args
del kwargs
return tf.data.Dataset.from_generator(
lambda: (_gen_fn() for i in range(_NUM_EXAMPLES)),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
class DetectionTest(tf.test.TestCase):
def test_train_step(self):
config = detr_cfg.DetrTask(
model=detr_cfg.Detr(
input_size=[1333, 1333, 3],
num_encoder_layers=1,
num_decoder_layers=1,
num_classes=81,
backbone=backbones.Backbone(
type='resnet',
resnet=backbones.ResNet(model_id=10, bn_trainable=False))
),
train_data=coco.COCODataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=True,
global_batch_size=2,
))
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = detection.DetectionTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
opt_cfg = optimization.OptimizationConfig({
'optimizer': {
'type': 'detr_adamw',
'detr_adamw': {
'weight_decay_rate': 1e-4,
'global_clipnorm': 0.1,
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [120000],
'values': [0.0001, 1.0e-05]
}
},
})
optimizer = detection.DetectionTask.create_optimizer(opt_cfg)
task.train_step(next(iterator), model, optimizer)
def test_validation_step(self):
config = detr_cfg.DetrTask(
model=detr_cfg.Detr(
input_size=[1333, 1333, 3],
num_encoder_layers=1,
num_decoder_layers=1,
num_classes=81,
backbone=backbones.Backbone(
type='resnet',
resnet=backbones.ResNet(model_id=10, bn_trainable=False))
),
validation_data=coco.COCODataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=False,
global_batch_size=2,
))
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = detection.DetectionTask(config)
model = task.build_model()
metrics = task.build_metrics(training=False)
dataset = task.build_inputs(config.validation_data)
iterator = iter(dataset)
logs = task.validation_step(next(iterator), model, metrics)
state = task.aggregate_logs(step_outputs=logs)
task.reduce_aggregated_logs(state)
class DetectionTFDSTest(tf.test.TestCase):
def test_train_step(self):
config = detr_cfg.DetrTask(
model=detr_cfg.Detr(
input_size=[1333, 1333, 3],
num_encoder_layers=1,
num_decoder_layers=1,
backbone=backbones.Backbone(
type='resnet',
resnet=backbones.ResNet(model_id=10, bn_trainable=False))
),
losses=detr_cfg.Losses(class_offset=1),
train_data=detr_cfg.DataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=True,
global_batch_size=2,
))
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = detection.DetectionTask(config)
model = task.build_model()
dataset = task.build_inputs(config.train_data)
iterator = iter(dataset)
opt_cfg = optimization.OptimizationConfig({
'optimizer': {
'type': 'detr_adamw',
'detr_adamw': {
'weight_decay_rate': 1e-4,
'global_clipnorm': 0.1,
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [120000],
'values': [0.0001, 1.0e-05]
}
},
})
optimizer = detection.DetectionTask.create_optimizer(opt_cfg)
task.train_step(next(iterator), model, optimizer)
def test_validation_step(self):
config = detr_cfg.DetrTask(
model=detr_cfg.Detr(
input_size=[1333, 1333, 3],
num_encoder_layers=1,
num_decoder_layers=1,
backbone=backbones.Backbone(
type='resnet',
resnet=backbones.ResNet(model_id=10, bn_trainable=False))
),
losses=detr_cfg.Losses(class_offset=1),
validation_data=detr_cfg.DataConfig(
tfds_name='coco/2017',
tfds_split='validation',
is_training=False,
global_batch_size=2,
))
with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
task = detection.DetectionTask(config)
model = task.build_model()
metrics = task.build_metrics(training=False)
dataset = task.build_inputs(config.validation_data)
iterator = iter(dataset)
logs = task.validation_step(next(iterator), model, metrics)
state = task.aggregate_logs(step_outputs=logs)
task.reduce_aggregated_logs(state)
if __name__ == '__main__':
tf.test.main()
| 6,854 | 32.602941 | 74 | py |
models | models-master/official/projects/detr/tasks/detection.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DETR detection task definition."""
from typing import Optional
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.projects.detr.configs import detr as detr_cfg
from official.projects.detr.dataloaders import coco
from official.projects.detr.dataloaders import detr_input
from official.projects.detr.modeling import detr
from official.projects.detr.ops import matchers
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.modeling import backbones
from official.vision.ops import box_ops
@task_factory.register_task_cls(detr_cfg.DetrTask)
class DetectionTask(base_task.Task):
"""A single-replica view of training procedure.
DETR task provides artifacts for training/evalution procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build DETR model."""
input_specs = tf.keras.layers.InputSpec(shape=[None] +
self._task_config.model.input_size)
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=self._task_config.model.backbone,
norm_activation_config=self._task_config.model.norm_activation)
model = detr.DETR(backbone,
self._task_config.model.backbone_endpoint_name,
self._task_config.model.num_queries,
self._task_config.model.hidden_size,
self._task_config.model.num_classes,
self._task_config.model.num_encoder_layers,
self._task_config.model.num_decoder_layers)
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self._task_config.init_checkpoint:
return
ckpt_dir_or_file = self._task_config.init_checkpoint
# Restoring checkpoint.
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if self._task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.assert_consumed()
elif self._task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params,
input_context: Optional[tf.distribute.InputContext] = None):
"""Build input dataset."""
if isinstance(params, coco.COCODataConfig):
dataset = coco.COCODataLoader(params).load(input_context)
else:
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
parser = detr_input.Parser(
class_offset=self._task_config.losses.class_offset,
output_size=self._task_config.model.input_size[:2],
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def _compute_cost(self, cls_outputs, box_outputs, cls_targets, box_targets):
# Approximate classification cost with 1 - prob[target class].
    # The 1 is a constant that doesn't change the matching, so it can be
    # omitted.
# background: 0
cls_cost = self._task_config.losses.lambda_cls * tf.gather(
-tf.nn.softmax(cls_outputs), cls_targets, batch_dims=1, axis=-1)
    # Compute the L1 cost between boxes.
paired_differences = self._task_config.losses.lambda_box * tf.abs(
tf.expand_dims(box_outputs, 2) - tf.expand_dims(box_targets, 1))
box_cost = tf.reduce_sum(paired_differences, axis=-1)
    # Compute the GIoU cost between boxes.
giou_cost = self._task_config.losses.lambda_giou * -box_ops.bbox_generalized_overlap(
box_ops.cycxhw_to_yxyx(box_outputs),
box_ops.cycxhw_to_yxyx(box_targets))
total_cost = cls_cost + box_cost + giou_cost
max_cost = (
self._task_config.losses.lambda_cls * 0.0 +
self._task_config.losses.lambda_box * 4. +
self._task_config.losses.lambda_giou * 0.0)
    # Set costs of padded targets to a large constant.
valid = tf.expand_dims(
tf.cast(tf.not_equal(cls_targets, 0), dtype=total_cost.dtype), axis=1)
total_cost = (1 - valid) * max_cost + valid * total_cost
    # Replace inf or nan costs with the large constant.
total_cost = tf.where(
tf.logical_or(tf.math.is_nan(total_cost), tf.math.is_inf(total_cost)),
max_cost * tf.ones_like(total_cost, dtype=total_cost.dtype),
total_cost)
return total_cost
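  # Shape sketch for _compute_cost (illustrative; values are hypothetical):
  # cls_outputs [batch, num_queries, num_classes] and box_outputs
  # [batch, num_queries, 4] are matched against cls_targets
  # [batch, max_num_boxes] and box_targets [batch, max_num_boxes, 4]. The
  # returned matrix is [batch, num_queries, max_num_boxes]; entry (b, i, j)
  # is the weighted cost of assigning prediction i to target j, with padded
  # targets pinned to max_cost so the matcher never prefers them.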
def build_losses(self, outputs, labels, aux_losses=None):
"""Builds DETR losses."""
cls_outputs = outputs['cls_outputs']
box_outputs = outputs['box_outputs']
cls_targets = labels['classes']
box_targets = labels['boxes']
cost = self._compute_cost(
cls_outputs, box_outputs, cls_targets, box_targets)
_, indices = matchers.hungarian_matching(cost)
indices = tf.stop_gradient(indices)
target_index = tf.math.argmax(indices, axis=1)
cls_assigned = tf.gather(cls_outputs, target_index, batch_dims=1, axis=1)
box_assigned = tf.gather(box_outputs, target_index, batch_dims=1, axis=1)
background = tf.equal(cls_targets, 0)
num_boxes = tf.reduce_sum(
tf.cast(tf.logical_not(background), tf.float32), axis=-1)
# Down-weight background to account for class imbalance.
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=cls_targets, logits=cls_assigned)
cls_loss = self._task_config.losses.lambda_cls * tf.where(
background, self._task_config.losses.background_cls_weight * xentropy,
xentropy)
cls_weights = tf.where(
background,
self._task_config.losses.background_cls_weight * tf.ones_like(cls_loss),
tf.ones_like(cls_loss))
# Box loss is only calculated on non-background class.
l_1 = tf.reduce_sum(tf.abs(box_assigned - box_targets), axis=-1)
box_loss = self._task_config.losses.lambda_box * tf.where(
background, tf.zeros_like(l_1), l_1)
# Giou loss is only calculated on non-background class.
giou = tf.linalg.diag_part(1.0 - box_ops.bbox_generalized_overlap(
box_ops.cycxhw_to_yxyx(box_assigned),
box_ops.cycxhw_to_yxyx(box_targets)
))
giou_loss = self._task_config.losses.lambda_giou * tf.where(
background, tf.zeros_like(giou), giou)
# Consider doing all reduce once in train_step to speed up.
num_boxes_per_replica = tf.reduce_sum(num_boxes)
cls_weights_per_replica = tf.reduce_sum(cls_weights)
replica_context = tf.distribute.get_replica_context()
num_boxes_sum, cls_weights_sum = replica_context.all_reduce(
tf.distribute.ReduceOp.SUM,
[num_boxes_per_replica, cls_weights_per_replica])
cls_loss = tf.math.divide_no_nan(
tf.reduce_sum(cls_loss), cls_weights_sum)
box_loss = tf.math.divide_no_nan(
tf.reduce_sum(box_loss), num_boxes_sum)
giou_loss = tf.math.divide_no_nan(
tf.reduce_sum(giou_loss), num_boxes_sum)
aux_losses = tf.add_n(aux_losses) if aux_losses else 0.0
total_loss = cls_loss + box_loss + giou_loss + aux_losses
return total_loss, cls_loss, box_loss, giou_loss
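  # How the matching is consumed above (hedged sketch): hungarian_matching
  # returns a one-hot assignment matrix `indices` of shape
  # [batch, num_queries, num_queries]; argmax over axis=1 gives, for each
  # target slot, the index of its assigned prediction, and the batched
  # tf.gather calls reorder predictions into target order before the
  # per-pair class/box/GIoU losses are computed.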
def build_metrics(self, training=True):
"""Builds detection metrics."""
metrics = []
metric_names = ['cls_loss', 'box_loss', 'giou_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if not training:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self._task_config.annotation_file,
include_mask=False,
need_rescale_bboxes=True,
per_category_metrics=self._task_config.per_category_metrics)
return metrics
def train_step(self, inputs, model, optimizer, metrics=None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
with tf.GradientTape() as tape:
outputs = model(features, training=True)
loss = 0.0
cls_loss = 0.0
box_loss = 0.0
giou_loss = 0.0
for output in outputs:
# Computes per-replica loss.
layer_loss, layer_cls_loss, layer_box_loss, layer_giou_loss = self.build_losses(
outputs=output, labels=labels, aux_losses=model.losses)
loss += layer_loss
cls_loss += layer_cls_loss
box_loss += layer_box_loss
giou_loss += layer_giou_loss
# Consider moving scaling logic from build_losses to here.
scaled_loss = loss
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
# Multiply for logging.
# Since we expect the gradient replica sum to happen in the optimizer,
# the loss is scaled with global num_boxes and weights.
# To have it more interpretable/comparable we scale it back when logging.
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
loss *= num_replicas_in_sync
cls_loss *= num_replicas_in_sync
box_loss *= num_replicas_in_sync
giou_loss *= num_replicas_in_sync
# Trainer class handles loss metric for you.
logs = {self.loss: loss}
all_losses = {
'cls_loss': cls_loss,
'box_loss': box_loss,
'giou_loss': giou_loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def validation_step(self, inputs, model, metrics=None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = model(features, training=False)[-1]
loss, cls_loss, box_loss, giou_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
# Multiply for logging.
# Since we expect the gradient replica sum to happen in the optimizer,
# the loss is scaled with global num_boxes and weights.
# To have it more interpretable/comparable we scale it back when logging.
num_replicas_in_sync = tf.distribute.get_strategy().num_replicas_in_sync
loss *= num_replicas_in_sync
cls_loss *= num_replicas_in_sync
box_loss *= num_replicas_in_sync
giou_loss *= num_replicas_in_sync
# Evaluator class handles loss metric for you.
logs = {self.loss: loss}
# This is for backward compatibility.
if 'detection_boxes' not in outputs:
      height = labels['image_info'][:, 1:2, 0]
      width = labels['image_info'][:, 1:2, 1]
      detection_boxes = box_ops.cycxhw_to_yxyx(
          outputs['box_outputs']) * tf.expand_dims(
              tf.concat([height, width, height, width], axis=1), axis=1)
else:
detection_boxes = outputs['detection_boxes']
    if 'detection_scores' not in outputs:
      detection_scores = tf.math.reduce_max(
          tf.nn.softmax(outputs['cls_outputs'])[:, :, 1:], axis=-1)
    else:
      detection_scores = outputs['detection_scores']
if 'detection_classes' not in outputs:
detection_classes = tf.math.argmax(
outputs['cls_outputs'][:, :, 1:], axis=-1) + 1
else:
detection_classes = outputs['detection_classes']
if 'num_detections' not in outputs:
num_detections = tf.reduce_sum(
tf.cast(
tf.math.greater(
tf.math.reduce_max(outputs['cls_outputs'], axis=-1), 0),
tf.int32),
axis=-1)
else:
num_detections = outputs['num_detections']
predictions = {
'detection_boxes': detection_boxes,
'detection_scores': detection_scores,
'detection_classes': detection_classes,
'num_detections': num_detections,
'source_id': labels['id'],
'image_info': labels['image_info']
}
ground_truths = {
'source_id': labels['id'],
'height': labels['image_info'][:, 0:1, 0],
'width': labels['image_info'][:, 0:1, 1],
'num_detections': tf.reduce_sum(
tf.cast(tf.math.greater(labels['classes'], 0), tf.int32), axis=-1),
'boxes': labels['gt_boxes'],
'classes': labels['classes'],
'is_crowds': labels['is_crowd']
}
logs.update({'predictions': predictions,
'ground_truths': ground_truths})
all_losses = {
'cls_loss': cls_loss,
'box_loss': box_loss,
'giou_loss': giou_loss,
}
# Metric results will be added to logs for you.
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.coco_metric.reset_states()
state = self.coco_metric
state.update_state(
step_outputs['ground_truths'],
step_outputs['predictions'])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
return aggregated_logs.result()
| 16,007 | 36.933649 | 89 | py |
models | models-master/official/projects/detr/ops/matchers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_models.official.projects.detr.ops.matchers."""
import numpy as np
from scipy import optimize
import tensorflow as tf
from official.projects.detr.ops import matchers
class MatchersOpsTest(tf.test.TestCase):
def testLinearSumAssignment(self):
"""Check a simple 2D test case of the Linear Sum Assignment problem.
Ensures that the implementation of the matching algorithm is correct
and functional on TPUs.
"""
cost_matrix = np.array([[[4, 1, 3], [2, 0, 5], [3, 2, 2]]],
dtype=np.float32)
_, adjacency_matrix = matchers.hungarian_matching(tf.constant(cost_matrix))
adjacency_output = adjacency_matrix.numpy()
correct_output = np.array([
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
], dtype=bool)
self.assertAllEqual(adjacency_output[0], correct_output)
def testBatchedLinearSumAssignment(self):
"""Check a batched case of the Linear Sum Assignment Problem.
Ensures that a correct solution is found for all inputted problems within
a batch.
"""
cost_matrix = np.array([
[[4, 1, 3], [2, 0, 5], [3, 2, 2]],
[[1, 4, 3], [0, 2, 5], [2, 3, 2]],
[[1, 3, 4], [0, 5, 2], [2, 2, 3]],
],
dtype=np.float32)
_, adjacency_matrix = matchers.hungarian_matching(tf.constant(cost_matrix))
adjacency_output = adjacency_matrix.numpy()
# Hand solved correct output for the linear sum assignment problem
correct_output = np.array([
[[0, 1, 0], [1, 0, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 0, 1], [0, 1, 0]],
],
dtype=bool)
self.assertAllClose(adjacency_output, correct_output)
def testMaximumBipartiteMatching(self):
"""Check that the maximum bipartite match assigns the correct numbers."""
adj_matrix = tf.cast([[
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
]], tf.bool)
_, assignment = matchers._maximum_bipartite_matching(adj_matrix)
self.assertEqual(np.sum(assignment.numpy()), 5)
def testAssignmentMatchesScipy(self):
"""Check that the Linear Sum Assignment matches the Scipy implementation."""
batch_size, num_elems = 2, 25
weights = tf.random.uniform((batch_size, num_elems, num_elems),
minval=0.,
maxval=1.)
weights, assignment = matchers.hungarian_matching(weights)
for idx in range(batch_size):
_, scipy_assignment = optimize.linear_sum_assignment(weights.numpy()[idx])
hungarian_assignment = np.where(assignment.numpy()[idx])[1]
self.assertAllEqual(hungarian_assignment, scipy_assignment)
if __name__ == '__main__':
tf.test.main()
| 3,439 | 34.833333 | 80 | py |
models | models-master/official/projects/detr/ops/matchers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow implementation to solve the Linear Sum Assignment problem.
The Linear Sum Assignment problem involves determining the minimum weight
matching for bipartite graphs. For example, this problem can be defined by
a 2D matrix C, where each element i,j determines the cost of matching worker i
with job j. The solution to the problem is a complete assignment of jobs to
workers, such that no job is assigned to more than one work and no worker is
assigned more than one job, with minimum cost.
This implementation builds off of the Hungarian
Matching Algorithm (https://www.cse.ust.hk/~golin/COMP572/Notes/Matching.pdf).
Based on the original implementation by Jiquan Ngiam <[email protected]>.
"""
import tensorflow as tf
from official.modeling import tf_utils
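# Illustrative usage (a minimal sketch; the cost matrix is the same one used
# in matchers_test.py):
#
#   cost = tf.constant([[[4., 1., 3.],
#                        [2., 0., 5.],
#                        [3., 2., 2.]]])
#   _, assignment = hungarian_matching(cost)
#   # assignment[0] is a boolean permutation matrix picking entries
#   # (0, 1), (1, 0) and (2, 2) for a minimum total cost of 5.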
def _prepare(weights):
"""Prepare the cost matrix.
To speed up computational efficiency of the algorithm, all weights are shifted
to be non-negative. Each element is reduced by the row / column minimum. Note
that neither operation will effect the resulting solution but will provide
a better starting point for the greedy assignment. Note this corresponds to
the pre-processing and step 1 of the Hungarian algorithm from Wikipedia.
Args:
weights: A float32 [batch_size, num_elems, num_elems] tensor, where each
      inner matrix represents weights to be used for matching.
Returns:
A prepared weights tensor of the same shape and dtype.
"""
# Since every worker needs a job and every job needs a worker, we can subtract
# the minimum from each.
weights -= tf.reduce_min(weights, axis=2, keepdims=True)
weights -= tf.reduce_min(weights, axis=1, keepdims=True)
return weights
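# A hand-worked sketch of the reduction above: starting from
#   [[4, 1, 3],
#    [2, 0, 5],
#    [3, 2, 2]]
# subtracting row minima gives [[3, 0, 2], [2, 0, 5], [1, 0, 0]], and then
# subtracting column minima gives [[2, 0, 2], [1, 0, 5], [0, 0, 0]], which has
# a zero in every row and column while preserving the optimal assignment.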
def _greedy_assignment(adj_matrix):
"""Greedily assigns workers to jobs based on an adjaceny matrix.
Starting with an adjacency matrix representing the available connections
in the bi-partite graph, this function greedily chooses elements such
that each worker is matched to at most one job (or each job is assigned to
at most one worker). Note, if the adjacency matrix has no available values
for a particular row/column, the corresponding job/worker may go unassigned.
Args:
adj_matrix: A bool [batch_size, num_elems, num_elems] tensor, where each
element of the inner matrix represents whether the worker (row) can be
matched to the job (column).
Returns:
A bool [batch_size, num_elems, num_elems] tensor, where each element of the
inner matrix represents whether the worker has been matched to the job.
Each row and column can have at most one true element. Some of the rows
and columns may not be matched.
"""
_, num_elems, _ = tf_utils.get_shape_list(adj_matrix, expected_rank=3)
adj_matrix = tf.transpose(adj_matrix, [1, 0, 2])
# Create a dynamic TensorArray containing the assignments for each worker/job
assignment = tf.TensorArray(tf.bool, num_elems)
# Store the elements assigned to each column to update each iteration
col_assigned = tf.zeros_like(adj_matrix[0, ...], dtype=tf.bool)
# Iteratively assign each row using tf.foldl. Intuitively, this is a loop
# over rows, where we incrementally assign each row.
def _assign_row(accumulator, row_adj):
# The accumulator tracks the row assignment index.
idx, assignment, col_assigned = accumulator
# Viable candidates cannot already be assigned to another job.
candidates = row_adj & (~col_assigned)
    # Deterministically assign each row to its first available candidate
    # (argmax returns the lowest index among the remaining candidates).
max_candidate_idx = tf.argmax(
tf.cast(candidates, tf.int32), axis=1, output_type=tf.int32)
candidates_indicator = tf.one_hot(
max_candidate_idx,
num_elems,
on_value=True,
off_value=False,
dtype=tf.bool)
candidates_indicator &= candidates
# Make assignment to the column.
col_assigned |= candidates_indicator
assignment = assignment.write(idx, candidates_indicator)
return (idx + 1, assignment, col_assigned)
_, assignment, _ = tf.foldl(
_assign_row, adj_matrix, (0, assignment, col_assigned), back_prop=False)
assignment = assignment.stack()
assignment = tf.transpose(assignment, [1, 0, 2])
return assignment
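# A small sketch of the greedy pass above: with adjacency
#   [[True, False, True],
#    [True, True, False],
#    [False, True, False]]
# row 0 takes column 0 (its first candidate), row 1 takes column 1 since
# column 0 is taken, and row 2 stays unmatched because its only candidate,
# column 1, is already assigned.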
def _find_augmenting_path(assignment, adj_matrix):
"""Finds an augmenting path given an assignment and an adjacency matrix.
The augmenting path search starts from the unassigned workers, then goes on
to find jobs (via an unassigned pairing), then back again to workers (via an
existing pairing), and so on. The path alternates between unassigned and
existing pairings. Returns the state after the search.
  Note: In the state, the worker and job indices are 1-indexed so that we can
use 0 to represent unreachable nodes. State contains the following keys:
- jobs: A [batch_size, 1, num_elems] tensor containing the highest index
unassigned worker that can reach this job through a path.
- jobs_from_worker: A [batch_size, num_elems] tensor containing the worker
reached immediately before this job.
- workers: A [batch_size, num_elems, 1] tensor containing the highest index
unassigned worker that can reach this worker through a path.
- workers_from_job: A [batch_size, num_elems] tensor containing the job
reached immediately before this worker.
- new_jobs: A bool [batch_size, num_elems] tensor containing True if the
unassigned job can be reached via a path.
State can be used to recover the path via backtracking.
Args:
assignment: A bool [batch_size, num_elems, num_elems] tensor, where each
element of the inner matrix represents whether the worker has been matched
to the job. This may be a partial assignment.
adj_matrix: A bool [batch_size, num_elems, num_elems] tensor, where each
element of the inner matrix represents whether the worker (row) can be
matched to the job (column).
Returns:
A state dict, which represents the outcome of running an augmenting
path search on the graph given the assignment.
"""
batch_size, num_elems, _ = tf_utils.get_shape_list(
assignment, expected_rank=3)
unassigned_workers = ~tf.reduce_any(assignment, axis=2, keepdims=True)
unassigned_jobs = ~tf.reduce_any(assignment, axis=1, keepdims=True)
unassigned_pairings = tf.cast(adj_matrix & ~assignment, tf.int32)
existing_pairings = tf.cast(assignment, tf.int32)
# Initialize unassigned workers to have non-zero ids, assigned workers will
# have ids = 0.
worker_indices = tf.range(1, num_elems + 1, dtype=tf.int32)
init_workers = tf.tile(worker_indices[tf.newaxis, :, tf.newaxis],
[batch_size, 1, 1])
init_workers *= tf.cast(unassigned_workers, tf.int32)
state = {
"jobs": tf.zeros((batch_size, 1, num_elems), dtype=tf.int32),
"jobs_from_worker": tf.zeros((batch_size, num_elems), dtype=tf.int32),
"workers": init_workers,
"workers_from_job": tf.zeros((batch_size, num_elems), dtype=tf.int32)
}
def _has_active_workers(state, curr_workers):
"""Check if there are still active workers."""
del state
return tf.reduce_sum(curr_workers) > 0
def _augment_step(state, curr_workers):
"""Performs one search step."""
# Note: These steps could be potentially much faster if sparse matrices are
# supported. The unassigned_pairings and existing_pairings matrices can be
# very sparse.
# Find potential jobs using current workers.
potential_jobs = curr_workers * unassigned_pairings
curr_jobs = tf.reduce_max(potential_jobs, axis=1, keepdims=True)
curr_jobs_from_worker = 1 + tf.argmax(
potential_jobs, axis=1, output_type=tf.int32)
# Remove already accessible jobs from curr_jobs.
default_jobs = tf.zeros_like(state["jobs"], dtype=state["jobs"].dtype)
curr_jobs = tf.where(state["jobs"] > 0, default_jobs, curr_jobs)
curr_jobs_from_worker *= tf.cast(curr_jobs > 0, tf.int32)[:, 0, :]
# Find potential workers from current jobs.
potential_workers = curr_jobs * existing_pairings
curr_workers = tf.reduce_max(potential_workers, axis=2, keepdims=True)
curr_workers_from_job = 1 + tf.argmax(
potential_workers, axis=2, output_type=tf.int32)
# Remove already accessible workers from curr_workers.
default_workers = tf.zeros_like(state["workers"])
curr_workers = tf.where(
state["workers"] > 0, default_workers, curr_workers)
curr_workers_from_job *= tf.cast(curr_workers > 0, tf.int32)[:, :, 0]
# Update state so that we can backtrack later.
state = state.copy()
state["jobs"] = tf.maximum(state["jobs"], curr_jobs)
state["jobs_from_worker"] = tf.maximum(state["jobs_from_worker"],
curr_jobs_from_worker)
state["workers"] = tf.maximum(state["workers"], curr_workers)
state["workers_from_job"] = tf.maximum(state["workers_from_job"],
curr_workers_from_job)
return state, curr_workers
state, _ = tf.while_loop(
_has_active_workers,
_augment_step, (state, init_workers),
back_prop=False)
  # Compute new jobs; this is useful for determining termination of the
  # maximum bipartite matching and for initializing backtracking.
new_jobs = (state["jobs"] > 0) & unassigned_jobs
state["new_jobs"] = new_jobs[:, 0, :]
return state
def _improve_assignment(assignment, state):
"""Improves an assignment by backtracking the augmented path using state.
Args:
assignment: A bool [batch_size, num_elems, num_elems] tensor, where each
element of the inner matrix represents whether the worker has been matched
to the job. This may be a partial assignment.
state: A dict, which represents the outcome of running an augmenting path
search on the graph given the assignment.
Returns:
A new assignment matrix of the same shape and type as assignment, where the
assignment has been updated using the augmented path found.
"""
batch_size, num_elems, _ = tf_utils.get_shape_list(assignment, 3)
# We store the current job id and iteratively backtrack using jobs_from_worker
# and workers_from_job until we reach an unassigned worker. We flip all the
# assignments on this path to discover a better overall assignment.
# Note: The indices in state are 1-indexed, where 0 represents that the
# worker / job cannot be reached.
# Obtain initial job indices based on new_jobs.
curr_job_idx = tf.argmax(
tf.cast(state["new_jobs"], tf.int32), axis=1, output_type=tf.int32)
# Track whether an example is actively being backtracked. Since we are
# operating on a batch, not all examples in the batch may be active.
active = tf.gather(state["new_jobs"], curr_job_idx, batch_dims=1)
batch_range = tf.range(0, batch_size, dtype=tf.int32)
# Flip matrix tracks which assignments we need to flip - corresponding to the
# augmenting path taken. We use an integer tensor here so that we can use
# tensor_scatter_nd_add to update the tensor, and then cast it back to bool
# after the loop.
flip_matrix = tf.zeros((batch_size, num_elems, num_elems), dtype=tf.int32)
def _has_active_backtracks(flip_matrix, active, curr_job_idx):
"""Check if there are still active workers."""
del flip_matrix, curr_job_idx
return tf.reduce_any(active)
def _backtrack_one_step(flip_matrix, active, curr_job_idx):
"""Take one step in backtracking."""
# Discover the worker that the job originated from, note that this worker
# must exist by construction.
curr_worker_idx = tf.gather(
state["jobs_from_worker"], curr_job_idx, batch_dims=1) - 1
curr_worker_idx = tf.maximum(curr_worker_idx, 0)
update_indices = tf.stack([batch_range, curr_worker_idx, curr_job_idx],
axis=1)
update_indices = tf.maximum(update_indices, 0)
flip_matrix = tf.tensor_scatter_nd_add(flip_matrix, update_indices,
tf.cast(active, tf.int32))
# Discover the (potential) job that the worker originated from.
curr_job_idx = tf.gather(
state["workers_from_job"], curr_worker_idx, batch_dims=1) - 1
# Note that jobs may not be active, and we track that here (before
# adjusting indices so that they are all >= 0 for gather).
active &= curr_job_idx >= 0
curr_job_idx = tf.maximum(curr_job_idx, 0)
update_indices = tf.stack([batch_range, curr_worker_idx, curr_job_idx],
axis=1)
update_indices = tf.maximum(update_indices, 0)
flip_matrix = tf.tensor_scatter_nd_add(flip_matrix, update_indices,
tf.cast(active, tf.int32))
return flip_matrix, active, curr_job_idx
flip_matrix, _, _ = tf.while_loop(
_has_active_backtracks,
_backtrack_one_step, (flip_matrix, active, curr_job_idx),
back_prop=False)
flip_matrix = tf.cast(flip_matrix, tf.bool)
assignment = tf.math.logical_xor(assignment, flip_matrix)
return assignment
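# A sketch of the flip above: for an augmenting path
#   w0 -> j0 (unassigned pairing) -> w1 (existing pairing) -> j1 (new job),
# XOR-ing with the flip matrix replaces the single pairing (w1, j0) with the
# two pairings (w0, j0) and (w1, j1), growing the matching by one.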
def _maximum_bipartite_matching(adj_matrix, assignment=None):
"""Performs maximum bipartite matching using augmented paths.
Args:
adj_matrix: A bool [batch_size, num_elems, num_elems] tensor, where each
element of the inner matrix represents whether the worker (row) can be
matched to the job (column).
assignment: An optional bool [batch_size, num_elems, num_elems] tensor,
where each element of the inner matrix represents whether the worker has
been matched to the job. This may be a partial assignment. If specified,
this assignment will be used to seed the iterative algorithm.
Returns:
A state dict representing the final augmenting path state search, and
a maximum bipartite matching assignment tensor. Note that the state outcome
can be used to compute a minimum vertex cover for the bipartite graph.
"""
if assignment is None:
assignment = _greedy_assignment(adj_matrix)
state = _find_augmenting_path(assignment, adj_matrix)
def _has_new_jobs(state, assignment):
del assignment
return tf.reduce_any(state["new_jobs"])
def _improve_assignment_and_find_new_path(state, assignment):
assignment = _improve_assignment(assignment, state)
state = _find_augmenting_path(assignment, adj_matrix)
return state, assignment
state, assignment = tf.while_loop(
_has_new_jobs,
_improve_assignment_and_find_new_path, (state, assignment),
back_prop=False)
return state, assignment
def _compute_cover(state, assignment):
"""Computes a cover for the bipartite graph.
We compute a cover using the construction provided at
https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_(graph_theory)#Proof
which uses the outcome from the alternating path search.
Args:
state: A state dict, which represents the outcome of running an augmenting
path search on the graph given the assignment.
    assignment: A bool [batch_size, num_elems, num_elems] tensor, where each
      element of the inner matrix represents whether the worker has been
      matched to the job. This may be a partial assignment.
Returns:
A tuple of (workers_cover, jobs_cover) corresponding to row and column
covers for the bipartite graph. workers_cover is a boolean tensor of shape
[batch_size, num_elems, 1] and jobs_cover is a boolean tensor of shape
[batch_size, 1, num_elems].
"""
assigned_workers = tf.reduce_any(assignment, axis=2, keepdims=True)
assigned_jobs = tf.reduce_any(assignment, axis=1, keepdims=True)
reachable_workers = state["workers"] > 0
reachable_jobs = state["jobs"] > 0
workers_cover = assigned_workers & (~reachable_workers)
jobs_cover = assigned_jobs & reachable_jobs
return workers_cover, jobs_cover
def _update_weights_using_cover(workers_cover, jobs_cover, weights):
"""Updates weights for hungarian matching using a cover.
We first find the minimum uncovered weight. Then, we subtract this from all
the uncovered weights, and add it to all the doubly covered weights.
Args:
workers_cover: A boolean tensor of shape [batch_size, num_elems, 1].
jobs_cover: A boolean tensor of shape [batch_size, 1, num_elems].
weights: A float32 [batch_size, num_elems, num_elems] tensor, where each
      inner matrix represents weights to be used for matching.
Returns:
A new weight matrix with elements adjusted by the cover.
"""
max_value = tf.reduce_max(weights)
covered = workers_cover | jobs_cover
double_covered = workers_cover & jobs_cover
uncovered_weights = tf.where(covered,
tf.ones_like(weights) * max_value, weights)
min_weight = tf.reduce_min(uncovered_weights, axis=[-2, -1], keepdims=True)
add_weight = tf.where(double_covered,
tf.ones_like(weights) * min_weight,
tf.zeros_like(weights))
sub_weight = tf.where(covered, tf.zeros_like(weights),
tf.ones_like(weights) * min_weight)
return weights + add_weight - sub_weight
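# A hand-worked sketch: with weights [[1, 2], [3, 0]], workers_cover marking
# only the second row and jobs_cover marking no columns, the minimum uncovered
# weight is 1; the uncovered row becomes [0, 1], no cell is doubly covered,
# and the result is [[0, 1], [3, 0]].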
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
expected_rank_dict = {}
if isinstance(expected_rank, int):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = len(tensor.shape)
if actual_rank not in expected_rank_dict:
raise ValueError(
"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not "
"equal to the expected tensor rank `%s`" %
(name, actual_rank, str(tensor.shape), str(expected_rank)))
def hungarian_matching(weights):
"""Computes the minimum linear sum assignment using the Hungarian algorithm.
Args:
weights: A float32 [batch_size, num_elems, num_elems] tensor, where each
      inner matrix represents weights to be used for matching.
  Returns:
    A tuple of (weights, assignment): the adjusted float32 weight matrix, and
    a bool [batch_size, num_elems, num_elems] tensor where each element of the
    inner matrix represents whether the worker has been matched to the job.
    The returned matching will always be a perfect match.
"""
batch_size, num_elems, _ = tf_utils.get_shape_list(weights, 3)
weights = _prepare(weights)
adj_matrix = tf.equal(weights, 0.)
state, assignment = _maximum_bipartite_matching(adj_matrix)
workers_cover, jobs_cover = _compute_cover(state, assignment)
def _cover_incomplete(workers_cover, jobs_cover, *args):
del args
cover_sum = (
tf.reduce_sum(tf.cast(workers_cover, tf.int32)) +
tf.reduce_sum(tf.cast(jobs_cover, tf.int32)))
return tf.less(cover_sum, batch_size * num_elems)
def _update_weights_and_match(workers_cover, jobs_cover, weights, assignment):
weights = _update_weights_using_cover(workers_cover, jobs_cover, weights)
adj_matrix = tf.equal(weights, 0.)
state, assignment = _maximum_bipartite_matching(adj_matrix, assignment)
workers_cover, jobs_cover = _compute_cover(state, assignment)
return workers_cover, jobs_cover, weights, assignment
workers_cover, jobs_cover, weights, assignment = tf.while_loop(
_cover_incomplete,
_update_weights_and_match,
(workers_cover, jobs_cover, weights, assignment),
back_prop=False)
return weights, assignment
| 20,320 | 40.471429 | 80 | py |
models | models-master/official/projects/yolo/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Model Garden Vision training driver."""
from absl import app
from absl import flags
from official.common import flags as tfm_flags
from official.projects.yolo.common import registry_imports # pylint: disable=unused-import
from official.vision import train
FLAGS = flags.FLAGS
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(train.main)
| 981 | 31.733333 | 91 | py |
models | models-master/official/projects/yolo/common/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
# pylint: disable=g-bad-import-order
from official.vision import registry_imports
# import configs
from official.projects.yolo.configs import darknet_classification
from official.projects.yolo.configs import yolo as yolo_config
from official.projects.yolo.configs import yolov7 as yolov7_config
# import modeling components
from official.projects.yolo.modeling.backbones import darknet
from official.projects.yolo.modeling.decoders import yolo_decoder
from official.projects.yolo.modeling.backbones import yolov7 as yolov7_backbone
from official.projects.yolo.modeling.decoders import yolov7 as yolov7_decoder
# import tasks
from official.projects.yolo.tasks import image_classification
from official.projects.yolo.tasks import yolo as yolo_task
from official.projects.yolo.tasks import yolov7 as yolov7_task
# import optimization packages
from official.projects.yolo.optimization import optimizer_factory
from official.projects.yolo.optimization.configs import optimizer_config
from official.projects.yolo.optimization.configs import optimization_config
| 1,747 | 41.634146 | 79 | py |
models | models-master/official/projects/yolo/common/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/serving/export_saved_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""YOLO model export binary for serving/inference.
To export a trained checkpoint in saved_model format (shell script):
CHECKPOINT_PATH = XX
EXPORT_DIR_PATH = XX
CONFIG_FILE_PATH = XX
export_saved_model --export_dir=${EXPORT_DIR_PATH}/ \
--checkpoint_path=${CHECKPOINT_PATH} \
--config_file=${CONFIG_FILE_PATH} \
--batch_size=2 \
--input_image_size=224,224
To serve (python):
export_dir_path = XX
input_type = XX
input_images = XX
imported = tf.saved_model.load(export_dir_path)
model_fn = imported.signatures['serving_default']
output = model_fn(input_images)
"""
from absl import app
from absl import flags
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.yolo.common import registry_imports # pylint: disable=unused-import
from official.projects.yolo.serving import export_module_factory
from official.vision.serving import export_saved_model_lib
FLAGS = flags.FLAGS
flags.DEFINE_string('experiment', 'scaled_yolo',
'experiment type, e.g. scaled_yolo')
flags.DEFINE_string('export_dir', None, 'The export directory.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override', '',
    'The JSON/YAML file or string which specifies the parameter to be overridden'
' on top of `config_file` template.')
flags.DEFINE_integer('batch_size', 1, 'The batch size.')
flags.DEFINE_string('input_type', 'image_tensor',
'One of `image_tensor`, `image_bytes`, `tf_example`.')
flags.DEFINE_string(
'input_image_size', '224,224',
'The comma-separated string of two integers representing the height,width '
'of the input to the model.')
_EXPORT_SAVED_MODEL_SUBDIR = flags.DEFINE_string(
'export_saved_model_subdir', 'saved_model',
'The subdirectory for saved model.')
_INPUT_NAME = flags.DEFINE_string(
'input_name', None,
'Input tensor name in signature def. Default at None which'
'produces input tensor name `inputs`.')
def main(_):
params = exp_factory.get_exp_config(FLAGS.experiment)
for config_file in FLAGS.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
if FLAGS.params_override:
params = hyperparams.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
input_image_size = [int(x) for x in FLAGS.input_image_size.split(',')]
export_module = export_module_factory.get_export_module(
params=params,
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
num_channels=3,
input_name=_INPUT_NAME.value)
export_saved_model_lib.export_inference_graph(
input_type=FLAGS.input_type,
batch_size=FLAGS.batch_size,
input_image_size=input_image_size,
params=params,
checkpoint_path=FLAGS.checkpoint_path,
export_dir=FLAGS.export_dir,
export_module=export_module,
export_saved_model_subdir=_EXPORT_SAVED_MODEL_SUBDIR.value)
if __name__ == '__main__':
app.run(main)
| 4,285 | 35.948276 | 91 | py |
models | models-master/official/projects/yolo/serving/export_module_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for YOLO export modules."""
from typing import Any, Callable, Dict, List, Optional, Text, Union
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import export_base
from official.projects.yolo.configs.yolo import YoloTask
from official.projects.yolo.configs.yolov7 import YoloV7Task
from official.projects.yolo.modeling import factory as yolo_factory
from official.projects.yolo.modeling.backbones import darknet # pylint: disable=unused-import
from official.projects.yolo.modeling.decoders import yolo_decoder # pylint: disable=unused-import
from official.projects.yolo.serving import model_fn as yolo_model_fn
from official.vision import configs
from official.vision.dataloaders import classification_input
from official.vision.modeling import factory
from official.vision.serving import export_utils
class ExportModule(export_base.ExportModule):
"""Base Export Module."""
def __init__(self,
params: cfg.ExperimentConfig,
model: tf.keras.Model,
input_signature: Union[tf.TensorSpec, Dict[str, tf.TensorSpec]],
preprocessor: Optional[Callable[..., Any]] = None,
inference_step: Optional[Callable[..., Any]] = None,
postprocessor: Optional[Callable[..., Any]] = None,
eval_postprocessor: Optional[Callable[..., Any]] = None):
"""Initializes a module for export.
Args:
params: A dataclass for parameters to the module.
model: A tf.keras.Model instance to be exported.
input_signature: tf.TensorSpec, e.g. tf.TensorSpec(shape=[None, 224, 224,
3], dtype=tf.uint8)
preprocessor: An optional callable to preprocess the inputs.
inference_step: An optional callable to forward-pass the model.
postprocessor: An optional callable to postprocess the model outputs.
eval_postprocessor: An optional callable to postprocess model outputs used
for model evaluation.
"""
super().__init__(
params,
model=model,
preprocessor=preprocessor,
inference_step=inference_step,
postprocessor=postprocessor)
self.eval_postprocessor = eval_postprocessor
self.input_signature = input_signature
@tf.function
def serve(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.postprocessor(x) if self.postprocessor else x
return x
@tf.function
def serve_eval(self, inputs: Any) -> Any:
x = self.preprocessor(inputs=inputs) if self.preprocessor else inputs
x = self.inference_step(x)
x = self.eval_postprocessor(x) if self.eval_postprocessor else x
return x
def get_inference_signatures(
self, function_keys: Dict[Text, Text]):
"""Gets defined function signatures.
Args:
function_keys: A dictionary with keys as the function to create signature
for and values as the signature keys when returns.
Returns:
A dictionary with key as signature key and value as concrete functions
that can be used for tf.saved_model.save.
"""
signatures = {}
for _, def_name in function_keys.items():
if 'eval' in def_name and self.eval_postprocessor:
signatures[def_name] = self.serve_eval.get_concrete_function(
self.input_signature)
else:
signatures[def_name] = self.serve.get_concrete_function(
self.input_signature)
return signatures
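  # For example (a sketch): function_keys {'image_tensor': 'serving_default'}
  # exports a single signature backed by `serve`; a signature name containing
  # 'eval' selects `serve_eval` instead, provided an eval_postprocessor was
  # given.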
def create_classification_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3,
input_name: Optional[str] = None) -> ExportModule:
"""Creates classification export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels, input_name)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
model = factory.build_classification_model(
input_specs=input_specs,
model_config=params.task.model,
l2_regularizer=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
# If input_type is `tflite`, do not apply image preprocessing.
if input_type == 'tflite':
return image_tensor
def preprocess_image_fn(inputs):
return classification_input.Parser.inference_fn(inputs, input_image_size,
num_channels)
images = tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=tf.TensorSpec(
shape=input_image_size + [num_channels], dtype=tf.float32))
return images
def postprocess_fn(logits):
probs = tf.nn.softmax(logits)
return {'logits': logits, 'probs': probs}
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
postprocessor=postprocess_fn)
return export_module
def create_yolo_export_module(
params: cfg.ExperimentConfig,
input_type: str,
batch_size: int,
input_image_size: List[int],
num_channels: int = 3,
input_name: Optional[str] = None) -> ExportModule:
"""Creates YOLO export module."""
input_signature = export_utils.get_image_input_signatures(
input_type, batch_size, input_image_size, num_channels, input_name)
input_specs = tf.keras.layers.InputSpec(shape=[batch_size] +
input_image_size + [num_channels])
if isinstance(params.task, YoloTask):
model, _ = yolo_factory.build_yolo(
input_specs=input_specs,
model_config=params.task.model,
l2_regularization=None)
elif isinstance(params.task, YoloV7Task):
model = yolo_factory.build_yolov7(
input_specs=input_specs,
model_config=params.task.model,
l2_regularization=None)
def preprocess_fn(inputs):
image_tensor = export_utils.parse_image(inputs, input_type,
input_image_size, num_channels)
def normalize_image_fn(inputs):
image = tf.cast(inputs, dtype=tf.float32)
return image / 255.0
# If input_type is `tflite`, do not apply image preprocessing. Only apply
# normalization.
if input_type == 'tflite':
return normalize_image_fn(image_tensor), None
def preprocess_image_fn(inputs):
image = normalize_image_fn(inputs)
(image, image_info) = yolo_model_fn.letterbox(
image,
input_image_size,
letter_box=params.task.validation_data.parser.letter_box)
return image, image_info
images_spec = tf.TensorSpec(shape=input_image_size + [3], dtype=tf.float32)
image_info_spec = tf.TensorSpec(shape=[4, 2], dtype=tf.float32)
images, image_info = tf.nest.map_structure(
tf.identity,
tf.map_fn(
preprocess_image_fn,
elems=image_tensor,
fn_output_signature=(images_spec, image_info_spec),
parallel_iterations=32))
return images, image_info
def inference_steps(inputs, model):
images, image_info = inputs
detection = model.call(images, training=False)
if input_type != 'tflite':
detection['bbox'] = yolo_model_fn.undo_info(
detection['bbox'],
detection['num_detections'],
image_info,
expand=False,
)
final_outputs = {
'detection_boxes': detection['bbox'],
'detection_scores': detection['confidence'],
'detection_classes': detection['classes'],
'num_detections': detection['num_detections']
}
return final_outputs
export_module = ExportModule(
params,
model=model,
input_signature=input_signature,
preprocessor=preprocess_fn,
inference_step=inference_steps)
return export_module
def get_export_module(params: cfg.ExperimentConfig,
input_type: str,
batch_size: Optional[int],
input_image_size: List[int],
num_channels: int = 3,
input_name: Optional[str] = None) -> ExportModule:
"""Factory for export modules."""
if isinstance(params.task,
configs.image_classification.ImageClassificationTask):
export_module = create_classification_export_module(params, input_type,
batch_size,
input_image_size,
num_channels,
input_name)
elif isinstance(params.task, (YoloTask, YoloV7Task)):
export_module = create_yolo_export_module(params, input_type, batch_size,
input_image_size, num_channels,
input_name)
else:
raise ValueError('Export module not implemented for {} task.'.format(
type(params.task)))
return export_module
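# Illustrative usage (a minimal sketch; `params` would come from
# exp_factory.get_exp_config and the sizes below are assumptions):
#
#   module = get_export_module(params, input_type='image_tensor',
#                              batch_size=1, input_image_size=[640, 640])
#   signatures = module.get_inference_signatures(
#       {'image_tensor': 'serving_default'})
#   tf.saved_model.save(module, '/tmp/export', signatures=signatures)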
| 9,910 | 36.4 | 98 | py |
models | models-master/official/projects/yolo/serving/model_fn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO input and model functions for serving/inference."""
from typing import List, Tuple
import tensorflow as tf
from official.projects.yolo.ops import preprocessing_ops
from official.vision.ops import box_ops
def letterbox(image: tf.Tensor,
desired_size: List[int],
letter_box: bool = True) -> Tuple[tf.Tensor, tf.Tensor]:
"""Letter box an image for image serving."""
with tf.name_scope('letter_box'):
image_size = tf.cast(preprocessing_ops.get_image_shape(image), tf.float32)
scaled_size = tf.cast(desired_size, image_size.dtype)
if letter_box:
scale = tf.minimum(scaled_size[0] / image_size[0],
scaled_size[1] / image_size[1])
scaled_size = tf.round(image_size * scale)
else:
scale = 1.0
# Computes 2D image_scale.
image_scale = scaled_size / image_size
image_offset = tf.cast((desired_size - scaled_size) * 0.5, tf.int32)
offset = (scaled_size - desired_size) * 0.5
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method='nearest')
output_image = tf.image.pad_to_bounding_box(scaled_image, image_offset[0],
image_offset[1],
desired_size[0],
desired_size[1])
image_info = tf.stack([
image_size,
tf.cast(desired_size, dtype=tf.float32), image_scale,
tf.cast(offset, tf.float32)
])
return output_image, image_info
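# Illustrative usage of `letterbox` (a minimal sketch; the image shape and
# target size are assumptions, not values used by the library):
#
#   image = tf.zeros([480, 640, 3], dtype=tf.float32)
#   padded, info = letterbox(image, desired_size=[512, 512], letter_box=True)
#   # `padded` is 512x512 with the aspect ratio preserved via centered
#   # padding; `info` stacks [original size, desired size, scale, offset]
#   # row-wise, in the layout consumed by `undo_info` below.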
def undo_info(boxes: tf.Tensor,
num_detections: int,
info: tf.Tensor,
expand: bool = True) -> tf.Tensor:
"""Clip and normalize boxes for serving."""
mask = tf.sequence_mask(num_detections, maxlen=tf.shape(boxes)[1])
boxes = tf.cast(tf.expand_dims(mask, axis=-1), boxes.dtype) * boxes
if expand:
info = tf.cast(tf.expand_dims(info, axis=0), boxes.dtype)
inshape = tf.expand_dims(info[:, 1, :], axis=1)
ogshape = tf.expand_dims(info[:, 0, :], axis=1)
scale = tf.expand_dims(info[:, 2, :], axis=1)
offset = tf.expand_dims(info[:, 3, :], axis=1)
boxes = box_ops.denormalize_boxes(boxes, inshape)
boxes += tf.tile(offset, [1, 1, 2])
boxes /= tf.tile(scale, [1, 1, 2])
boxes = box_ops.clip_boxes(boxes, ogshape)
boxes = box_ops.normalize_boxes(boxes, ogshape)
return boxes
| 3,008 | 35.253012 | 78 | py |
models | models-master/official/projects/yolo/serving/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/serving/export_tflite.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to convert a saved model to TFLite model for Yolo model."""
from absl import app
from official.projects.yolo.common import registry_imports # pylint: disable=unused-import
from official.vision.serving import export_tflite
if __name__ == '__main__':
app.run(export_tflite.main)
| 902 | 36.625 | 91 | py |
models | models-master/official/projects/yolo/optimization/optimizer_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizer factory class."""
import gin
from official.modeling.optimization import ema_optimizer
from official.modeling.optimization import optimizer_factory
from official.projects.yolo.optimization import sgd_torch
optimizer_factory.LEGACY_OPTIMIZERS_CLS.update({
'sgd_torch': sgd_torch.SGDTorch,
})
OPTIMIZERS_CLS = optimizer_factory.LEGACY_OPTIMIZERS_CLS
LR_CLS = optimizer_factory.LR_CLS
WARMUP_CLS = optimizer_factory.WARMUP_CLS
class OptimizerFactory(optimizer_factory.OptimizerFactory):
"""Optimizer factory class.
This class builds learning rate and optimizer based on an optimization config.
To use this class, you need to do the following:
  (1) Define the optimization config; this includes the optimizer and the
  learning rate schedule.
(2) Initialize the class using the optimization config.
(3) Build learning rate.
(4) Build optimizer.
This is a typical example for using this class:
params = {
'optimizer': {
'type': 'sgd',
'sgd': {'momentum': 0.9}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {'boundaries': [10000, 20000],
'values': [0.1, 0.01, 0.001]}
},
'warmup': {
'type': 'linear',
'linear': {'warmup_steps': 500, 'warmup_learning_rate': 0.01}
}
}
opt_config = OptimizationConfig(params)
opt_factory = OptimizerFactory(opt_config)
lr = opt_factory.build_learning_rate()
optimizer = opt_factory.build_optimizer(lr)
"""
def get_bias_lr_schedule(self, bias_lr):
"""Build learning rate.
Builds learning rate from config. Learning rate schedule is built according
to the learning rate config. If learning rate type is consant,
lr_config.learning_rate is returned.
Args:
bias_lr: learning rate config.
Returns:
tf.keras.optimizers.schedules.LearningRateSchedule instance. If
learning rate type is consant, lr_config.learning_rate is returned.
"""
if self._lr_type == 'constant':
lr = self._lr_config.learning_rate
else:
lr = LR_CLS[self._lr_type](**self._lr_config.as_dict())
if self._warmup_config:
if self._warmup_type != 'linear':
        raise ValueError('Smart Bias is currently only supported with a '
                         'linear warmup.')
warm_up_cfg = self._warmup_config.as_dict()
warm_up_cfg['warmup_learning_rate'] = bias_lr
lr = WARMUP_CLS['linear'](lr, **warm_up_cfg)
return lr
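  # Illustrative usage of the method above (a minimal sketch; assumes a linear
  # warmup is configured, as this method requires):
  #
  #   opt_factory = OptimizerFactory(opt_config)
  #   bias_lr = opt_factory.get_bias_lr_schedule(0.1)
  #   optimizer = opt_factory.build_optimizer(
  #       opt_factory.build_learning_rate())
  #   optimizer.set_bias_lr(bias_lr)  # e.g. when the optimizer is SGDTorch.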
@gin.configurable
def add_ema(self, optimizer):
"""Add EMA to the optimizer independently of the build optimizer method."""
if self._use_ema:
optimizer = ema_optimizer.ExponentialMovingAverage(
optimizer, **self._ema_config.as_dict())
return optimizer
| 3,403 | 33.04 | 80 | py |
models | models-master/official/projects/yolo/optimization/sgd_torch.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SGD PyTorch optimizer."""
import re
from absl import logging
import tensorflow as tf
LearningRateSchedule = tf.keras.optimizers.schedules.LearningRateSchedule
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# pylint: disable=protected-access
# Get the distributed variable if it exists.
if hasattr(var, "_distributed_container"):
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
class SGDTorch(tf.keras.optimizers.legacy.Optimizer):
"""Optimizer that simulates the SGD module used in pytorch.
For details on the differences between the original SGD implemention and the
one in pytorch:
https://pytorch.org/docs/stable/generated/torch.optim.SGD.html.
This optimizer also allow for the usage of a momentum warmup along side a
learning rate warm up, though using this is not required.
Example of usage for training:
```python
opt = SGDTorch(learning_rate, weight_decay = 0.0001)
l2_regularization = None
  # Iterate over all model.trainable_variables and split the variables by key
  # into the weights, biases, and others.
  opt.search_and_set_variable_groups(model.trainable_variables)
  # If the learning rate schedule on the biases is different: if lr is not
  # set, the default schedule used for weights will be used on the biases.
  opt.set_bias_lr(<lr schedule>)
  # If the learning rate schedule on the others is different: if lr is not
  # set, the default schedule used for weights will be used on the others.
  opt.set_other_lr(<lr schedule>)
```
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
weight_decay=0.0,
learning_rate=0.01,
momentum=0.0,
momentum_start=0.0,
warmup_steps=1000,
nesterov=False,
name="SGD",
weight_keys=("kernel", "weight"),
bias_keys=("bias", "beta"),
**kwargs):
super(SGDTorch, self).__init__(name, **kwargs)
# Create Hyper Params for each group of the LR
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("bias_learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("other_learning_rate", kwargs.get("lr", learning_rate))
# SGD decay param
self._set_hyper("decay", self._initial_decay)
# Weight decay param
self._weight_decay = weight_decay != 0.0
self._set_hyper("weight_decay", weight_decay)
# Enable Momentum
self._momentum = False
if isinstance(momentum, tf.Tensor) or callable(momentum) or momentum > 0:
self._momentum = True
if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
raise ValueError("`momentum` must be between [0, 1].")
self._set_hyper("momentum", momentum)
self._set_hyper("momentum_start", momentum_start)
self._set_hyper("warmup_steps", tf.cast(warmup_steps, tf.int32))
# Enable Nesterov Momentum
self.nesterov = nesterov
# weights, biases, other
self._weight_keys = weight_keys
self._bias_keys = bias_keys
self._variables_set = False
self._wset = set()
self._bset = set()
self._oset = set()
logging.info("Pytorch SGD simulation: ")
logging.info("Weight Decay: %f", weight_decay)
def set_bias_lr(self, lr):
self._set_hyper("bias_learning_rate", lr)
def set_other_lr(self, lr):
self._set_hyper("other_learning_rate", lr)
def _search(self, var, keys):
"""Search all all keys for matches. Return True on match."""
if keys is not None:
# variable group is not ignored so search for the keys.
for r in keys:
if re.search(r, var.name) is not None:
return True
return False
def search_and_set_variable_groups(self, variables):
"""Search all variable for matches at each group."""
weights = []
biases = []
others = []
for var in variables:
if self._search(var, self._weight_keys):
# search for weights
weights.append(var)
elif self._search(var, self._bias_keys):
# search for biases
biases.append(var)
else:
# if all searches fail, add to other group
others.append(var)
self._set_variable_groups(weights, biases, others)
return weights, biases, others
def _set_variable_groups(self, weights, biases, others):
"""Sets the variables to be used in each group."""
if self._variables_set:
logging.warning("_set_variable_groups has been called again indicating"
"that the variable groups have already been set, they"
"will be updated.")
self._wset.update(set([_var_key(w) for w in weights]))
self._bset.update(set([_var_key(b) for b in biases]))
self._oset.update(set([_var_key(o) for o in others]))
self._variables_set = True
return
def _get_variable_group(self, var, coefficients):
if self._variables_set:
      # Check which group holds the variable; the groups were preset.
if _var_key(var) in self._wset:
return True, False, False
elif _var_key(var) in self._bset:
return False, True, False
else:
# search the variables at run time.
if self._search(var, self._weight_keys):
return True, False, False
elif self._search(var, self._bias_keys):
return False, True, False
return False, False, True
def _create_slots(self, var_list):
"""Create a momentum variable for each variable."""
if self._momentum:
for var in var_list:
# check if trainable to support GPU EMA.
if var.trainable:
self.add_slot(var, "momentum")
def _get_momentum(self, iteration):
"""Get the momentum value."""
momentum = self._get_hyper("momentum")
momentum_start = self._get_hyper("momentum_start")
momentum_warm_up_steps = tf.cast(
self._get_hyper("warmup_steps"), iteration.dtype)
value = tf.cond(
(iteration - momentum_warm_up_steps) <= 0,
true_fn=lambda: (momentum_start + # pylint: disable=g-long-lambda
(tf.cast(iteration, momentum.dtype) *
(momentum - momentum_start) / tf.cast(
momentum_warm_up_steps, momentum.dtype))),
false_fn=lambda: momentum)
return value
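  # In other words, the schedule computed above is linear: for iteration t,
  #   momentum(t) = momentum_start
  #                 + t * (momentum - momentum_start) / warmup_steps
  # while t <= warmup_steps, and momentum(t) = momentum afterwards.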
def _prepare_local(self, var_device, var_dtype, apply_state):
super(SGDTorch, self)._prepare_local(var_device, var_dtype, apply_state) # pytype: disable=attribute-error
weight_decay = self._get_hyper("weight_decay")
apply_state[(var_device,
var_dtype)]["weight_decay"] = tf.cast(weight_decay, var_dtype)
if self._momentum:
momentum = self._get_momentum(self.iterations)
momentum = tf.cast(momentum, var_dtype)
apply_state[(var_device,
var_dtype)]["momentum"] = tf.identity(momentum)
bias_lr = self._get_hyper("bias_learning_rate")
if isinstance(bias_lr, LearningRateSchedule):
bias_lr = bias_lr(self.iterations)
bias_lr = tf.cast(bias_lr, var_dtype)
apply_state[(var_device,
var_dtype)]["bias_lr_t"] = tf.identity(bias_lr)
other_lr = self._get_hyper("other_learning_rate")
if isinstance(other_lr, LearningRateSchedule):
other_lr = other_lr(self.iterations)
other_lr = tf.cast(other_lr, var_dtype)
apply_state[(var_device,
var_dtype)]["other_lr_t"] = tf.identity(other_lr)
return apply_state[(var_device, var_dtype)]
def _apply(self, grad, var, weight_decay, momentum, lr):
"""Uses Pytorch Optimizer with Weight decay SGDW."""
dparams = grad
groups = []
# do not update non-trainable weights
if not var.trainable:
return tf.group(*groups)
if self._weight_decay:
dparams += (weight_decay * var)
if self._momentum:
momentum_var = self.get_slot(var, "momentum")
momentum_update = momentum_var.assign(
momentum * momentum_var + dparams, use_locking=self._use_locking)
groups.append(momentum_update)
if self.nesterov:
dparams += (momentum * momentum_update)
else:
dparams = momentum_update
weight_update = var.assign_add(-lr * dparams, use_locking=self._use_locking)
groups.append(weight_update)
return tf.group(*groups)
def _run_sgd(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
self._fallback_apply_state(var_device, var_dtype))
weights, bias, others = self._get_variable_group(var, coefficients)
weight_decay = tf.zeros_like(coefficients["weight_decay"])
lr = coefficients["lr_t"]
if weights:
weight_decay = coefficients["weight_decay"]
lr = coefficients["lr_t"]
elif bias:
weight_decay = tf.zeros_like(coefficients["weight_decay"])
lr = coefficients["bias_lr_t"]
elif others:
weight_decay = tf.zeros_like(coefficients["weight_decay"])
lr = coefficients["other_lr_t"]
momentum = coefficients["momentum"]
return self._apply(grad, var, weight_decay, momentum, lr)
def _resource_apply_dense(self, grad, var, apply_state=None):
return self._run_sgd(grad, var, apply_state=apply_state)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
# This method is only needed for momentum optimization.
holder = tf.tensor_scatter_nd_add(
tf.zeros_like(var), tf.expand_dims(indices, axis=-1), grad)
return self._run_sgd(holder, var, apply_state=apply_state)
def get_config(self):
config = super(SGDTorch, self).get_config()
config.update({
"learning_rate": self._serialize_hyperparameter("learning_rate"),
"decay": self._initial_decay,
"momentum": self._serialize_hyperparameter("momentum"),
"momentum_start": self._serialize_hyperparameter("momentum_start"),
"weight_decay": self._serialize_hyperparameter("weight_decay"),
"warmup_steps": self._serialize_hyperparameter("warmup_steps"),
"nesterov": self.nesterov,
})
return config
@property
def learning_rate(self):
    return self._get_hyper("learning_rate")
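# Minimal usage sketch (illustrative; assumes the constructor registers the
# hyperparameters serialized in get_config above, and `model` and `grads` are
# hypothetical):
#
#   opt = SGDTorch(learning_rate=0.01, momentum=0.937, momentum_start=0.8,
#                  warmup_steps=1000, weight_decay=0.0005, nesterov=True)
#   opt.search_and_set_variable_groups(model.trainable_variables)
#   opt.apply_gradients(zip(grads, model.trainable_variables))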
| 11,225 | 34.751592 | 111 | py |
models | models-master/official/projects/yolo/optimization/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization package definition."""
# pylint: disable=wildcard-import
from official.modeling.optimization.configs.learning_rate_config import *
from official.modeling.optimization.ema_optimizer import ExponentialMovingAverage
from official.projects.yolo.optimization.configs.optimization_config import *
from official.projects.yolo.optimization.configs.optimizer_config import *
from official.projects.yolo.optimization.optimizer_factory import OptimizerFactory as YoloOptimizerFactory
| 1,099 | 46.826087 | 106 | py |
models | models-master/official/projects/yolo/optimization/configs/optimizer_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclasses for optimizer configs."""
import dataclasses
from typing import List, Optional
from official.modeling.hyperparams import base_config
from official.modeling.optimization.configs import optimizer_config
@dataclasses.dataclass
class BaseOptimizerConfig(base_config.Config):
"""Base optimizer config.
Attributes:
clipnorm: float >= 0 or None. If not None, Gradients will be clipped when
their L2 norm exceeds this value.
clipvalue: float >= 0 or None. If not None, Gradients will be clipped when
their absolute value exceeds this value.
global_clipnorm: float >= 0 or None. If not None, gradient of all weights is
clipped so that their global norm is no higher than this value
"""
clipnorm: Optional[float] = None
clipvalue: Optional[float] = None
global_clipnorm: Optional[float] = None
@dataclasses.dataclass
class SGDTorchConfig(optimizer_config.BaseOptimizerConfig):
"""Configuration for SGD optimizer.
  The attributes of this class match the arguments of tf.keras.optimizers.SGD.
  Attributes:
    name: name of the optimizer.
    decay: decay rate for SGD optimizer.
    nesterov: nesterov for SGD optimizer.
    momentum_start: momentum value at the start of the warmup period.
    momentum: momentum for SGD optimizer.
    warmup_steps: number of steps over which momentum ramps up from
      momentum_start to momentum.
    weight_decay: weight decay applied to the weights group only.
    weight_keys: name substrings used to match variables into the weights
      group.
    bias_keys: name substrings used to match variables into the bias group.
  """
name: str = "SGD"
decay: float = 0.0
nesterov: bool = False
momentum_start: float = 0.0
momentum: float = 0.9
warmup_steps: int = 0
weight_decay: float = 0.0
weight_keys: Optional[List[str]] = dataclasses.field(
default_factory=lambda: ["kernel", "weight"])
bias_keys: Optional[List[str]] = dataclasses.field(
default_factory=lambda: ["bias", "beta"])
| 2,283 | 34.6875 | 80 | py |
models | models-master/official/projects/yolo/optimization/configs/optimization_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataclasses for optimization configs.
This file define the dataclass for optimization configs (OptimizationConfig).
It also has two helper functions get_optimizer_config, and get_lr_config from
an OptimizationConfig class.
"""
import dataclasses
from typing import Optional
from official.modeling.optimization.configs import optimization_config as optimization_cfg
from official.projects.yolo.optimization.configs import optimizer_config as opt_cfg
@dataclasses.dataclass
class OptimizerConfig(optimization_cfg.OptimizerConfig):
"""Configuration for optimizer.
Attributes:
    type: 'str', type of optimizer to be used, one of the fields below.
    sgd: sgd optimizer config.
    adam: adam optimizer config.
    adamw: adam with weight decay.
    lamb: lamb optimizer.
    rmsprop: rmsprop optimizer.
    sgd_torch: PyTorch-style sgd optimizer config.
  """
type: Optional[str] = None
sgd_torch: opt_cfg.SGDTorchConfig = dataclasses.field(
default_factory=opt_cfg.SGDTorchConfig
)
@dataclasses.dataclass
class OptimizationConfig(optimization_cfg.OptimizationConfig):
"""Configuration for optimizer and learning rate schedule.
Attributes:
optimizer: optimizer oneof config.
ema: optional exponential moving average optimizer config, if specified, ema
optimizer will be used.
learning_rate: learning rate oneof config.
warmup: warmup oneof config.
"""
type: Optional[str] = None
optimizer: OptimizerConfig = dataclasses.field(
default_factory=OptimizerConfig
)
| 2,084 | 33.180328 | 90 | py |
models | models-master/official/projects/yolo/optimization/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/configs/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 configuration definition."""
import dataclasses
import os
from typing import List, Optional, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.yolo import optimization
from official.projects.yolo.configs import backbones
from official.projects.yolo.configs import decoders
from official.projects.yolo.configs.yolo import AnchorBoxes
from official.projects.yolo.configs.yolo import DataConfig
from official.projects.yolo.configs.yolo import Mosaic
from official.projects.yolo.configs.yolo import Parser
from official.projects.yolo.configs.yolo import YoloDetectionGenerator
from official.vision.configs import common
# pytype: disable=annotation-type-mismatch
MIN_LEVEL = 3
MAX_LEVEL = 5
GLOBAL_SEED = 1000
def _build_dict(min_level, max_level, value):
vals = {str(key): value for key in range(min_level, max_level + 1)}
vals['all'] = None
return lambda: vals
def _build_path_scales(min_level, max_level):
return lambda: {str(key): 2**key for key in range(min_level, max_level + 1)}
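# For example, with MIN_LEVEL=3 and MAX_LEVEL=5, _build_path_scales(3, 5)()
# evaluates to {'3': 8, '4': 16, '5': 32} (the stride of each FPN level), and
# _build_dict(3, 5, 2.0)() to {'3': 2.0, '4': 2.0, '5': 2.0, 'all': None}.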
# pylint: disable=missing-class-docstring
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
regenerate_source_id: bool = False
coco91_to_80: bool = True
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
regenerate_source_id: bool = False
label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
label_map_decoder: TfExampleDecoderLabelMap = dataclasses.field(
default_factory=TfExampleDecoderLabelMap
)
@dataclasses.dataclass
class YoloV7Head(hyperparams.Config):
"""Parameterization for the YOLO Head."""
num_anchors: int = 3
use_separable_conv: bool = False
@dataclasses.dataclass
class YoloV7Loss(hyperparams.Config):
"""Config or YOLOv7 loss."""
alpha: float = 0.0
gamma: float = 0.0
box_weight: float = 0.05
obj_weight: float = 0.7
cls_weight: float = 0.3
label_smoothing: float = 0.0
anchor_threshold: float = 4.0
iou_mix_ratio: float = 1.0
auto_balance: bool = False
use_ota: bool = True
@dataclasses.dataclass
class Box(hyperparams.Config):
box: List[int] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class YoloV7(hyperparams.Config):
input_size: Optional[List[int]] = dataclasses.field(
default_factory=lambda: [640, 640, 3]
)
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='yolov7', yolov7=backbones.YoloV7(model_id='yolov7')
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder( # pylint: disable=g-long-lambda
type='yolov7', yolo_decoder=decoders.YoloV7(model_id='yolov7')
)
)
head: YoloV7Head = dataclasses.field(default_factory=YoloV7Head)
detection_generator: YoloDetectionGenerator = dataclasses.field(
default_factory=lambda: YoloDetectionGenerator( # pylint: disable=g-long-lambda
box_type=_build_dict(MIN_LEVEL, MAX_LEVEL, 'scaled')(),
scale_xy=_build_dict(MIN_LEVEL, MAX_LEVEL, 2.0)(),
path_scales=_build_path_scales(MIN_LEVEL, MAX_LEVEL)(),
nms_version='iou',
iou_thresh=0.001,
nms_thresh=0.7,
max_boxes=300,
pre_nms_points=5000,
)
)
loss: YoloV7Loss = dataclasses.field(default_factory=YoloV7Loss)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
activation='swish',
use_sync_bn=True,
norm_momentum=0.99,
norm_epsilon=0.001,
)
)
num_classes: int = 80
min_level: int = 3
max_level: int = 5
anchor_boxes: AnchorBoxes = dataclasses.field(default_factory=AnchorBoxes)
@dataclasses.dataclass
class YoloV7Task(cfg.TaskConfig):
per_category_metrics: bool = False
smart_bias_lr: float = 0.0
model: YoloV7 = dataclasses.field(default_factory=YoloV7)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
weight_decay: float = 0.0
annotation_file: Optional[str] = None
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[str, List[str]] = (
'all' # all, backbone, and/or decoder
)
gradient_clip_norm: float = 0.0
seed = GLOBAL_SEED
# Sets maximum number of boxes to be evaluated by coco eval api.
max_num_eval_detections: int = 100
COCO_INPUT_PATH_BASE = (
'/readahead/200M/placer/prod/home/tensorflow-performance-data/datasets/coco'
)
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('yolov7')
def yolov7() -> cfg.ExperimentConfig:
"""YOLOv7 general config."""
return cfg.ExperimentConfig(
task=YoloV7Task(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
@exp_factory.register_config_factory('coco_yolov7')
def coco_yolov7() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv7."""
train_batch_size = 256
eval_batch_size = 256
train_epochs = 300
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
validation_interval = 5
warmup_steps = 3 * steps_per_epoch
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=YoloV7Task(
init_checkpoint='',
init_checkpoint_modules='backbone',
annotation_file=None,
weight_decay=0.0,
model=YoloV7(
norm_activation=common.NormActivation(
activation='swish',
norm_momentum=0.03,
norm_epsilon=0.001,
use_sync_bn=True),
head=YoloV7Head(),
loss=YoloV7Loss(),
anchor_boxes=AnchorBoxes(
anchors_per_scale=3,
boxes=[
Box(box=[12, 16]),
Box(box=[19, 36]),
Box(box=[40, 28]),
Box(box=[36, 75]),
Box(box=[76, 55]),
Box(box=[72, 146]),
Box(box=[142, 110]),
Box(box=[192, 243]),
Box(box=[459, 401]),
],
),
),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='float32',
parser=Parser(
max_num_instances=300,
letter_box=True,
random_flip=True,
random_pad=False,
jitter=0.0,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_translate=0.2,
aug_rand_saturation=0.7,
aug_rand_brightness=0.4,
aug_rand_hue=0.015,
aug_rand_angle=0.0,
aug_rand_perspective=0.0,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
area_thresh=0.0,
mosaic=Mosaic(
mosaic_frequency=1.0,
mosaic9_frequency=0.2,
mixup_frequency=0.15,
mosaic_crop_mode='scale',
mosaic_center=0.25,
mosaic9_center=0.33,
aug_scale_min=0.1,
aug_scale_max=1.9,
),
),
),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=True,
dtype='float32',
parser=Parser(
max_num_instances=300,
letter_box=True,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
area_thresh=0.0,
),
),
smart_bias_lr=0.1,
),
trainer=cfg.TrainerConfig(
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='AP',
best_checkpoint_metric_comp='higher',
train_steps=train_epochs * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=validation_interval * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9999,
'trainable_weights_only': False,
'dynamic_decay': True,
},
'optimizer': {
'type': 'sgd_torch',
'sgd_torch': {
'momentum': 0.937,
'momentum_start': 0.8,
'nesterov': True,
'warmup_steps': warmup_steps,
# Scale up the weight decay by batch size.
'weight_decay': 0.0005 * train_batch_size / 64,
},
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.01,
'alpha': 0.1,
'decay_steps': train_epochs * steps_per_epoch,
},
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': warmup_steps,
'warmup_learning_rate': 0.0,
},
},
}),
),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
return config
@exp_factory.register_config_factory('coco_yolov7tiny')
def coco_yolov7_tiny() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv7-tiny."""
config = coco_yolov7()
config.task.model.input_size = [416, 416, 3]
config.task.model.backbone.yolov7.model_id = 'yolov7-tiny'
config.task.model.decoder.yolov7.model_id = 'yolov7-tiny'
config.task.model.norm_activation.activation = 'leaky'
config.task.model.anchor_boxes.boxes = [
Box(box=[10, 13]),
Box(box=[16, 30]),
Box(box=[33, 23]),
Box(box=[30, 61]),
Box(box=[62, 45]),
Box(box=[59, 119]),
Box(box=[116, 90]),
Box(box=[156, 198]),
Box(box=[373, 326]),
]
config.task.model.loss.cls_weight = 0.5
config.task.model.loss.obj_weight = 1.0
config.task.train_data.parser.aug_rand_translate = 0.1
config.task.train_data.parser.mosaic.mixup_frequency = 0.05
config.task.train_data.parser.mosaic.aug_scale_min = 0.5
config.task.train_data.parser.mosaic.aug_scale_max = 1.5
config.trainer.optimizer_config.learning_rate.cosine.alpha = 0.01
return config
@exp_factory.register_config_factory('coco91_yolov7tiny')
def coco91_yolov7_tiny() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv7-tiny using 91 classes."""
config = coco_yolov7_tiny()
config.task.model.num_classes = 91
config.task.model.decoder.yolov7.use_separable_conv = True
config.task.model.head.use_separable_conv = True
config.task.train_data.coco91_to_80 = False
config.task.validation_data.coco91_to_80 = False
return config
@exp_factory.register_config_factory('coco_yolov7x')
def coco_yolov7x() -> cfg.ExperimentConfig:
  """COCO object detection with YOLOv7-X."""
  config = coco_yolov7()
config.task.model.backbone.yolov7.model_id = 'yolov7x'
config.task.model.decoder.yolov7.model_id = 'yolov7x'
return config
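# Usage sketch (illustrative): the factories registered above are retrieved by
# name through the experiment factory, e.g.
#
#   from official.core import exp_factory
#   config = exp_factory.get_exp_config('coco_yolov7')
#   config.task.train_data.global_batch_size = 64  # hypothetical override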
| 12,932 | 33.034211 | 86 | py |
models | models-master/official/projects/yolo/configs/yolo.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO configuration definition."""
import dataclasses
import os
from typing import Any, List, Optional, Union
import numpy as np
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.projects.yolo import optimization
from official.projects.yolo.configs import backbones
from official.projects.yolo.configs import decoders
from official.vision.configs import common
# pytype: disable=annotation-type-mismatch
MIN_LEVEL = 1
MAX_LEVEL = 7
GLOBAL_SEED = 1000
def _build_dict(min_level, max_level, value):
vals = {str(key): value for key in range(min_level, max_level + 1)}
vals['all'] = None
return lambda: vals
def _build_path_scales(min_level, max_level):
return lambda: {str(key): 2**key for key in range(min_level, max_level + 1)}
@dataclasses.dataclass
class FPNConfig(hyperparams.Config):
"""FPN config."""
all: Optional[Any] = None
def get(self):
"""Allow for a key for each level or a single key for all the levels."""
values = self.as_dict()
if 'all' in values and values['all'] is not None:
for key in values:
if key != 'all':
values[key] = values['all']
return values
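# For example, _build_dict(3, 5, 1.0)() produces
# {'3': 1.0, '4': 1.0, '5': 1.0, 'all': None}; calling get() on the resulting
# FPNConfig keeps the per-level values, while setting 'all' to a non-None value
# broadcasts it to every level key.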
# pylint: disable=missing-class-docstring
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
regenerate_source_id: bool = False
coco91_to_80: bool = True
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
regenerate_source_id: bool = False
label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = dataclasses.field(
default_factory=TfExampleDecoder
)
label_map_decoder: TfExampleDecoderLabelMap = dataclasses.field(
default_factory=TfExampleDecoderLabelMap
)
@dataclasses.dataclass
class Mosaic(hyperparams.Config):
mosaic_frequency: float = 0.0
mosaic9_frequency: float = 0.0
mixup_frequency: float = 0.0
mosaic_center: float = 0.2
mosaic9_center: float = 0.33
mosaic_crop_mode: Optional[str] = None
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
jitter: float = 0.0
@dataclasses.dataclass
class Parser(hyperparams.Config):
max_num_instances: int = 200
letter_box: Optional[bool] = True
random_flip: bool = True
  random_pad: bool = False
jitter: float = 0.0
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
aug_rand_saturation: float = 0.0
aug_rand_brightness: float = 0.0
aug_rand_hue: float = 0.0
aug_rand_angle: float = 0.0
aug_rand_translate: float = 0.0
aug_rand_perspective: float = 0.0
use_tie_breaker: bool = True
best_match_only: bool = False
anchor_thresh: float = -0.01
area_thresh: float = 0.1
mosaic: Mosaic = dataclasses.field(default_factory=Mosaic)
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
global_batch_size: int = 64
input_path: str = ''
tfds_name: str = ''
tfds_split: str = ''
is_training: bool = True
dtype: str = 'float16'
decoder: DataDecoder = dataclasses.field(default_factory=DataDecoder)
parser: Parser = dataclasses.field(default_factory=Parser)
shuffle_buffer_size: int = 10000
tfds_download: bool = True
cache: bool = False
drop_remainder: bool = True
file_type: str = 'tfrecord'
@dataclasses.dataclass
class YoloHead(hyperparams.Config):
"""Parameterization for the YOLO Head."""
smart_bias: bool = True
@dataclasses.dataclass
class YoloDetectionGenerator(hyperparams.Config):
apply_nms: bool = True
box_type: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 'original'))
scale_xy: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
path_scales: FPNConfig = dataclasses.field(
default_factory=_build_path_scales(MIN_LEVEL, MAX_LEVEL))
# Choose from v1, v2, iou and greedy.
nms_version: str = 'greedy'
iou_thresh: float = 0.001
nms_thresh: float = 0.6
max_boxes: int = 200
pre_nms_points: int = 5000
# Only works when nms_version='v2'.
use_class_agnostic_nms: Optional[bool] = False
@dataclasses.dataclass
class YoloLoss(hyperparams.Config):
ignore_thresh: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 0.0))
truth_thresh: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
box_loss_type: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 'ciou'))
iou_normalizer: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
cls_normalizer: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
object_normalizer: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 1.0))
max_delta: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, np.inf))
objectness_smooth: FPNConfig = dataclasses.field(
default_factory=_build_dict(MIN_LEVEL, MAX_LEVEL, 0.0))
label_smoothing: float = 0.0
use_scaled_loss: bool = True
update_on_repeat: bool = True
@dataclasses.dataclass
class Box(hyperparams.Config):
box: List[int] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class AnchorBoxes(hyperparams.Config):
boxes: Optional[List[Box]] = None
level_limits: Optional[List[int]] = None
anchors_per_scale: int = 3
generate_anchors: bool = False
scaling_mode: str = 'sqrt'
box_generation_mode: str = 'per_level'
num_samples: int = 1024
def get(self, min_level, max_level):
"""Distribute them in order to each level.
Args:
min_level: `int` the lowest output level.
      max_level: `int` the highest output level.
Returns:
anchors_per_level: A `Dict[List[int]]` of the anchor boxes for each level.
self.level_limits: A `List[int]` of the box size limits to link to each
level under anchor free conditions.
"""
if self.level_limits is None:
boxes = [box.box for box in self.boxes]
else:
boxes = [[1.0, 1.0]] * ((max_level - min_level) + 1)
self.anchors_per_scale = 1
anchors_per_level = dict()
start = 0
for i in range(min_level, max_level + 1):
anchors_per_level[str(i)] = boxes[start:start + self.anchors_per_scale]
start += self.anchors_per_scale
return anchors_per_level, self.level_limits
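  # Example: with anchors_per_scale=3 and nine boxes, get(3, 5) assigns boxes
  # 0-2 to level '3', 3-5 to level '4', and 6-8 to level '5'. When
  # level_limits is set (anchor-free mode), every level instead receives the
  # single placeholder box [1.0, 1.0].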
def set_boxes(self, boxes):
self.boxes = [Box(box=box) for box in boxes]
@dataclasses.dataclass
class Yolo(hyperparams.Config):
input_size: Optional[List[int]] = dataclasses.field(
default_factory=lambda: [512, 512, 3])
backbone: backbones.Backbone = dataclasses.field(
default_factory=lambda: backbones.Backbone( # pylint: disable=g-long-lambda
type='darknet', darknet=backbones.Darknet(model_id='cspdarknet53')
)
)
decoder: decoders.Decoder = dataclasses.field(
default_factory=lambda: decoders.Decoder( # pylint: disable=g-long-lambda
type='yolo_decoder',
yolo_decoder=decoders.YoloDecoder(version='v4', type='regular'),
)
)
head: YoloHead = dataclasses.field(default_factory=YoloHead)
detection_generator: YoloDetectionGenerator = dataclasses.field(
default_factory=YoloDetectionGenerator
)
loss: YoloLoss = dataclasses.field(default_factory=YoloLoss)
norm_activation: common.NormActivation = dataclasses.field(
default_factory=lambda: common.NormActivation( # pylint: disable=g-long-lambda
activation='mish',
use_sync_bn=True,
norm_momentum=0.99,
norm_epsilon=0.001,
)
)
num_classes: int = 80
anchor_boxes: AnchorBoxes = dataclasses.field(default_factory=AnchorBoxes)
darknet_based_model: bool = False
@dataclasses.dataclass
class YoloTask(cfg.TaskConfig):
per_category_metrics: bool = False
smart_bias_lr: float = 0.0
model: Yolo = dataclasses.field(default_factory=Yolo)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
weight_decay: float = 0.0
annotation_file: Optional[str] = None
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
gradient_clip_norm: float = 0.0
seed = GLOBAL_SEED
# Sets maximum number of boxes to be evaluated by coco eval api.
max_num_eval_detections: int = 100
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('yolo')
def yolo() -> cfg.ExperimentConfig:
"""Yolo general config."""
return cfg.ExperimentConfig(
task=YoloTask(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
@exp_factory.register_config_factory('yolo_darknet')
def yolo_darknet() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv3 and v4."""
train_batch_size = 256
eval_batch_size = 8
train_epochs = 300
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
validation_interval = 5
max_num_instances = 200
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=YoloTask(
smart_bias_lr=0.1,
init_checkpoint='',
init_checkpoint_modules='backbone',
annotation_file=None,
weight_decay=0.0,
model=Yolo(
darknet_based_model=True,
norm_activation=common.NormActivation(use_sync_bn=True),
head=YoloHead(smart_bias=True),
loss=YoloLoss(use_scaled_loss=False, update_on_repeat=True),
anchor_boxes=AnchorBoxes(
anchors_per_scale=3,
boxes=[
Box(box=[12, 16]),
Box(box=[19, 36]),
Box(box=[40, 28]),
Box(box=[36, 75]),
Box(box=[76, 55]),
Box(box=[72, 146]),
Box(box=[142, 110]),
Box(box=[192, 243]),
Box(box=[459, 401])
])),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='float32',
parser=Parser(
letter_box=False,
aug_rand_saturation=1.5,
aug_rand_brightness=1.5,
aug_rand_hue=0.1,
use_tie_breaker=True,
best_match_only=False,
anchor_thresh=0.4,
area_thresh=0.1,
max_num_instances=max_num_instances,
mosaic=Mosaic(
mosaic_frequency=0.75,
mixup_frequency=0.0,
mosaic_crop_mode='crop',
mosaic_center=0.2))),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=True,
dtype='float32',
parser=Parser(
letter_box=False,
use_tie_breaker=True,
best_match_only=False,
anchor_thresh=0.4,
area_thresh=0.1,
max_num_instances=max_num_instances,
))),
trainer=cfg.TrainerConfig(
train_steps=train_epochs * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=validation_interval * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9998,
'trainable_weights_only': False,
'dynamic_decay': True,
},
'optimizer': {
'type': 'sgd_torch',
'sgd_torch': {
'momentum': 0.949,
'momentum_start': 0.949,
'nesterov': True,
'warmup_steps': 1000,
'weight_decay': 0.0005,
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
240 * steps_per_epoch
],
'values': [
0.00131 * train_batch_size / 64.0,
0.000131 * train_batch_size / 64.0,
]
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 1000,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('scaled_yolo')
def scaled_yolo() -> cfg.ExperimentConfig:
"""COCO object detection with YOLOv4-csp and v4."""
train_batch_size = 256
eval_batch_size = 256
train_epochs = 300
warmup_epochs = 3
validation_interval = 5
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
max_num_instances = 300
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=YoloTask(
smart_bias_lr=0.1,
init_checkpoint_modules='',
weight_decay=0.0,
annotation_file=None,
model=Yolo(
darknet_based_model=False,
norm_activation=common.NormActivation(
activation='mish',
use_sync_bn=True,
norm_epsilon=0.001,
norm_momentum=0.97),
head=YoloHead(smart_bias=True),
loss=YoloLoss(use_scaled_loss=True),
anchor_boxes=AnchorBoxes(
anchors_per_scale=3,
boxes=[
Box(box=[12, 16]),
Box(box=[19, 36]),
Box(box=[40, 28]),
Box(box=[36, 75]),
Box(box=[76, 55]),
Box(box=[72, 146]),
Box(box=[142, 110]),
Box(box=[192, 243]),
Box(box=[459, 401])
])),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
dtype='float32',
parser=Parser(
aug_rand_saturation=0.7,
aug_rand_brightness=0.4,
aug_rand_hue=0.015,
letter_box=True,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
random_pad=False,
area_thresh=0.1,
max_num_instances=max_num_instances,
mosaic=Mosaic(
mosaic_crop_mode='scale',
mosaic_frequency=1.0,
mixup_frequency=0.0,
))),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
dtype='float32',
parser=Parser(
letter_box=True,
use_tie_breaker=True,
best_match_only=True,
anchor_thresh=4.0,
area_thresh=0.1,
max_num_instances=max_num_instances,
))),
trainer=cfg.TrainerConfig(
train_steps=train_epochs * steps_per_epoch,
validation_steps=20,
validation_interval=validation_interval * steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=5 * steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'ema': {
'average_decay': 0.9999,
'trainable_weights_only': False,
'dynamic_decay': True,
},
'optimizer': {
'type': 'sgd_torch',
'sgd_torch': {
'momentum': 0.937,
'momentum_start': 0.8,
'nesterov': True,
'warmup_steps': steps_per_epoch * warmup_epochs,
'weight_decay': 0.0005,
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': 0.01,
'alpha': 0.2,
'decay_steps': train_epochs * steps_per_epoch,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': steps_per_epoch * warmup_epochs,
'warmup_learning_rate': 0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| 18,648 | 33.155678 | 85 | py |
models | models-master/official/projects/yolo/configs/darknet_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification with darknet configs."""
import dataclasses
from typing import List, Optional
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.projects.yolo.configs import backbones
from official.vision.configs import common
from official.vision.configs import image_classification as imc
@dataclasses.dataclass
class ImageClassificationModel(hyperparams.Config):
"""Image classification model config."""
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=lambda: [224, 224])
backbone: backbones.Backbone = backbones.Backbone(
type='darknet', darknet=backbones.Darknet())
dropout_rate: float = 0.0
norm_activation: common.NormActivation = common.NormActivation()
# Adds a Batch Normalization layer pre-GlobalAveragePooling in classification.
add_head_batch_norm: bool = False
kernel_initializer: str = 'VarianceScaling'
@dataclasses.dataclass
class Losses(hyperparams.Config):
one_hot: bool = True
label_smoothing: float = 0.0
l2_weight_decay: float = 0.0
loss_weight: float = 1.0
soft_labels: bool = False
use_binary_cross_entropy: bool = False
@dataclasses.dataclass
class ImageClassificationTask(cfg.TaskConfig):
"""The model config."""
model: ImageClassificationModel = ImageClassificationModel()
train_data: imc.DataConfig = imc.DataConfig(is_training=True)
validation_data: imc.DataConfig = imc.DataConfig(is_training=False)
evaluation: imc.Evaluation = imc.Evaluation()
losses: Losses = Losses()
gradient_clip_norm: float = 0.0
logging_dir: Optional[str] = None
freeze_backbone: bool = False
@exp_factory.register_config_factory('darknet_classification')
def darknet_classification() -> cfg.ExperimentConfig:
"""Image classification general."""
return cfg.ExperimentConfig(
task=ImageClassificationTask(),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {'type': 'sgd', 'sgd': {'momentum': 0.9}},
'learning_rate': {
'type': 'polynomial',
'initial_learning_rate': 0.1,
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_learning_rate': 0,
},
},
})
),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
],
)
| 3,212 | 34.307692 | 80 | py |
models | models-master/official/projects/yolo/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/configs/backbones.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbones configurations."""
import dataclasses
from official.modeling import hyperparams
from official.vision.configs import backbones
@dataclasses.dataclass
class Darknet(hyperparams.Config):
"""DarkNet config."""
model_id: str = 'cspdarknet53'
width_scale: float = 1.0
depth_scale: float = 1.0
dilate: bool = False
min_level: int = 3
max_level: int = 5
use_separable_conv: bool = False
use_reorg_input: bool = False
@dataclasses.dataclass
class YoloV7(hyperparams.Config):
model_id: str = 'yolov7'
min_level: int = 3
max_level: int = 5
@dataclasses.dataclass
class Backbone(backbones.Backbone):
darknet: Darknet = dataclasses.field(default_factory=Darknet)
yolov7: YoloV7 = dataclasses.field(default_factory=YoloV7)
| 1,365 | 29.355556 | 74 | py |
models | models-master/official/projects/yolo/configs/decoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders configurations."""
import dataclasses
from typing import Optional
from official.modeling import hyperparams
from official.vision.configs import decoders
@dataclasses.dataclass
class YoloDecoder(hyperparams.Config):
"""Builds Yolo decoder.
  If the name or the version is specified, the other input parameters are
  ignored and the defaults for that version and name are used.
"""
version: Optional[str] = None
type: Optional[str] = None
use_fpn: Optional[bool] = None
use_spatial_attention: bool = False
use_separable_conv: bool = False
csp_stack: Optional[bool] = None
fpn_depth: Optional[int] = None
max_fpn_depth: Optional[int] = None
max_csp_stack: Optional[int] = None
fpn_filter_scale: Optional[int] = None
path_process_len: Optional[int] = None
max_level_process_len: Optional[int] = None
embed_spp: Optional[bool] = None
activation: Optional[str] = 'same'
@dataclasses.dataclass
class YoloV7(hyperparams.Config):
model_id: str = 'yolov7'
use_separable_conv: bool = False
@dataclasses.dataclass
class Decoder(decoders.Decoder):
type: Optional[str] = 'yolo_decoder'
yolo_decoder: YoloDecoder = dataclasses.field(default_factory=YoloDecoder)
yolov7: YoloV7 = dataclasses.field(default_factory=YoloV7)
| 1,855 | 32.142857 | 78 | py |
models | models-master/official/projects/yolo/dataloaders/tf_example_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow as tf
from official.vision.dataloaders import tf_example_decoder
def _coco91_to_80(classif, box, areas, iscrowds):
"""Function used to reduce COCO 91 to COCO 80 (2017 to 2014 format)."""
  # Vector where index i corresponds to the COCO 91 class id class_ids[i].
class_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85,
86, 87, 88, 89, 90
]
new_classes = tf.expand_dims(tf.convert_to_tensor(class_ids), axis=0)
  # Reshape the classes in order to build a class mask.
classes = tf.expand_dims(classif, axis=-1)
  # One-hot the classifications to match the 80 class format.
ind = classes == tf.cast(new_classes, classes.dtype)
# Select the max values.
selected_class = tf.reshape(
tf.math.argmax(tf.cast(ind, tf.float32), axis=-1), [-1])
ind = tf.where(tf.reduce_any(ind, axis=-1))
  # Gather the valid instances.
classif = tf.gather_nd(selected_class, ind)
box = tf.gather_nd(box, ind)
areas = tf.gather_nd(areas, ind)
iscrowds = tf.gather_nd(iscrowds, ind)
  # Recompute the number of viable detections; ideally it is unchanged.
num_detections = tf.shape(classif)[0]
return classif, box, areas, iscrowds, num_detections
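# Worked example (illustrative): COCO 2017 id 13 ('stop sign') sits at index 11
# of class_ids, so a decoded class tensor [1, 13] becomes [0, 11], while any
# box whose id is absent from class_ids (e.g. 12) is dropped along with its
# area and is_crowd entries.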
class TfExampleDecoder(tf_example_decoder.TfExampleDecoder):
"""Tensorflow Example proto decoder."""
def __init__(self,
coco91_to_80=None,
include_mask=False,
regenerate_source_id=False,
mask_binarize_threshold=None):
"""Initialize the example decoder.
Args:
coco91_to_80: `bool` indicating whether to convert coco from its 91 class
format to the 80 class format.
include_mask: `bool` indicating if the decoder should also decode instance
masks for instance segmentation.
regenerate_source_id: `bool` indicating if the source id needs to be
recreated for each image sample.
mask_binarize_threshold: `float` for binarizing mask values.
"""
if coco91_to_80 and include_mask:
      raise ValueError('If masks are included you cannot convert coco from '
                       'the 91 class format to the 80 class format.')
self._coco91_to_80 = coco91_to_80
super().__init__(
include_mask=include_mask,
regenerate_source_id=regenerate_source_id,
mask_binarize_threshold=mask_binarize_threshold)
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- source_id: a string scalar tensor.
- image: a uint8 tensor of shape [None, None, 3].
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
decoded_tensors = super().decode(serialized_example)
if self._coco91_to_80:
(decoded_tensors['groundtruth_classes'],
decoded_tensors['groundtruth_boxes'],
decoded_tensors['groundtruth_area'],
decoded_tensors['groundtruth_is_crowd'],
_) = _coco91_to_80(decoded_tensors['groundtruth_classes'],
decoded_tensors['groundtruth_boxes'],
decoded_tensors['groundtruth_area'],
decoded_tensors['groundtruth_is_crowd'])
return decoded_tensors
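# Usage sketch (illustrative):
#   decoder = TfExampleDecoder(coco91_to_80=True)
#   decoded = decoder.decode(serialized_example)  # 80-class groundtruth ids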
| 4,810 | 39.091667 | 80 | py |
models | models-master/official/projects/yolo/dataloaders/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 610 | 37.1875 | 74 | py |
models | models-master/official/projects/yolo/dataloaders/yolo_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detection Data parser and processing for YOLO."""
import tensorflow as tf
from official.projects.yolo.ops import anchor
from official.projects.yolo.ops import preprocessing_ops
from official.vision.dataloaders import parser
from official.vision.dataloaders import utils
from official.vision.ops import box_ops as bbox_ops
from official.vision.ops import preprocess_ops
class Parser(parser.Parser):
"""Parse the dataset in to the YOLO model format."""
def __init__(self,
output_size,
anchors,
expanded_strides,
level_limits=None,
max_num_instances=200,
area_thresh=0.1,
aug_rand_hue=1.0,
aug_rand_saturation=1.0,
aug_rand_brightness=1.0,
letter_box=False,
random_pad=True,
random_flip=True,
jitter=0.0,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_translate=0.0,
aug_rand_perspective=0.0,
aug_rand_angle=0.0,
anchor_t=4.0,
scale_xy=None,
best_match_only=False,
darknet=False,
use_tie_breaker=True,
dtype='float32',
seed=None):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `List` for [height, width] of output image. The
output_size should be divided by the largest feature stride 2^max_level.
anchors: `Dict[List[Union[int, float]]]` of anchor boxes to be bes used in
each level.
      expanded_strides: `Dict[int]` for how much the model scales down the
        images at each level. For example, if level 3 down samples the image
        by a factor of 16, the expanded strides dictionary will pass along
        {3: 16}, indicating that, relative to the original image, the shapes
        must be reduced by a factor of 16 to compute the loss.
      level_limits: `List` the box sizes that will be allowed at each FPN level
        as is done in the FCOS and YOLOX papers for anchor-free box assignment.
max_num_instances: `int` for the number of boxes to compute loss on.
area_thresh: `float` for the minimum area of a box to allow to pass
through for optimization.
      aug_rand_hue: `float` indicating the maximum scaling value for hue.
        Hue will be scaled between 1 - value and 1 + value.
      aug_rand_saturation: `float` indicating the maximum scaling value for
        saturation. Saturation will be scaled between 1/value and value.
      aug_rand_brightness: `float` indicating the maximum scaling value for
        brightness. Brightness will be scaled between 1/value and value.
letter_box: `boolean` indicating whether upon start of the data pipeline
regardless of the preprocessing ops that are used, the aspect ratio of
the images should be preserved.
      random_pad: `bool` indicating whether to use padding to apply random
        translation; true for darknet yolo, false for scaled yolo.
random_flip: `boolean` indicating whether or not to randomly flip the
image horizontally.
jitter: `float` for the maximum change in aspect ratio expected in each
preprocessing step.
aug_scale_min: `float` indicating the minimum scaling value for image
scale jitter.
aug_scale_max: `float` indicating the maximum scaling value for image
scale jitter.
aug_rand_translate: `float` ranging from 0 to 1 indicating the maximum
amount to randomly translate an image.
aug_rand_perspective: `float` ranging from 0.000 to 0.001 indicating how
        much to perspective-warp the image.
      aug_rand_angle: `float` indicating the maximum rotation angle.
        The angle will be sampled between 0 and this value.
      anchor_t: `float` indicating the threshold over which an anchor will be
        considered for prediction; at zero all the anchors will be used, and at
        1.0 only the best will be used. For anchor thresholds larger than 1.0
        we stop using the IoU for anchor comparison and resort directly to
        comparing the width and height; this is used for the scaled models.
      scale_xy: dictionary of `float` values indicating how far each pixel can
        see outside of its containment of 1.0. A value of 1.2 indicates there
        is a 20% extended radius around each pixel within which that pixel can
        predict a box center. The center can range from 0 - value/2 to
        1 + value/2. This value is set in the yolo filter and reused here;
        there should be one scale_xy value for each level from min_level to
        max_level.
best_match_only: `boolean` indicating how boxes are selected for
optimization.
darknet: `boolean` indicating which data pipeline to use. Setting to True
        swaps the pipeline to output images relative to YOLOv4 and older.
use_tie_breaker: `boolean` indicating whether to use the anchor threshold
value.
dtype: `str` indicating the output datatype of the datapipeline selecting
from {"float32", "float16", "bfloat16"}.
seed: `int` the seed for random number generation.
"""
for key in anchors:
      # Assert that the width and height are viable.
assert output_size[1] % expanded_strides[str(key)] == 0
assert output_size[0] % expanded_strides[str(key)] == 0
# Set the width and height properly and base init:
self._image_w = output_size[1]
self._image_h = output_size[0]
self._max_num_instances = max_num_instances
# Image scaling params
self._jitter = 0.0 if jitter is None else jitter
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
self._aug_rand_translate = aug_rand_translate
self._aug_rand_perspective = aug_rand_perspective
# Image spatial distortion
self._random_flip = random_flip
self._letter_box = letter_box
self._random_pad = random_pad
self._aug_rand_angle = aug_rand_angle
# Color space distortion of the image
self._aug_rand_saturation = aug_rand_saturation
self._aug_rand_brightness = aug_rand_brightness
self._aug_rand_hue = aug_rand_hue
# Set the per level values needed for operation
self._darknet = darknet
self._area_thresh = area_thresh
self._level_limits = level_limits
self._seed = seed
self._dtype = dtype
self._label_builder = anchor.YoloAnchorLabeler(
anchors=anchors,
anchor_free_level_limits=level_limits,
level_strides=expanded_strides,
center_radius=scale_xy,
max_num_instances=max_num_instances,
match_threshold=anchor_t,
best_matches_only=best_match_only,
use_tie_breaker=use_tie_breaker,
darknet=darknet,
dtype=dtype)
def _pad_infos_object(self, image):
"""Get a Tensor to pad the info object list."""
shape_ = tf.shape(image)
val = tf.stack([
tf.cast(shape_[:2], tf.float32),
tf.cast(shape_[:2], tf.float32),
tf.ones_like(tf.cast(shape_[:2], tf.float32)),
tf.zeros_like(tf.cast(shape_[:2], tf.float32)),
])
return val
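  # The stacked rows follow the [original_hw, desired_hw, scale, offset] info
  # layout produced by the resize ops used in this file, so this identity
  # object can stand in when no resize or crop was applied.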
def _jitter_scale(self, image, shape, letter_box, jitter, random_pad,
aug_scale_min, aug_scale_max, translate, angle,
perspective):
"""Distort and scale each input image."""
infos = []
if (aug_scale_min != 1.0 or aug_scale_max != 1.0):
crop_only = True
      # Jitter gives you only one info object and resize-and-crop gives you
      # one; if crop_only, there can be one from jitter and one from crop.
infos.append(self._pad_infos_object(image))
else:
crop_only = False
image, crop_info, _ = preprocessing_ops.resize_and_jitter_image(
image,
shape,
letter_box=letter_box,
jitter=jitter,
crop_only=crop_only,
random_pad=random_pad,
seed=self._seed,
)
infos.extend(crop_info)
image, _, affine = preprocessing_ops.affine_warp_image(
image,
shape,
scale_min=aug_scale_min,
scale_max=aug_scale_max,
translate=translate,
degrees=angle,
perspective=perspective,
random_pad=random_pad,
seed=self._seed,
)
return image, infos, affine
def _parse_train_data(self, data):
"""Parses data for training."""
# Initialize the shape constants.
image = data['image']
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
if self._random_flip:
# Randomly flip the image horizontally.
image, boxes, _ = preprocess_ops.random_horizontal_flip(
image, boxes, seed=self._seed)
if not data['is_mosaic']:
image, infos, affine = self._jitter_scale(
image, [self._image_h, self._image_w], self._letter_box, self._jitter,
self._random_pad, self._aug_scale_min, self._aug_scale_max,
self._aug_rand_translate, self._aug_rand_angle,
self._aug_rand_perspective)
# Clip and clean boxes.
boxes, inds = preprocessing_ops.transform_and_clip_boxes(
boxes,
infos,
affine=affine,
shuffle_boxes=False,
area_thresh=self._area_thresh,
filter_and_clip_boxes=True,
seed=self._seed)
classes = tf.gather(classes, inds)
info = infos[-1]
else:
image = tf.image.resize(
image, (self._image_h, self._image_w), method='nearest')
output_size = tf.cast([self._image_h, self._image_w], tf.float32)
boxes_ = bbox_ops.denormalize_boxes(boxes, output_size)
inds = bbox_ops.get_non_empty_box_indices(boxes_)
boxes = tf.gather(boxes, inds)
classes = tf.gather(classes, inds)
info = self._pad_infos_object(image)
    # Cast the image to the selected datatype, normalize it, and apply scaling
    # to the hue, saturation, and brightness.
image = tf.cast(image, dtype=self._dtype)
image = image / 255.0
image = preprocessing_ops.image_rand_hsv(
image,
self._aug_rand_hue,
self._aug_rand_saturation,
self._aug_rand_brightness,
seed=self._seed,
darknet=self._darknet or self._level_limits is not None)
    # Build the labels.
image, labels = self._build_label(
image, boxes, classes, info, inds, data, is_training=True)
return image, labels
def _parse_eval_data(self, data):
"""Parses data for evaluation."""
    # Get the image shape constants and cast the image to the selected datatype.
image = tf.cast(data['image'], dtype=self._dtype)
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
image, infos, _ = preprocessing_ops.resize_and_jitter_image(
image, [self._image_h, self._image_w],
letter_box=self._letter_box,
random_pad=False,
shiftx=0.5,
shifty=0.5,
jitter=0.0)
# Clip and clean boxes.
image = image / 255.0
boxes, inds = preprocessing_ops.transform_and_clip_boxes(
boxes, infos, shuffle_boxes=False, area_thresh=0.0,
filter_and_clip_boxes=False)
classes = tf.gather(classes, inds)
info = infos[-1]
image, labels = self._build_label(
image, boxes, classes, info, inds, data, is_training=False)
return image, labels
def set_shape(self, values, pad_axis=0, pad_value=0, inds=None):
"""Calls set shape for all input objects."""
if inds is not None:
values = tf.gather(values, inds)
vshape = values.get_shape().as_list()
values = preprocessing_ops.pad_max_instances(
values, self._max_num_instances, pad_axis=pad_axis, pad_value=pad_value)
vshape[pad_axis] = self._max_num_instances
values.set_shape(vshape)
return values
def _build_label(self,
image,
gt_boxes,
gt_classes,
info,
inds,
data,
is_training=True):
"""Label construction for both the train and eval data."""
width = self._image_w
height = self._image_h
# Set the image shape.
imshape = image.get_shape().as_list()
imshape[-1] = 3
image.set_shape(imshape)
labels = dict()
(labels['inds'], labels['upds'],
labels['true_conf']) = self._label_builder(gt_boxes, gt_classes, width,
height)
# Set/fix the boxes shape.
boxes = self.set_shape(gt_boxes, pad_axis=0, pad_value=0)
classes = self.set_shape(gt_classes, pad_axis=0, pad_value=-1)
# Build the dictionary set.
labels.update({
'source_id': utils.process_source_id(data['source_id']),
'bbox': tf.cast(boxes, dtype=self._dtype),
'classes': tf.cast(classes, dtype=self._dtype),
# For OTA loss.
'image_info': info,
})
# Update the labels dictionary.
if not is_training:
# Sets up groundtruth data for evaluation.
groundtruths = {
'source_id': labels['source_id'],
'height': data['height'],
'width': data['width'],
'num_detections': tf.shape(data['groundtruth_boxes'])[0],
'image_info': info,
'boxes': bbox_ops.denormalize_boxes(
data['groundtruth_boxes'],
tf.cast([data['height'], data['width']], gt_boxes.dtype)),
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(
tf.gather(data['groundtruth_is_crowd'], inds), tf.int32),
}
groundtruths['source_id'] = utils.process_source_id(
groundtruths['source_id'])
groundtruths = utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
labels['groundtruths'] = groundtruths
return image, labels
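  # Editor's note (not part of the original file): for evaluation the raw
  # groundtruths are additionally padded to `max_num_instances` so that
  # COCO-style metrics can consume statically shaped batches.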
| 14,606 | 38.585366 | 80 | py |
models | models-master/official/projects/yolo/dataloaders/classification_input.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification decoder and parser."""
import tensorflow as tf
from official.vision.dataloaders import classification_input
from official.vision.ops import preprocess_ops
class Parser(classification_input.Parser):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def _parse_train_image(self, decoded_tensors):
"""Parses image data for training."""
image_bytes = decoded_tensors[self._image_field_key]
if self._decode_jpeg_only:
image_shape = tf.image.extract_jpeg_shape(image_bytes)
# Crops image.
cropped_image = preprocess_ops.random_crop_image_v2(
image_bytes, image_shape)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), image_shape)),
lambda: preprocess_ops.center_crop_image_v2(image_bytes, image_shape),
lambda: cropped_image)
else:
# Decodes image.
image = tf.io.decode_image(image_bytes, channels=3)
image.set_shape([None, None, 3])
# Crops image.
cropped_image = preprocess_ops.random_crop_image(image)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), tf.shape(image))),
lambda: preprocess_ops.center_crop_image(image),
lambda: cropped_image)
if self._aug_rand_hflip:
image = tf.image.random_flip_left_right(image)
# Resizes image.
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
image.set_shape([self._output_size[0], self._output_size[1], 3])
# Apply autoaug or randaug.
if self._augmenter is not None:
image = self._augmenter.distort(image)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
image = image / 255.0
return image
def _parse_eval_image(self, decoded_tensors):
"""Parses image data for evaluation."""
image_bytes = decoded_tensors[self._image_field_key]
if self._decode_jpeg_only:
image_shape = tf.image.extract_jpeg_shape(image_bytes)
# Center crops.
image = preprocess_ops.center_crop_image_v2(image_bytes, image_shape)
else:
# Decodes image.
image = tf.io.decode_image(image_bytes, channels=3)
image.set_shape([None, None, 3])
# Center crops.
image = preprocess_ops.center_crop_image(image)
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
image.set_shape([self._output_size[0], self._output_size[1], 3])
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
image = image / 255.0
return image
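# Editor's note (not part of the original file): both overrides above
# replace the base vision parser's usual mean/std standardization with a
# plain rescale to [0, 1] (`image / 255.0`), matching the darknet-style
# pretraining convention used elsewhere in this project.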
| 3,279 | 34.268817 | 82 | py |
models | models-master/official/projects/yolo/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
import numpy as np
import tensorflow as tf
# pylint: disable=unused-import
from official.projects.yolo.configs import backbones
from official.projects.yolo.configs import yolo
from official.projects.yolo.configs import yolov7
from official.projects.yolo.modeling import factory
from official.projects.yolo.modeling.backbones import darknet
from official.projects.yolo.modeling.backbones import yolov7 as yolov7_backbone
from official.projects.yolo.modeling.decoders import yolo_decoder
from official.projects.yolo.modeling.decoders import yolov7 as yolov7_decoder
from official.projects.yolo.modeling.heads import yolo_head as heads
from official.projects.yolo.modeling.heads import yolov7_head
from official.projects.yolo.modeling.layers import detection_generator
# pylint: enable=unused-import
class FactoryTest(tf.test.TestCase):
def test_yolo_builder(self):
num_classes = 3
input_size = 640
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3])
model_config = yolo.Yolo(
num_classes=num_classes,
head=yolo.YoloHead(smart_bias=True),
anchor_boxes=yolo.AnchorBoxes(
anchors_per_scale=3,
boxes=[
yolo.Box(box=[12, 16]),
yolo.Box(box=[19, 36]),
yolo.Box(box=[40, 28]),
yolo.Box(box=[36, 75]),
yolo.Box(box=[76, 55]),
yolo.Box(box=[72, 146]),
yolo.Box(box=[142, 110]),
yolo.Box(box=[192, 243]),
yolo.Box(box=[459, 401])
]))
l2_regularizer = tf.keras.regularizers.l2(5e-5)
yolo_model, _ = factory.build_yolo(
input_specs=input_specs,
model_config=model_config,
l2_regularization=l2_regularizer)
# Do forward pass.
inputs = np.random.rand(2, input_size, input_size, 3)
_ = yolo_model(inputs)
def test_yolov7_builder(self):
num_classes = 3
input_size = 640
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3]
)
model_config = yolov7.YoloV7(
num_classes=num_classes,
head=yolov7.YoloV7Head(),
anchor_boxes=yolo.AnchorBoxes(
anchors_per_scale=3,
boxes=[
yolo.Box(box=[12, 16]),
yolo.Box(box=[19, 36]),
yolo.Box(box=[40, 28]),
yolo.Box(box=[36, 75]),
yolo.Box(box=[76, 55]),
yolo.Box(box=[72, 146]),
yolo.Box(box=[142, 110]),
yolo.Box(box=[192, 243]),
yolo.Box(box=[459, 401]),
],
),
)
l2_regularizer = tf.keras.regularizers.l2(5e-5)
yolo_model = factory.build_yolov7(
input_specs=input_specs,
model_config=model_config,
l2_regularization=l2_regularizer,
)
# Do forward pass.
inputs = np.random.rand(2, input_size, input_size, 3)
_ = yolo_model(inputs)
if __name__ == '__main__':
tf.test.main()
| 3,665 | 32.944444 | 79 | py |
models | models-master/official/projects/yolo/modeling/yolov7_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv7 models."""
from typing import Mapping, Union, Any, Dict
from absl import logging
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class YoloV7(tf.keras.Model):
"""The YOLOv7 model class."""
def __init__(self, backbone, decoder, head, detection_generator, **kwargs):
"""Detection initialization function.
Args:
backbone: `tf.keras.Model` a backbone network.
decoder: `tf.keras.Model` a decoder network.
      head: `tf.keras.Model`, the YOLOv7 detection head.
detection_generator: the detection generator.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'head': head,
'detection_generator': detection_generator
}
# model components
self._backbone = backbone
self._decoder = decoder
self._head = head
self._detection_generator = detection_generator
self._fused = False
return
def call(self,
inputs: tf.Tensor,
training: bool = None,
mask: Any = None) -> Dict[str, tf.Tensor]:
backbone_outputs = self.backbone(inputs)
decoder_outputs = self.decoder(backbone_outputs)
raw_outputs = self.head(decoder_outputs)
if training:
return {'raw_output': raw_outputs}
else:
# Post-processing.
predictions = self.detection_generator(raw_outputs)
predictions.update({'raw_output': raw_outputs})
return predictions
@property
def backbone(self):
return self._backbone
@property
def decoder(self):
return self._decoder
@property
def head(self):
return self._head
@property
def detection_generator(self):
return self._detection_generator
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.decoder is not None:
items.update(decoder=self.decoder)
return items
def fuse(self):
"""Performs re-parameterization on ConvBN and RepConv layers."""
logging.info('Fusing ConvBN and RepConv layers.')
if not self._fused:
self._fused = True
for layer in self.submodules:
if isinstance(layer, (nn_blocks.ConvBN, nn_blocks.RepConv)):
layer.fuse()
self.summary()
return
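# Editor's sketch (not part of the original file): the intended export-time
# flow. `model` is assumed to be a trained `YoloV7` built by
# `official.projects.yolo.modeling.factory.build_yolov7`.
def _example_fused_inference(model: 'YoloV7',
                             images: tf.Tensor) -> Dict[str, tf.Tensor]:
  """Runs re-parameterized inference; `fuse` is guarded and idempotent."""
  model.fuse()  # Folds ConvBN and collapses RepConv branches once.
  return model(images, training=False)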
| 3,205 | 28.145455 | 77 | py |
models | models-master/official/projects/yolo/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common factory functions yolo neural networks."""
from absl import logging
from official.projects.yolo.configs import yolo
from official.projects.yolo.modeling import yolo_model
from official.projects.yolo.modeling import yolov7_model
from official.projects.yolo.modeling.heads import yolo_head
from official.projects.yolo.modeling.heads import yolov7_head
from official.projects.yolo.modeling.layers import detection_generator
from official.vision.modeling.backbones import factory as backbone_factory
from official.vision.modeling.decoders import factory as decoder_factory
def build_yolo_detection_generator(model_config: yolo.Yolo, anchor_boxes):
"""Builds yolo detection generator."""
model = detection_generator.YoloLayer(
classes=model_config.num_classes,
anchors=anchor_boxes,
iou_thresh=model_config.detection_generator.iou_thresh,
nms_thresh=model_config.detection_generator.nms_thresh,
max_boxes=model_config.detection_generator.max_boxes,
pre_nms_points=model_config.detection_generator.pre_nms_points,
nms_version=model_config.detection_generator.nms_version,
box_type=model_config.detection_generator.box_type.get(),
path_scale=model_config.detection_generator.path_scales.get(),
scale_xy=model_config.detection_generator.scale_xy.get(),
label_smoothing=model_config.loss.label_smoothing,
use_scaled_loss=model_config.loss.use_scaled_loss,
update_on_repeat=model_config.loss.update_on_repeat,
truth_thresh=model_config.loss.truth_thresh.get(),
loss_type=model_config.loss.box_loss_type.get(),
max_delta=model_config.loss.max_delta.get(),
iou_normalizer=model_config.loss.iou_normalizer.get(),
cls_normalizer=model_config.loss.cls_normalizer.get(),
object_normalizer=model_config.loss.object_normalizer.get(),
ignore_thresh=model_config.loss.ignore_thresh.get(),
objectness_smooth=model_config.loss.objectness_smooth.get(),
use_class_agnostic_nms=model_config.detection_generator.use_class_agnostic_nms,
)
return model
def build_yolo_head(input_specs, model_config: yolo.Yolo, l2_regularization):
"""Builds yolo head."""
min_level = min(map(int, input_specs.keys()))
max_level = max(map(int, input_specs.keys()))
head = yolo_head.YoloHead(
min_level=min_level,
max_level=max_level,
classes=model_config.num_classes,
boxes_per_level=model_config.anchor_boxes.anchors_per_scale,
norm_momentum=model_config.norm_activation.norm_momentum,
norm_epsilon=model_config.norm_activation.norm_epsilon,
kernel_regularizer=l2_regularization,
smart_bias=model_config.head.smart_bias)
return head
def build_yolo(input_specs, model_config, l2_regularization):
"""Builds yolo model."""
backbone = model_config.backbone.get()
anchor_dict, _ = model_config.anchor_boxes.get(
backbone.min_level, backbone.max_level)
backbone = backbone_factory.build_backbone(input_specs, model_config.backbone,
model_config.norm_activation,
l2_regularization)
decoder = decoder_factory.build_decoder(backbone.output_specs, model_config,
l2_regularization)
head = build_yolo_head(decoder.output_specs, model_config, l2_regularization)
detection_generator_obj = build_yolo_detection_generator(model_config,
anchor_dict)
model = yolo_model.Yolo(
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=detection_generator_obj)
losses = detection_generator_obj.get_losses()
return model, losses
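# Editor's sketch (not part of the original file): a typical call pattern.
# `model_config` is assumed to be a fully populated `yolo.Yolo` config
# (anchor boxes included; see factory_test.py), and `tf` an assumed import:
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 640, 640, 3])
#   model, losses = build_yolo(
#       input_specs, model_config, tf.keras.regularizers.l2(5e-5))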
def build_yolov7_detection_generator(model_config: yolo.Yolo, anchor_boxes):
"""Builds yolo detection generator."""
model = detection_generator.YoloLayer(
classes=model_config.num_classes,
anchors=anchor_boxes,
iou_thresh=model_config.detection_generator.iou_thresh,
nms_thresh=model_config.detection_generator.nms_thresh,
max_boxes=model_config.detection_generator.max_boxes,
pre_nms_points=model_config.detection_generator.pre_nms_points,
nms_version=model_config.detection_generator.nms_version,
box_type=model_config.detection_generator.box_type.get(),
path_scale=model_config.detection_generator.path_scales.get(),
scale_xy=model_config.detection_generator.scale_xy.get(),
use_class_agnostic_nms=model_config.detection_generator.use_class_agnostic_nms,
)
return model
def build_yolov7(input_specs, model_config, l2_regularization):
"""Builds yolov7 model."""
norm_activation_config = model_config.norm_activation
backbone = backbone_factory.build_backbone(
input_specs,
model_config.backbone,
norm_activation_config,
l2_regularization,
)
decoder = decoder_factory.build_decoder(
backbone.output_specs,
model_config,
l2_regularization,
)
decoder_output_specs = decoder.output_specs
min_level = min(map(int, decoder_output_specs.keys()))
max_level = max(map(int, decoder_output_specs.keys()))
if min_level != model_config.min_level:
logging.warning(
(
'The `min_level` does not match! Expects min_level=%d but got '
'min_level=%d. Expected value will be used.'
),
min_level,
model_config.min_level,
)
if max_level != model_config.max_level:
logging.warning(
(
            'The `max_level` does not match! Expects max_level=%d but got '
'max_level=%d. Expected value will be used.'
),
max_level,
model_config.max_level,
)
anchor_dict, _ = model_config.anchor_boxes.get(min_level, max_level)
num_anchors = len(anchor_dict[str(min_level)])
head = yolov7_head.YoloV7DetectionHead(
model_config.num_classes,
min_level,
max_level,
num_anchors,
kernel_regularizer=l2_regularization,
use_separable_conv=model_config.head.use_separable_conv,
)
# YOLOv7 and YOLOv4 share the same detection generator.
detection_generator_obj = build_yolov7_detection_generator(
model_config, anchor_dict
)
model = yolov7_model.YoloV7(
backbone, decoder, head, detection_generator=detection_generator_obj
)
return model
| 6,972 | 39.540698 | 85 | py |
models | models-master/official/projects/yolo/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/modeling/yolo_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yolo models."""
from typing import Mapping, Union, Any, Dict
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class Yolo(tf.keras.Model):
"""The YOLO model class."""
def __init__(self,
backbone,
decoder,
head,
detection_generator,
**kwargs):
"""Detection initialization function.
Args:
backbone: `tf.keras.Model` a backbone network.
decoder: `tf.keras.Model` a decoder network.
      head: `tf.keras.Model`, the YOLO detection head.
detection_generator: the detection generator.
**kwargs: keyword arguments to be passed.
"""
super(Yolo, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'head': head,
'detection_generator': detection_generator
}
# model components
self._backbone = backbone
self._decoder = decoder
self._head = head
self._detection_generator = detection_generator
self._fused = False
def call(self,
inputs: tf.Tensor,
training: bool = None,
mask: Any = None) -> Dict[str, tf.Tensor]:
maps = self.backbone(inputs)
decoded_maps = self.decoder(maps)
raw_predictions = self.head(decoded_maps)
if training:
return {'raw_output': raw_predictions}
else:
# Post-processing.
predictions = self.detection_generator(raw_predictions)
predictions.update({'raw_output': raw_predictions})
return predictions
@property
def backbone(self):
return self._backbone
@property
def decoder(self):
return self._decoder
@property
def head(self):
return self._head
@property
def detection_generator(self):
return self._detection_generator
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.decoder is not None:
items.update(decoder=self.decoder)
return items
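  # Editor's sketch (not part of the original file): these items allow
  # partial restores of pretrained components, e.g.
  #
  #   ckpt = tf.train.Checkpoint(**model.checkpoint_items)
  #   ckpt.restore(checkpoint_path).expect_partial()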
def fuse(self):
"""Fuses all Convolution and Batchnorm layers to get better latency."""
print('Fusing Conv Batch Norm Layers.')
if not self._fused:
self._fused = True
for layer in self.submodules:
if isinstance(layer, nn_blocks.ConvBN):
layer.fuse()
self.summary()
return
| 3,210 | 27.415929 | 75 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains decoder architectures for YOLOv7 families.
The models are built with ELAN and E-ELAN.
ELAN was proposed in:
[1] Wang, Chien-Yao and Liao, Hong-Yuan Mark and Yeh, I-Hau
Designing Network Design Strategies Through Gradient Path Analysis
arXiv:2211.04800
E-ELAN is proposed in YOLOv7 paper:
[1] Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark
YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time
object detectors
arXiv:2207.02696
"""
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.projects.yolo.ops import initializer_ops
from official.vision.modeling.decoders import factory
# Required block functions for YOLOv7 decoder families.
_BLOCK_FNS = {
'convbn': nn_blocks.ConvBN,
'upsample2d': tf.keras.layers.UpSampling2D,
'maxpool2d': tf.keras.layers.MaxPooling2D,
'concat': tf.keras.layers.Concatenate,
'sppcspc': nn_blocks.SPPCSPC,
'repconv': nn_blocks.RepConv,
}
# Names for key arguments needed by each block function.
# Note that for field `from`, it can be either an integer or a str. Use of int
# means that the previous layer comes from a decoder intermediate output, while
# str means that the previous layer comes from the backbone output at a specific
# level.
_BLOCK_SPEC_SCHEMAS = {
'convbn': [
'block_fn',
'from',
'kernel_size',
'strides',
'filters',
'is_output',
],
'upsample2d': [
'block_fn',
'from',
'size',
'interpolation',
'is_output',
],
'maxpool2d': [
'block_fn',
'from',
'pool_size',
'strides',
'padding',
'is_output',
],
'concat': [
'block_fn',
'from',
'axis',
'is_output',
],
'sppcspc': ['block_fn', 'from', 'filters', 'is_output'],
'repconv': [
'block_fn',
'from',
'kernel_size',
'strides',
'filters',
'is_output',
],
}
# Define specs for the YOLOv7-tiny variant. It is recommended to use it
# together with the YOLOv7-tiny backbone.
_YoloV7Tiny = [
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['maxpool2d', -1, 5, 1, 'same', False],
['maxpool2d', -2, 9, 1, 'same', False],
['maxpool2d', -3, 13, 1, 'same', False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 256, False],
['concat', [-1, -7], -1, False],
['convbn', -1, 1, 1, 256, False], # 8
['convbn', -1, 1, 1, 128, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '4', 1, 1, 128, False], # route from backbone P4
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 128, False], # 18
['convbn', -1, 1, 1, 64, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '3', 1, 1, 64, False], # route from backbone P3
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 32, False],
['convbn', -2, 1, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 64, False], # 28
['convbn', -1, 3, 2, 128, False],
['concat', [-1, 18], -1, False],
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 128, False], # 36
['convbn', -1, 3, 2, 256, False],
['concat', [-1, 8], -1, False],
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 256, False], # 44
['convbn', 28, 1, 1, 128, True],
['convbn', 36, 1, 1, 256, True],
['convbn', 44, 1, 1, 512, True],
]
# Define specs for the YOLOv7 variant. The spec schema is defined above.
# It is recommended to use it together with the YOLOv7 backbone.
_YoloV7 = [
['sppcspc', -1, 512, False], # 0
['convbn', -1, 1, 1, 256, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '4', 1, 1, 256, False], # route from backbone P4
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 256, False], # 12
['convbn', -1, 1, 1, 128, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '3', 1, 1, 128, False], # route from backbone P3
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 128, False], # 24
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 128, False],
['convbn', -3, 1, 1, 128, False],
['convbn', -1, 3, 2, 128, False],
['concat', [-1, -3, 12], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 256, False], # 37
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 256, False],
['convbn', -3, 1, 1, 256, False],
['convbn', -1, 3, 2, 256, False],
['concat', [-1, -3, 0], -1, False],
['convbn', -1, 1, 1, 512, False],
['convbn', -2, 1, 1, 512, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -2, -3, -4, -5, -6], -1, False],
['convbn', -1, 1, 1, 512, False], # 50
['repconv', 24, 3, 1, 256, True],
['repconv', 37, 3, 1, 512, True],
['repconv', 50, 3, 1, 1024, True],
]
_YoloV7X = [
['sppcspc', -1, 640, False], # 0
['convbn', -1, 1, 1, 320, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '4', 1, 1, 320, False], # route from backbone P4
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 320, False], # 14
['convbn', -1, 1, 1, 160, False],
['upsample2d', -1, 2, 'nearest', False],
['convbn', '3', 1, 1, 160, False], # route from backbone P3
['concat', [-1, -2], -1, False],
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 160, False], # 28
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 160, False],
['convbn', -3, 1, 1, 160, False],
['convbn', -1, 3, 2, 160, False],
['concat', [-1, -3, 14], -1, False],
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 320, False], # 43
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 320, False],
['convbn', -3, 1, 1, 320, False],
['convbn', -1, 3, 2, 320, False],
['concat', [-1, -3, 0], -1, False],
['convbn', -1, 1, 1, 512, False],
['convbn', -2, 1, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['convbn', -1, 3, 1, 512, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 640, False], # 58
['repconv', 28, 3, 1, 320, True],
['repconv', 43, 3, 1, 640, True],
['repconv', 58, 3, 1, 1280, True],
]
# Aggregates all variants for YOLOv7 decoders.
DECODERS = {
'yolov7-tiny': _YoloV7Tiny,
'yolov7': _YoloV7,
'yolov7x': _YoloV7X,
}
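# Editor's sketch (not part of the original file): how a single spec row is
# interpreted. `YoloV7.__init__` below zips every row against its schema to
# produce the kwargs for the corresponding block layer.
def _example_interpret_spec():
  """Returns the kwargs parsed from one illustrative 'convbn' spec row."""
  spec = ['convbn', -1, 3, 2, 256, False]
  kwargs = dict(zip(_BLOCK_SPEC_SCHEMAS[spec[0]], spec))
  # kwargs == {'block_fn': 'convbn', 'from': -1, 'kernel_size': 3,
  #            'strides': 2, 'filters': 256, 'is_output': False}
  return kwargs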
class YoloV7(tf.keras.Model):
"""YOLOv7 decoder architecture."""
def __init__(
self,
input_specs,
model_id='yolov7',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
use_separable_conv=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
**kwargs,
):
"""Initializes the YOLOv7 decoder.
Args:
input_specs: a dictionary of `tf.TensorShape` from backbone outputs.
model_id: a `str` represents the model variants.
use_sync_bn: if set to `True`, use synchronized batch normalization.
norm_momentum: a `float` of normalization momentum for the moving average.
norm_epsilon: a small `float` added to variance to avoid dividing by zero.
activation: a `str` name of the activation function.
      use_separable_conv: `bool` whether to use separable convs.
kernel_initializer: a `str` for kernel initializer of convolutional
layers.
kernel_regularizer: a `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_initializer: a `str` for bias initializer of convolutional layers.
bias_regularizer: a `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._input_specs = input_specs
self._model_id = model_id
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
self._use_separable_conv = use_separable_conv
self._kernel_initializer = initializer_ops.pytorch_kernel_initializer(
kernel_initializer
)
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
inputs = self._generate_inputs(input_specs)
outputs = []
endpoints = {}
level = int(min(inputs.keys()))
block_specs = DECODERS[model_id.lower()]
for spec in block_specs:
block_kwargs = dict(zip(_BLOCK_SPEC_SCHEMAS[spec[0]], spec))
block_fn_str = block_kwargs.pop('block_fn')
from_index = block_kwargs.pop('from')
is_output = block_kwargs.pop('is_output')
x = self._group_layer_inputs(from_index, inputs, outputs)
if block_fn_str in ['convbn', 'sppcspc', 'repconv']:
block_kwargs.update({
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'use_separable_conv': self._use_separable_conv,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
})
block_fn = _BLOCK_FNS[block_fn_str](**block_kwargs)
x = block_fn(x)
outputs.append(x)
if is_output:
endpoints[str(level)] = x
level += 1
self._output_specs = {k: v.get_shape() for k, v in endpoints.items()}
super().__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _generate_inputs(self, input_specs):
inputs = {}
for level, input_shape in input_specs.items():
inputs[level] = tf.keras.layers.Input(shape=input_shape[1:])
return inputs
def _group_layer_inputs(self, from_index, inputs, outputs):
if isinstance(from_index, list):
return [self._group_layer_inputs(i, inputs, outputs) for i in from_index]
if isinstance(from_index, int):
# Need last layer output from backbone.
if len(outputs) + from_index == -1:
return inputs[max(inputs.keys())]
return outputs[from_index]
return inputs[from_index] # from_index is a string.
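  # Editor's note (not part of the original file): in the routing above, an
  # `int` index addresses a previously built decoder layer (negative values
  # count back from the current layer; a -1 before any layer exists falls
  # through to the deepest backbone output), while a `str` index addresses
  # a backbone endpoint by level, e.g. '4' routes the backbone P4 feature.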
def get_config(self):
config_dict = {
'input_specs': self._input_specs,
'model_id': self._model_id,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_decoder_builder('yolov7')
def build_yolov7(
input_specs: tf.keras.layers.InputSpec,
model_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds YOLOv7 decoder."""
decoder_config = model_config.decoder
norm_activation_config = model_config.norm_activation
assert (
decoder_config.type == 'yolov7'
), f'Inconsistent decoder type {decoder_config.type}.'
decoder_config = decoder_config.get()
assert (
decoder_config.model_id in DECODERS
), f'Unsupported decoder {decoder_config.model_id}.'
model = YoloV7(
model_id=decoder_config.model_id,
input_specs=input_specs,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
activation=norm_activation_config.activation,
kernel_regularizer=l2_regularizer,
use_separable_conv=decoder_config.use_separable_conv,
)
return model
| 15,634 | 32.623656 | 80 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolo_decoder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature Pyramid Network and Path Aggregation variants used in YOLO."""
from typing import Mapping, Optional, Union
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.vision.modeling.decoders import factory
# Model configurations.
# The structure is: model version {v3, v4, ...} mapped to the model config
# type {regular, tiny, csp, csp_large, ...}.
YOLO_MODELS = {
'v4':
dict(
regular=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
path_process_len=6),
tiny=dict(
embed_spp=False,
use_fpn=False,
max_level_process_len=2,
path_process_len=1),
csp=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
csp_stack=5,
fpn_depth=5,
path_process_len=6),
csp_large=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
csp_stack=7,
fpn_depth=7,
max_fpn_depth=5,
max_csp_stack=5,
path_process_len=8,
fpn_filter_scale=1),
csp_xlarge=dict(
embed_spp=False,
use_fpn=True,
max_level_process_len=None,
csp_stack=7,
fpn_depth=7,
path_process_len=8,
fpn_filter_scale=1),
),
'v3':
dict(
regular=dict(
embed_spp=False,
use_fpn=False,
max_level_process_len=None,
path_process_len=6),
tiny=dict(
embed_spp=False,
use_fpn=False,
max_level_process_len=2,
path_process_len=1),
spp=dict(
embed_spp=True,
use_fpn=False,
max_level_process_len=2,
path_process_len=1),
),
}
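# Editor's sketch (not part of the original file): `build_yolo_decoder` at
# the bottom of this file copies one of the templates above and overlays
# any non-None fields from the user config, roughly:
def _example_variant_overlay():
  """Returns the v4/csp template with a hypothetical override applied."""
  base = YOLO_MODELS['v4']['csp'].copy()
  base.update(path_process_len=4)  # Hypothetical user override.
  return base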
class _IdentityRoute(tf.keras.layers.Layer):
def call(self, inputs): # pylint: disable=arguments-differ
return None, inputs
class YoloFPN(tf.keras.layers.Layer):
"""YOLO Feature pyramid network."""
def __init__(self,
fpn_depth=4,
max_fpn_depth=None,
max_csp_stack=None,
use_spatial_attention=False,
csp_stack=False,
activation='leaky',
fpn_filter_scale=1,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Yolo FPN initialization function (Yolo V4).
Args:
fpn_depth: `int`, number of layers to use in each FPN path
if you choose to use an FPN.
max_fpn_depth: `int`, number of layers to use in each FPN path
if you choose to use an FPN along the largest FPN level.
max_csp_stack: `int`, number of layers to use for CSP on the largest_path
only.
use_spatial_attention: `bool`, use the spatial attention module.
csp_stack: `bool`, CSPize the FPN.
      activation: `str`, the activation function to use, typically leaky or
        mish.
fpn_filter_scale: `int`, scaling factor for the FPN filters.
use_sync_bn: if True, use synchronized batch normalization.
use_separable_conv: `bool` whether to use separable convs.
norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._fpn_depth = fpn_depth
self._max_fpn_depth = max_fpn_depth or self._fpn_depth
self._activation = activation
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_spatial_attention = use_spatial_attention
self._filter_scale = fpn_filter_scale
self._csp_stack = csp_stack
self._max_csp_stack = max_csp_stack or min(self._max_fpn_depth, csp_stack)
self._base_config = dict(
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_separable_conv=self._use_separable_conv,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
bias_regularizer=self._bias_regularizer,
norm_epsilon=self._norm_epsilon,
norm_momentum=self._norm_momentum)
def get_raw_depths(self, minimum_depth, inputs):
"""Calculates the unscaled depths of the FPN branches.
Args:
minimum_depth (int): depth of the smallest branch of the FPN.
      inputs (dict): dictionary mapping level names to input shapes.
Returns:
The unscaled depths of the FPN branches.
"""
depths = []
for i in range(self._min_level, self._max_level + 1):
depths.append(inputs[str(i)][-1] / self._filter_scale)
return list(reversed(depths))
def build(self, inputs):
"""Use config dictionary to generate all important attributes for head.
Args:
      inputs: dictionary mapping level names to input shapes.
"""
keys = [int(key) for key in inputs.keys()]
self._min_level = min(keys)
self._max_level = max(keys)
self._min_depth = inputs[str(self._min_level)][-1]
self._depths = self.get_raw_depths(self._min_depth, inputs)
# directly connect to an input path and process it
self.preprocessors = dict()
# resample an input and merge it with the output of another path
    # in order to aggregate backbone outputs
self.resamples = dict()
    # set of convolution layers and upsample layers that are used to
# prepare the FPN processors for output
for level, depth in zip(
reversed(range(self._min_level, self._max_level + 1)), self._depths):
if level == self._min_level:
self.resamples[str(level)] = nn_blocks.PathAggregationBlock(
filters=depth // 2,
inverted=True,
upsample=True,
drop_final=self._csp_stack == 0,
upsample_size=2,
**self._base_config)
self.preprocessors[str(level)] = _IdentityRoute()
elif level != self._max_level:
self.resamples[str(level)] = nn_blocks.PathAggregationBlock(
filters=depth // 2,
inverted=True,
upsample=True,
drop_final=False,
upsample_size=2,
**self._base_config)
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=depth,
repetitions=self._fpn_depth - int(level == self._min_level),
block_invert=True,
insert_spp=False,
csp_stack=self._csp_stack,
**self._base_config)
else:
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=depth,
repetitions=self._max_fpn_depth + 1 * int(self._csp_stack == 0),
insert_spp=True,
block_invert=False,
csp_stack=min(self._csp_stack, self._max_fpn_depth),
**self._base_config)
def call(self, inputs):
outputs = dict()
layer_in = inputs[str(self._max_level)]
for level in reversed(range(self._min_level, self._max_level + 1)):
_, x = self.preprocessors[str(level)](layer_in)
outputs[str(level)] = x
if level > self._min_level:
x_next = inputs[str(level - 1)]
_, layer_in = self.resamples[str(level - 1)]([x_next, x])
return outputs
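# Editor's note (not part of the original file): `YoloFPN.call` runs
# top-down (max_level -> min_level); each level's DarkRouteProcess output
# is upsampled by the next-lower level's PathAggregationBlock and merged
# with that level's backbone feature, i.e. a standard FPN top-down pathway.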
class YoloPAN(tf.keras.layers.Layer):
"""YOLO Path Aggregation Network."""
def __init__(self,
path_process_len=6,
max_level_process_len=None,
embed_spp=False,
use_spatial_attention=False,
csp_stack=False,
activation='leaky',
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
fpn_input=True,
fpn_filter_scale=1.0,
**kwargs):
"""Yolo Path Aggregation Network initialization function (Yolo V3 and V4).
Args:
      path_process_len: `int`, number of layers to use in each decoder path.
      max_level_process_len: `int`, number of layers to use in the largest
        processing path, or the backbone's largest output if it is different.
embed_spp: `bool`, use the SPP found in the YoloV3 and V4 model.
use_spatial_attention: `bool`, use the spatial attention module.
csp_stack: `bool`, CSPize the FPN.
      activation: `str`, the activation function to use, typically leaky or
        mish.
use_sync_bn: if True, use synchronized batch normalization.
use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing
by zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
      fpn_input: `bool`, whether the input into this layer is an FPN or
        a backbone.
fpn_filter_scale: `int`, scaling factor for the FPN filters.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._path_process_len = path_process_len
self._embed_spp = embed_spp
self._use_spatial_attention = use_spatial_attention
self._activation = activation
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._fpn_input = fpn_input
self._max_level_process_len = max_level_process_len
self._csp_stack = csp_stack
self._fpn_filter_scale = fpn_filter_scale
if max_level_process_len is None:
self._max_level_process_len = path_process_len
self._base_config = dict(
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_separable_conv=self._use_separable_conv,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
bias_regularizer=self._bias_regularizer,
norm_epsilon=self._norm_epsilon,
norm_momentum=self._norm_momentum)
def build(self, inputs):
"""Use config dictionary to generate all important attributes for head.
Args:
      inputs: dictionary mapping level names to input shapes.
"""
# define the key order
keys = [int(key) for key in inputs.keys()]
self._min_level = min(keys)
self._max_level = max(keys)
self._min_depth = inputs[str(self._min_level)][-1]
self._depths = self.get_raw_depths(self._min_depth, inputs)
# directly connect to an input path and process it
self.preprocessors = dict()
# resample an input and merge it with the output of another path
    # in order to aggregate backbone outputs
self.resamples = dict()
    # An FPN reverses the processing order of the backbone keys, so the
    # order in which objects are created and processed must be adjusted to
    # account for this. Without an FPN, the decoder connects directly to
    # the backbone, so objects need to be created from the largest to the
    # smallest level.
if self._fpn_input:
# process order {... 3, 4, 5}
self._iterator = range(self._min_level, self._max_level + 1)
self._check = lambda x: x < self._max_level
self._key_shift = lambda x: x + 1
self._input = self._min_level
downsample = True
upsample = False
else:
# process order {5, 4, 3, ...}
self._iterator = list(
reversed(range(self._min_level, self._max_level + 1)))
self._check = lambda x: x > self._min_level
self._key_shift = lambda x: x - 1
self._input = self._max_level
downsample = False
upsample = True
for level, depth in zip(self._iterator, self._depths):
if level > 5:
proc_filters = lambda x: x * 2
resample_filters = lambda x: x
elif self._csp_stack == 0:
proc_filters = lambda x: x
resample_filters = lambda x: x // 2
else:
proc_filters = lambda x: x * 2
resample_filters = lambda x: x
if level == self._input:
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=proc_filters(depth),
repetitions=self._max_level_process_len,
insert_spp=self._embed_spp,
block_invert=False,
insert_sam=self._use_spatial_attention,
csp_stack=self._csp_stack,
**self._base_config)
else:
self.resamples[str(level)] = nn_blocks.PathAggregationBlock(
filters=resample_filters(depth),
upsample=upsample,
downsample=downsample,
inverted=False,
drop_final=self._csp_stack == 0,
**self._base_config)
self.preprocessors[str(level)] = nn_blocks.DarkRouteProcess(
filters=proc_filters(depth),
repetitions=self._path_process_len,
insert_spp=False,
insert_sam=self._use_spatial_attention,
csp_stack=self._csp_stack,
**self._base_config)
def get_raw_depths(self, minimum_depth, inputs):
"""Calculates the unscaled depths of the FPN branches.
Args:
minimum_depth: `int` depth of the smallest branch of the FPN.
      inputs: `dict[str, tf.InputSpec]` mapping level names to input shapes.
Returns:
The unscaled depths of the FPN branches.
"""
depths = []
if len(inputs.keys()) > 3 or self._fpn_filter_scale > 1:
for i in range(self._min_level, self._max_level + 1):
depths.append(inputs[str(i)][-1])
else:
for _ in range(self._min_level, self._max_level + 1):
depths.append(minimum_depth)
minimum_depth *= 2
if self._fpn_input:
return depths
return list(reversed(depths))
def call(self, inputs):
outputs = dict()
layer_in = inputs[str(self._input)]
for level in self._iterator:
x_route, x = self.preprocessors[str(level)](layer_in)
outputs[str(level)] = x
if self._check(level):
x_next = inputs[str(self._key_shift(level))]
_, layer_in = self.resamples[str(
self._key_shift(level))]([x_route, x_next])
return outputs
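# Editor's note (not part of the original file): with `fpn_input=True`,
# `YoloPAN.call` runs bottom-up (min_level -> max_level) with downsampling
# resamples -- a PAN pathway stacked on the FPN. Without an FPN it runs
# top-down with upsampling, reproducing the YOLOv3-style head routing.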
class YoloDecoder(tf.keras.Model):
"""Darknet Backbone Decoder."""
def __init__(self,
input_specs,
use_fpn=False,
use_spatial_attention=False,
csp_stack=False,
fpn_depth=4,
max_fpn_depth=None,
max_csp_stack=None,
fpn_filter_scale=1,
path_process_len=6,
max_level_process_len=None,
embed_spp=False,
activation='leaky',
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""Yolo Decoder initialization function.
A unified model that ties all decoder components into a conditionally build
YOLO decoder.
Args:
input_specs: `dict[str, tf.InputSpec]`: input specs of each of the inputs
to the heads.
use_fpn: `bool`, use the FPN found in the YoloV4 model.
use_spatial_attention: `bool`, use the spatial attention module.
csp_stack: `bool`, CSPize the FPN.
      fpn_depth: `int`, number of layers to use in each FPN path if you choose
to use an FPN.
max_fpn_depth: `int`, maximum fpn depth.
max_csp_stack: `int`, maximum csp stack.
fpn_filter_scale: `int`, scaling factor for the FPN filters.
      path_process_len: `int`, number of layers to use in each decoder path.
      max_level_process_len: `int`, number of layers to use in the largest
        processing path, or the backbone's largest output if it is different.
embed_spp: `bool`, use the SPP found in the YoloV3 and V4 model.
      activation: `str`, the activation function to use, typically leaky or
        mish.
use_sync_bn: if True, use synchronized batch normalization.
      use_separable_conv: `bool` whether to use separable convs.
      norm_momentum: `float`, normalization momentum for the moving average.
norm_epsilon: `float`, small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
**kwargs: keyword arguments to be passed.
"""
self._input_specs = input_specs
self._use_fpn = use_fpn
self._fpn_depth = fpn_depth
self._max_fpn_depth = max_fpn_depth
self._max_csp_stack = max_csp_stack
self._path_process_len = path_process_len
self._max_level_process_len = max_level_process_len
self._embed_spp = embed_spp
self._activation = activation
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._base_config = dict(
use_spatial_attention=use_spatial_attention,
csp_stack=csp_stack,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
use_separable_conv=self._use_separable_conv,
fpn_filter_scale=fpn_filter_scale,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._decoder_config = dict(
path_process_len=self._path_process_len,
max_level_process_len=self._max_level_process_len,
embed_spp=self._embed_spp,
fpn_input=self._use_fpn,
**self._base_config)
inputs = {
key: tf.keras.layers.Input(shape=value[1:])
for key, value in input_specs.items()
}
if self._use_fpn:
inter_outs = YoloFPN(
fpn_depth=self._fpn_depth,
max_fpn_depth=self._max_fpn_depth,
max_csp_stack=self._max_csp_stack,
**self._base_config)(inputs)
outputs = YoloPAN(**self._decoder_config)(inter_outs)
else:
inter_outs = None
outputs = YoloPAN(**self._decoder_config)(inputs)
self._output_specs = {key: value.shape for key, value in outputs.items()}
super().__init__(inputs=inputs, outputs=outputs, name='YoloDecoder')
@property
def use_fpn(self):
return self._use_fpn
@property
def output_specs(self):
return self._output_specs
def get_config(self):
config = dict(
input_specs=self._input_specs,
use_fpn=self._use_fpn,
fpn_depth=self._fpn_depth,
**self._decoder_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@factory.register_decoder_builder('yolo_decoder')
def build_yolo_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs) -> Union[None, tf.keras.Model, tf.keras.layers.Layer]:
"""Builds Yolo FPN/PAN decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A OneOfConfig. Model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
**kwargs: Additional kwargs arguments.
Returns:
A `tf.keras.Model` instance of the Yolo FPN/PAN decoder.
"""
decoder_cfg = model_config.decoder.get()
norm_activation_config = model_config.norm_activation
activation = (
decoder_cfg.activation if decoder_cfg.activation != 'same' else
norm_activation_config.activation)
if decoder_cfg.version is None: # custom yolo
raise ValueError('Decoder version cannot be None, specify v3 or v4.')
if decoder_cfg.version not in YOLO_MODELS:
raise ValueError(
        'Unsupported model version; please select from {v3, v4}, '
        'or specify a custom decoder config using YoloDecoder in your yaml.')
if decoder_cfg.type is None:
decoder_cfg.type = 'regular'
if decoder_cfg.type not in YOLO_MODELS[decoder_cfg.version]:
    raise ValueError('Unsupported model type; please select from '
                     f'{YOLO_MODELS[decoder_cfg.version].keys()} '
                     'or specify a custom decoder config using YoloDecoder.')
base_model = YOLO_MODELS[decoder_cfg.version][decoder_cfg.type].copy()
cfg_dict = decoder_cfg.as_dict()
for key in base_model:
if cfg_dict[key] is not None:
base_model[key] = cfg_dict[key]
base_dict = dict(
activation=activation,
use_spatial_attention=decoder_cfg.use_spatial_attention,
use_separable_conv=decoder_cfg.use_separable_conv,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
base_model.update(base_dict)
model = YoloDecoder(input_specs, **base_model, **kwargs)
return model
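# Editor's sketch (not part of the original file): registry-driven usage;
# `model_config` is a hypothetical config whose `decoder.version`/`type`
# select one of the templates above:
#
#   decoder = build_yolo_decoder(
#       backbone.output_specs, model_config, l2_regularizer=None)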
| 23,635 | 36.222047 | 80 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolov7_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolov7 decoder."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.backbones import yolov7 as backbone
from official.projects.yolo.modeling.decoders import yolov7 as decoder
_INPUT_SIZE = (224, 224)
class YoloV7DecoderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('yolov7',),
)
def test_network_creation(self, model_id):
"""Tests declaration of YOLOv7 decoder variants."""
tf.keras.backend.set_image_data_format('channels_last')
backbone_network = backbone.YoloV7(model_id)
decoder_network = decoder.YoloV7(backbone_network.output_specs, model_id)
self.assertEqual(decoder_network.get_config()['model_id'], model_id)
inputs = tf.keras.Input(shape=(*_INPUT_SIZE, 3), batch_size=1)
outputs = decoder_network(backbone_network(inputs))
for level, level_output in outputs.items():
scale = 2 ** int(level)
input_size = (_INPUT_SIZE[0] // scale, _INPUT_SIZE[1] // scale)
self.assertAllEqual((1, *input_size), level_output.shape.as_list()[:-1])
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
)
)
def test_sync_bn_multiple_devices(self, strategy):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(1, *_INPUT_SIZE, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
backbone_network = backbone.YoloV7(model_id='yolov7', use_sync_bn=True)
decoder_network = decoder.YoloV7(
backbone_network.output_specs, model_id='yolov7', use_sync_bn=True)
_ = decoder_network(backbone_network(inputs))
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='yolov7',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
)
backbone_network = backbone.YoloV7(**kwargs)
kwargs['input_specs'] = backbone_network.output_specs
decoder_network = decoder.YoloV7(**kwargs)
# Create another network object from the first object's config.
new_network = decoder.YoloV7.from_config(decoder_network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(decoder_network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,560 | 34.969697 | 79 | py |
models | models-master/official/projects/yolo/modeling/decoders/yolo_decoder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for YOLO."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.decoders import yolo_decoder as decoders
class YoloDecoderTest(parameterized.TestCase, tf.test.TestCase):
def _build_yolo_decoder(self, input_specs, name='1'):
# Builds 4 different arbitrary decoders.
if name == '1':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=False,
use_fpn=False,
max_level_process_len=2,
path_process_len=1,
activation='mish')
elif name == '6spp':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=True,
use_fpn=False,
max_level_process_len=None,
path_process_len=6,
activation='mish')
elif name == '6sppfpn':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=True,
use_fpn=True,
max_level_process_len=None,
path_process_len=6,
activation='mish')
elif name == '6':
model = decoders.YoloDecoder(
input_specs=input_specs,
embed_spp=False,
use_fpn=False,
max_level_process_len=None,
path_process_len=6,
activation='mish')
else:
raise NotImplementedError(f'YOLO decoder test {name} not implemented.')
return model
@parameterized.parameters('1', '6spp', '6sppfpn', '6')
def test_network_creation(self, version):
"""Test creation of ResNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, version)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
endpoints = decoder.call(inputs)
for key in endpoints.keys():
self.assertAllEqual(endpoints[key].shape.as_list(), input_shape[key])
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
use_sync_bn=[False, True],
))
def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
"""Test for sync bn on TPU and GPU devices."""
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, '6')
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = decoder.call(inputs)
@parameterized.parameters(1, 3, 4)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, '6')
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = decoder(inputs)
def test_serialize_deserialize(self):
"""Create a network object that sets all of its config options."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 256],
'4': [1, 26, 26, 512],
'5': [1, 13, 13, 1024]
}
decoder = self._build_yolo_decoder(input_shape, '6')
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
_ = decoder(inputs)
config = decoder.get_config()
decoder_from_config = decoders.YoloDecoder.from_config(config)
self.assertAllEqual(decoder.get_config(), decoder_from_config.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,804 | 30.405229 | 79 | py |
models | models-master/official/projects/yolo/modeling/decoders/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/modeling/layers/detection_generator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for yolo layer (detection layer)."""
from typing import Optional
import tensorflow as tf
from official.projects.yolo.losses import yolo_loss
from official.projects.yolo.ops import box_ops
from official.projects.yolo.ops import loss_utils
from official.vision.modeling.layers import detection_generator
class YoloLayer(tf.keras.layers.Layer):
"""Yolo layer (detection generator)."""
def __init__(
self,
anchors,
classes,
apply_nms=True,
iou_thresh=0.0,
ignore_thresh=0.7,
truth_thresh=1.0,
nms_thresh=0.6,
max_delta=10.0,
loss_type='ciou',
iou_normalizer=1.0,
cls_normalizer=1.0,
object_normalizer=1.0,
use_scaled_loss=False,
update_on_repeat=False,
pre_nms_points=5000,
label_smoothing=0.0,
max_boxes=200,
box_type='original',
path_scale=None,
scale_xy=None,
nms_version='greedy',
objectness_smooth=False,
use_class_agnostic_nms: Optional[bool] = False,
**kwargs
):
"""Parameters for the loss functions used at each detection head output.
Args:
anchors: `List[List[int]]` for the anchor boxes that are used in the
model.
classes: `int` for the number of classes.
apply_nms: A boolean indicating whether to apply NMS.
iou_thresh: `float` to use many anchors per object if IoU(Obj, Anchor) >
iou_thresh.
ignore_thresh: `float` for the IOU value over which the loss is not
propagated, and a detection is assumed to have been made.
truth_thresh: `float` for the IOU value over which the loss is propagated
despite a detection being made.
nms_thresh: `float` for the minimum IOU value for an overlap.
max_delta: gradient clipping to apply to the box loss.
loss_type: `str` for the type of iou loss to use, one of {ciou, diou,
giou, iou}.
iou_normalizer: `float` for how much to scale the loss on the IOU or the
boxes.
cls_normalizer: `float` for how much to scale the loss on the classes.
object_normalizer: `float` for how much to scale loss on the detection
map.
use_scaled_loss: `bool` for whether to use the scaled loss or the
traditional loss.
update_on_repeat: `bool` indicating how you would like to handle repeated
indexes in a given [j, i] index. Setting this to True will give more
consistent mAP; setting it to False will improve recall by 1-2% but will
sacrifice some mAP.
pre_nms_points: `int` number of top candidate detections per class before
NMS.
label_smoothing: `float` for how much to smooth the loss on the classes.
max_boxes: `int` for the maximum number of boxes retained over all
classes.
box_type: `str`, there are 3 different box types that will affect training
differently {original, scaled and anchor_free}. The original method
decodes the boxes by applying an exponential to the model width and
height maps, then scaling the maps by the anchor boxes. This method is
used in Yolo-v4, Yolo-v3, and all its counterparts. The Scale method
squares the width and height and scales both by a fixed factor of 4.
This method is used in the Scale Yolo models, as well as Yolov4-CSP.
Finally, anchor_free is like the original method but will not apply an
activation function to the boxes; this is used for some of the newer
anchor-free versions of YOLO.
path_scale: `dict` for the size of the input tensors. Defaults to
precalculated values from the `mask`.
scale_xy: dictionary of `float` values indicating how far outside its own
cell a pixel may place a predicted box center. A value of 1.2 allows a
20% extended radius around each cell, so the center may range from
0 - value/2 to 1 + value/2. This value is set in the yolo filter and
reused here; there should be one scale_xy value for each level from
min_level to max_level.
nms_version: `str` for which non max suppression to use.
objectness_smooth: `float` for how much to smooth the loss on the
detection map.
use_class_agnostic_nms: A `bool` of whether non max suppression is
operated on all the boxes using max scores across all classes. Only
valid when nms_version is v2.
**kwargs: Additional keyword arguments.
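
Example (a construction sketch; the anchor sizes below are the ones used
in the unit tests and are illustrative, not tuned for any dataset):

anchors = {'3': [[12.0, 19.0], [31.0, 46.0], [96.0, 54.0]],
'4': [[46.0, 114.0], [133.0, 127.0], [79.0, 225.0]],
'5': [[301.0, 150.0], [172.0, 286.0], [348.0, 340.0]]}
layer = YoloLayer(anchors, classes=80,
box_type={key: 'scaled' for key in anchors})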
"""
super().__init__(**kwargs)
self._anchors = anchors
self._apply_nms = apply_nms
self._thresh = iou_thresh
self._ignore_thresh = ignore_thresh
self._truth_thresh = truth_thresh
self._iou_normalizer = iou_normalizer
self._cls_normalizer = cls_normalizer
self._object_normalizer = object_normalizer
self._objectness_smooth = objectness_smooth
self._nms_thresh = nms_thresh
self._max_boxes = max_boxes
self._max_delta = max_delta
self._classes = classes
self._loss_type = loss_type
self._use_class_agnostic_nms = use_class_agnostic_nms
self._use_scaled_loss = use_scaled_loss
self._update_on_repeat = update_on_repeat
self._pre_nms_points = pre_nms_points
self._label_smoothing = label_smoothing
self._keys = list(anchors.keys())
self._len_keys = len(self._keys)
self._box_type = box_type
self._path_scale = path_scale or {key: 2**int(key) for key in self._keys}
self._nms_version = nms_version
self._scale_xy = scale_xy or {key: 1.0 for key, _ in anchors.items()}
self._generator = {}
self._len_mask = {}
for key in self._keys:
anchors = self._anchors[key]
self._generator[key] = loss_utils.GridGenerator(
anchors, scale_anchors=self._path_scale[key])
self._len_mask[key] = len(anchors)
return
def parse_prediction_path(self, key, inputs):
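"""Decodes the raw output of one detection head into boxes and scores.

Args:
key: `str` level key (e.g. '3') used to select the anchors, grid
generator, and scale_xy for this path.
inputs: raw head output of shape
[batchsize, height, width, len_mask * (classes + 5)].

Returns:
A tuple of (objectness scores [batchsize, N], boxes [batchsize, N, 4]
in (ymin, xmin, ymax, xmax) order, class scores
[batchsize, N, classes]), where N = height * width * len_mask.
"""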
shape_ = tf.shape(inputs)
shape = inputs.get_shape().as_list()
batchsize, height, width = shape_[0], shape[1], shape[2]
if height is None or width is None:
height, width = shape_[1], shape_[2]
generator = self._generator[key]
len_mask = self._len_mask[key]
scale_xy = self._scale_xy[key]
# Reshape the yolo output to (batchsize,
# width,
# height,
# number_anchors,
# remaining_points)
data = tf.reshape(inputs, [-1, height, width, len_mask, self._classes + 5])
# Use the grid generator to get the formatted anchor boxes and grid points
# in shape [1, height, width, 2].
centers, anchors = generator(height, width, batchsize, dtype=data.dtype)
# Split the yolo detections into boxes, object score map, classes.
boxes, obns_scores, class_scores = tf.split(
data, [4, 1, self._classes], axis=-1)
# Determine the number of classes.
classes = class_scores.get_shape().as_list()[-1]
# Configurable to use the new coordinates in scaled Yolo v4 or not.
_, _, boxes = loss_utils.get_predicted_box(
tf.cast(height, data.dtype),
tf.cast(width, data.dtype),
boxes,
anchors,
centers,
scale_xy,
stride=self._path_scale[key],
darknet=False,
box_type=self._box_type[key])
# Convert boxes from yolo (x, y, w, h) to tensorflow (ymin, xmin, ymax, xmax).
boxes = box_ops.xcycwh_to_yxyx(boxes)
# Activate the objectness (detection) map.
obns_scores = tf.math.sigmoid(obns_scores)
# Convert detection map to class detection probabilities.
class_scores = tf.math.sigmoid(class_scores) * obns_scores
# Flatten predictions to [batchsize, N, -1] for non max suppression.
fill = height * width * len_mask
boxes = tf.reshape(boxes, [-1, fill, 4])
class_scores = tf.reshape(class_scores, [-1, fill, classes])
obns_scores = tf.reshape(obns_scores, [-1, fill])
return obns_scores, boxes, class_scores
def __call__(self, inputs):
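"""Generates final detections from the raw multi-level predictions.

Args:
inputs: `Dict[str, tf.Tensor]` mapping level keys to raw head outputs.

Returns:
A `Dict` with keys 'bbox', 'classes', 'confidence', and
'num_detections', thresholded and (if `apply_nms`) non-max suppressed.
"""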
boxes = []
class_scores = []
object_scores = []
levels = list(inputs.keys())
min_level = int(min(levels))
max_level = int(max(levels))
# Aggregate boxes over each scale.
for i in range(min_level, max_level + 1):
key = str(i)
object_scores_, boxes_, class_scores_ = self.parse_prediction_path(
key, inputs[key])
boxes.append(boxes_)
class_scores.append(class_scores_)
object_scores.append(object_scores_)
# Collate all predictions.
boxes = tf.concat(boxes, axis=1)
object_scores = tf.concat(object_scores, axis=1)
class_scores = tf.concat(class_scores, axis=1)
# Get masks to threshold all the predictions.
object_mask = tf.cast(object_scores > self._thresh, object_scores.dtype)
class_mask = tf.cast(class_scores > self._thresh, class_scores.dtype)
# Apply thresholds mask to all the predictions.
object_scores *= object_mask
class_scores *= (tf.expand_dims(object_mask, axis=-1) * class_mask)
# Record the original dtype.
dtype = object_scores.dtype
if not self._apply_nms:
return {
'bbox': tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
'classes': tf.cast(class_scores, dtype=tf.float32),
'confidence': object_scores,
'num_detections': self._max_boxes,
}
# Apply nms.
if self._nms_version == 'greedy':
# Greedy NMS.
boxes, object_scores, class_scores, num_detections = (
tf.image.combined_non_max_suppression(
tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
tf.cast(class_scores, dtype=tf.float32),
self._pre_nms_points,
self._max_boxes,
iou_threshold=self._nms_thresh,
score_threshold=self._thresh,
)
)
elif self._nms_version == 'v1':
(boxes, object_scores, class_scores, num_detections, _) = (
detection_generator._generate_detections_v1( # pylint:disable=protected-access
tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
tf.cast(class_scores, dtype=tf.float32),
pre_nms_top_k=self._pre_nms_points,
max_num_detections=self._max_boxes,
nms_iou_threshold=self._nms_thresh,
pre_nms_score_threshold=self._thresh,
)
)
elif self._nms_version == 'v2' or self._nms_version == 'iou':
(boxes, object_scores, class_scores, num_detections) = (
detection_generator._generate_detections_v2( # pylint:disable=protected-access
tf.expand_dims(tf.cast(boxes, dtype=tf.float32), axis=-2),
tf.cast(class_scores, dtype=tf.float32),
pre_nms_top_k=self._pre_nms_points,
max_num_detections=self._max_boxes,
nms_iou_threshold=self._nms_thresh,
pre_nms_score_threshold=self._thresh,
use_class_agnostic_nms=self._use_class_agnostic_nms,
)
)
# Cast the boxes and predictions back to the original datatype.
boxes = tf.cast(boxes, dtype)
class_scores = tf.cast(class_scores, dtype)
object_scores = tf.cast(object_scores, dtype)
# Format and return
return {
'bbox': boxes,
'classes': class_scores,
'confidence': object_scores,
'num_detections': num_detections,
}
def get_losses(self):
"""Generates a dictionary of losses to apply to each path.
Done in the detection generator because all parameters are the same
across both loss and detection generator.
Returns:
Dict[str, tf.Tensor] of losses
"""
loss = yolo_loss.YoloLoss(
keys=self._keys,
classes=self._classes,
anchors=self._anchors,
path_strides=self._path_scale,
truth_thresholds=self._truth_thresh,
ignore_thresholds=self._ignore_thresh,
loss_types=self._loss_type,
iou_normalizers=self._iou_normalizer,
cls_normalizers=self._cls_normalizer,
object_normalizers=self._object_normalizer,
objectness_smooths=self._objectness_smooth,
box_types=self._box_type,
max_deltas=self._max_delta,
scale_xys=self._scale_xy,
use_scaled_loss=self._use_scaled_loss,
update_on_repeat=self._update_on_repeat,
label_smoothing=self._label_smoothing)
return loss
def get_config(self):
return {
# `self._anchors` is a dict keyed by level; serialize its values rather
# than iterating the dict directly (which would only yield the keys).
'anchors': {key: [list(a) for a in value]
for key, value in self._anchors.items()},
'iou_thresh': self._thresh,
'max_boxes': self._max_boxes,
}
| 13,274 | 38.044118 | 89 | py |
models | models-master/official/projects/yolo/modeling/layers/detection_generator_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo detection generator."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.modeling.layers import detection_generator
class YoloLayerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('v1', None),
('v2', False),
('v2', True),
('greedy', None),
)
def test_network_creation(self, nms_version, use_class_agnostic_nms):
"""Test creation of ResNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_shape = {
'3': [1, 52, 52, 255],
'4': [1, 26, 26, 255],
'5': [1, 13, 13, 255]
}
classes = 80
anchors = {
'3': [[12.0, 19.0], [31.0, 46.0], [96.0, 54.0]],
'4': [[46.0, 114.0], [133.0, 127.0], [79.0, 225.0]],
'5': [[301.0, 150.0], [172.0, 286.0], [348.0, 340.0]]
}
box_type = {key: 'scaled' for key in anchors.keys()}
layer = detection_generator.YoloLayer(
anchors,
classes,
box_type=box_type,
max_boxes=10,
use_class_agnostic_nms=use_class_agnostic_nms,
nms_version=nms_version,
)
inputs = {}
for key in input_shape:
inputs[key] = tf.ones(input_shape[key], dtype=tf.float32)
endpoints = layer(inputs)
boxes = endpoints['bbox']
classes = endpoints['classes']
self.assertAllEqual(boxes.shape.as_list(), [1, 10, 4])
self.assertAllEqual(classes.shape.as_list(), [1, 10])
if __name__ == '__main__':
tf.test.main()
| 2,139 | 29.140845 | 74 | py |
models | models-master/official/projects/yolo/modeling/layers/nn_blocks_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.projects.yolo.modeling.layers import nn_blocks
class CSPConnectTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 64, 2))
def test_pass_through(self, width, height, filters, mod):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)
test_layer2 = nn_blocks.CSPConnect(filters=filters, filter_scale=mod)
outx, px = test_layer(x)
outx = test_layer2([outx, px])
print(outx)
print(outx.shape.as_list())
self.assertAllEqual(
outx.shape.as_list(),
[None, np.ceil(width // 2),
np.ceil(height // 2), (filters)])
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 128, 2))
def test_gradient_pass_though(self, filters, width, height, mod):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)
path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, int(np.ceil(width // 2)), int(np.ceil(height // 2)),
filters),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat, x_prev = test_layer(x)
x_hat = path_layer([x_hat, x_prev])
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class CSPRouteTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 64, 2))
def test_pass_through(self, width, height, filters, mod):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)
outx, _ = test_layer(x)
print(outx)
print(outx.shape.as_list())
self.assertAllEqual(
outx.shape.as_list(),
[None, np.ceil(width // 2),
np.ceil(height // 2), (filters / mod)])
@parameterized.named_parameters(('same', 224, 224, 64, 1),
('downsample', 224, 224, 128, 2))
def test_gradient_pass_though(self, filters, width, height, mod):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)
path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, int(np.ceil(width // 2)), int(np.ceil(height // 2)),
filters),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat, x_prev = test_layer(x)
x_hat = path_layer([x_hat, x_prev])
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class ConvBNTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('valid', (3, 3), 'valid', (1, 1)), ('same', (3, 3), 'same', (1, 1)),
('downsample', (3, 3), 'same', (2, 2)), ('test', (1, 1), 'valid', (1, 1)))
def test_pass_through(self, kernel_size, padding, strides):
if padding == 'same':
pad_const = 1
else:
pad_const = 0
x = tf.keras.Input(shape=(224, 224, 3))
test_layer = nn_blocks.ConvBN(
filters=64,
kernel_size=kernel_size,
padding=padding,
strides=strides,
trainable=False)
outx = test_layer(x)
print(outx.shape.as_list())
test = [
None,
int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),
int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1), 64
]
print(test)
self.assertAllEqual(outx.shape.as_list(), test)
@parameterized.named_parameters(('filters', 3))
def test_gradient_pass_though(self, filters):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
with tf.device('/CPU:0'):
test_layer = nn_blocks.ConvBN(filters, kernel_size=(3, 3), padding='same')
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, 224, 224, 3), dtype=tf.float32))
y = tf.Variable(
initial_value=init(shape=(1, 224, 224, filters), dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('same', 224, 224, 64, False),
('downsample', 223, 223, 32, True),
('oddball', 223, 223, 32, False))
def test_pass_through(self, width, height, filters, downsample):
mod = 1
if downsample:
mod = 2
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.DarkResidual(filters=filters, downsample=downsample)
outx = test_layer(x)
print(outx)
print(outx.shape.as_list())
self.assertAllEqual(
outx.shape.as_list(),
[None, np.ceil(width / mod),
np.ceil(height / mod), filters])
@parameterized.named_parameters(('same', 64, 224, 224, False),
('downsample', 32, 223, 223, True),
('oddball', 32, 223, 223, False))
def test_gradient_pass_though(self, filters, width, height, downsample):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.DarkResidual(filters, downsample=downsample)
if downsample:
mod = 2
else:
mod = 1
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, int(np.ceil(width / mod)), int(np.ceil(height / mod)),
filters),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
class DarkSppTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),
('test1', 300, 300, 10, [2, 3, 4, 5]),
('test2', 256, 256, 5, [10]))
def test_pass_through(self, width, height, channels, sizes):
x = tf.keras.Input(shape=(width, height, channels))
test_layer = nn_blocks.SPP(sizes=sizes)
outx = test_layer(x)
self.assertAllEqual(outx.shape.as_list(),
[None, width, height, channels * (len(sizes) + 1)])
return
@parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),
('test1', 300, 300, 10, [2, 3, 4, 5]),
('test2', 256, 256, 5, [10]))
def test_gradient_pass_though(self, width, height, channels, sizes):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.SPP(sizes=sizes)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(
shape=(1, width, height, channels), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, width, height, channels * (len(sizes) + 1)),
dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
class DarkRouteProcessTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('test1', 224, 224, 64, 7, False), ('test2', 223, 223, 32, 3, False),
('tiny', 223, 223, 16, 1, False), ('spp', 224, 224, 64, 7, False))
def test_pass_through(self, width, height, filters, repetitions, spp):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.DarkRouteProcess(
filters=filters, repetitions=repetitions, insert_spp=spp)
outx = test_layer(x)
self.assertLen(outx, 2, msg='len(outx) != 2')
if repetitions == 1:
filter_y1 = filters
else:
filter_y1 = filters // 2
self.assertAllEqual(
outx[1].shape.as_list(), [None, width, height, filter_y1])
self.assertAllEqual(
filters % 2,
0,
msg='Output of a DarkRouteProcess layer has an odd number of filters')
self.assertAllEqual(outx[0].shape.as_list(), [None, width, height, filters])
@parameterized.named_parameters(
('test1', 224, 224, 64, 7, False), ('test2', 223, 223, 32, 3, False),
('tiny', 223, 223, 16, 1, False), ('spp', 224, 224, 64, 7, False))
def test_gradient_pass_though(self, width, height, filters, repetitions, spp):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.DarkRouteProcess(
filters=filters, repetitions=repetitions, insert_spp=spp)
if repetitions == 1:
filter_y1 = filters
else:
filter_y1 = filters // 2
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y_0 = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y_1 = tf.Variable(
initial_value=init(
shape=(1, width, height, filter_y1), dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat_0, x_hat_1 = test_layer(x)
grad_loss_0 = loss(x_hat_0, y_0)
grad_loss_1 = loss(x_hat_1, y_1)
grad = tape.gradient([grad_loss_0, grad_loss_1],
test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
class SPPCSPCTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('SPPCSPC', 224, 224, 8, [5, 9, 13], 0.5),
('test1', 300, 300, 32, [2, 3, 4, 5], 1.0),
('test2', 256, 256, 16, [10], 2.0))
def test_pass_through(self, width, height, filters, pool_sizes, scale):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.SPPCSPC(filters, pool_sizes, scale)
out = test_layer(x)
self.assertAllEqual(out.shape.as_list(), [None, width, height, filters])
@parameterized.named_parameters(('SPPCSPC', 224, 224, 8, [5, 9, 13], 0.5),
('test1', 300, 300, 32, [2, 3, 4, 5], 1.0),
('test2', 256, 256, 16, [10], 2.0))
def test_gradient_pass_though(
self, width, height, filters, pool_sizes, scale):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.SPPCSPC(filters, pool_sizes, scale)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
class RepConvTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('RepConv', 224, 224, 8, 1),
('test1', 300, 300, 32, 2),
('test2', 256, 256, 16, 4))
def test_pass_through(self, width, height, filters, strides):
x = tf.keras.Input(shape=(width, height, filters))
test_layer = nn_blocks.RepConv(filters, strides=strides)
out = test_layer(x)
self.assertAllEqual(out.shape.as_list(),
[None, width // strides, height // strides, filters])
@parameterized.named_parameters(('RepConv', 224, 224, 8, 1),
('test1', 300, 300, 32, 2),
('test2', 256, 256, 16, 4))
def test_gradient_pass_though(self, width, height, filters, strides):
loss = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD()
test_layer = nn_blocks.RepConv(filters, strides=strides)
init = tf.random_normal_initializer()
x = tf.Variable(
initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
y = tf.Variable(
initial_value=init(
shape=(1, width // strides, height // strides, filters),
dtype=tf.float32,
)
)
with tf.GradientTape() as tape:
x_hat = test_layer(x)
grad_loss = loss(x_hat, y)
grad = tape.gradient(grad_loss, test_layer.trainable_variables)
optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
self.assertNotIn(None, grad)
return
if __name__ == '__main__':
tf.test.main()
| 14,811 | 37.87664 | 80 | py |
models | models-master/official/projects/yolo/modeling/layers/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/modeling/layers/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for yolo neural networks."""
import functools
from typing import Callable, List, Tuple
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.ops import spatial_transform_ops
class Identity(tf.keras.layers.Layer):
def call(self, inputs):
return inputs
class ConvBN(tf.keras.layers.Layer):
"""ConvBN block.
Modified convolution layer to match that of the Darknet library.
The layer is a standard combination of Conv, BatchNorm, and Activation;
the use of bias in the conv is determined by whether batch normalization
is used.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
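
A minimal usage sketch (the shapes shown are illustrative):

layer = ConvBN(filters=64, kernel_size=(3, 3), strides=(2, 2),
padding='same')
y = layer(tf.ones([1, 224, 224, 3]))  # y: [1, 112, 112, 64]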
"""
def __init__(self,
filters=1,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
dilation_rate=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_separable_conv=False,
use_bn=True,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
leaky_alpha=0.1,
**kwargs):
"""ConvBN initializer.
Args:
filters: integer for output depth, or the number of features to learn.
kernel_size: integer or tuple for the shape of the weight matrix or kernel
to learn.
strides: integer or tuple for how much to move the kernel after each
kernel use.
padding: string 'valid' or 'same', if same, then pad the image, else do
not.
dilation_rate: tuple to indicate how much to modulate kernel weights and
how many pixels in a feature map to skip.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
bias_regularizer: string to indicate which function to use to regularizer
bias.
kernel_regularizer: string to indicate which function to use to
regularizer weights.
use_separable_conv: `bool` whether to use separable convs.
use_bn: boolean for whether to use batch normalization.
use_sync_bn: boolean for whether to sync the batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
norm_momentum: float for moment to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for the activation function to use in the
layer; if None, activation is replaced by linear.
leaky_alpha: float to use as alpha if activation function is leaky.
**kwargs: Keyword Arguments.
"""
# convolution params
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._padding = padding
self._dilation_rate = dilation_rate
if kernel_initializer == 'VarianceScaling':
# to match pytorch initialization method
self._kernel_initializer = tf.keras.initializers.VarianceScaling(
scale=1 / 3, mode='fan_in', distribution='uniform')
else:
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
# batch normalization params
self._use_bn = use_bn
self._use_separable_conv = use_separable_conv
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
ksize = self._kernel_size
if not isinstance(ksize, List) and not isinstance(ksize, Tuple):
ksize = [ksize]
if use_separable_conv and not all([a == 1 for a in ksize]):
self._conv_base = tf.keras.layers.SeparableConv2D
else:
self._conv_base = tf.keras.layers.Conv2D
self._bn_base = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
# format: (batch_size, height, width, channels)
self._bn_axis = -1
else:
# format: (batch_size, channels, width, height)
self._bn_axis = 1
# activation params
self._activation = activation
self._leaky_alpha = leaky_alpha
self._fuse = False
super().__init__(**kwargs)
def build(self, input_shape):
use_bias = not self._use_bn
self.conv = self._conv_base(
filters=self._filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding=self._padding,
dilation_rate=self._dilation_rate,
use_bias=use_bias,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
if self._use_bn:
self.bn = self._bn_base(
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
axis=self._bn_axis,
synchronized=self._use_sync_bn)
else:
self.bn = None
if self._activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._activation)
def call(self, x):
x = self.conv(x)
if self._use_bn and not self._fuse:
x = self.bn(x)
x = self._activation_fn(x)
return x
def fuse(self):
if self.bn is not None and not self._use_separable_conv:
# Fuse convolution and batchnorm; gives roughly +2-3 FPS (about 2 ms
# lower latency). Reference:
# https://tehnokv.com/posts/fusing-batchnorm-and-conv/
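# In symbols (the standard BN-folding identity that the code below
# implements): with base = sqrt(var + eps),
#   y = gamma * (conv(x) - mean) / base + beta = W'x + b',
# where W' = (gamma / base) * W and b' = beta - gamma * mean / base.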
if self._fuse:
return
self._fuse = True
conv_weights = self.conv.get_weights()[0]
gamma, beta, moving_mean, moving_variance = self.bn.get_weights()
self.conv.use_bias = True
infilters = conv_weights.shape[-2]
self.conv.build([None, None, None, infilters])
base = tf.sqrt(self._norm_epsilon + moving_variance)
w_conv_base = tf.transpose(conv_weights, perm=(3, 2, 0, 1))
w_conv = tf.reshape(w_conv_base, [conv_weights.shape[-1], -1])
w_bn = tf.linalg.diag(gamma / base)
w_conv = tf.reshape(tf.matmul(w_bn, w_conv), w_conv_base.get_shape())
w_conv = tf.transpose(w_conv, perm=(2, 3, 1, 0))
b_bn = beta - gamma * moving_mean / base
self.conv.set_weights([w_conv, b_bn])
del self.bn
self.trainable = False
self.conv.trainable = False
self.bn = None
return
def get_config(self):
# used to store/share parameters to reconstruct the model
layer_config = {
'filters': self._filters,
'kernel_size': self._kernel_size,
'strides': self._strides,
'padding': self._padding,
'dilation_rate': self._dilation_rate,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'kernel_regularizer': self._kernel_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'leaky_alpha': self._leaky_alpha
}
layer_config.update(super().get_config())
return layer_config
class DarkResidual(tf.keras.layers.Layer):
"""Darknet block with Residual connection for Yolo v3 Backbone."""
def __init__(self,
filters=1,
filter_scale=2,
dilation_rate=1,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
leaky_alpha=0.1,
sc_activation='linear',
downsample=False,
**kwargs):
"""Dark Residual initializer.
Args:
filters: integer for output depth, or the number of features to learn.
filter_scale: `int` for filter scale.
dilation_rate: tuple to indicate how much to modulate kernel weights and
how many pixels in a feature map to skip.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
kernel_regularizer: string to indicate which function to use to
regularizer weights.
bias_regularizer: string to indicate which function to use to regularizer
bias.
use_bn: boolean for whether to use batch normalization.
use_sync_bn: boolean for whether to sync the batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
use_separable_conv: `bool` whether to use separable convs.
norm_momentum: float for moment to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for the activation function to use in the
layer; if None, activation is replaced by linear.
leaky_alpha: float to use as alpha if activation function is leaky.
sc_activation: string for activation function to use in layer.
downsample: boolean; if the image input is larger than the layer output,
set downsample to True so the dimensions are forced to match.
**kwargs: Keyword Arguments.
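
A minimal usage sketch (the shapes shown are illustrative):

block = DarkResidual(filters=64, downsample=True)
y = block(tf.ones([1, 224, 224, 64]))  # y: [1, 112, 112, 64]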
"""
# downsample
self._downsample = downsample
# ConvBN params
self._filters = filters
self._filter_scale = filter_scale
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._kernel_regularizer = kernel_regularizer
# normalization params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._dilation_rate = dilation_rate if isinstance(dilation_rate,
int) else dilation_rate[0]
# activation params
self._conv_activation = activation
self._leaky_alpha = leaky_alpha
self._sc_activation = sc_activation
super().__init__(**kwargs)
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha
}
if self._downsample:
if self._dilation_rate > 1:
dilation_rate = 1
if self._dilation_rate // 2 > 0:
dilation_rate = self._dilation_rate // 2
down_stride = 1
else:
dilation_rate = 1
down_stride = 2
self._dconv = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=down_stride,
dilation_rate=dilation_rate,
padding='same',
**dark_conv_args)
self._conv1 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._conv2 = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(1, 1),
dilation_rate=self._dilation_rate,
padding='same',
**dark_conv_args)
self._shortcut = tf.keras.layers.Add()
if self._sc_activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._sc_activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._sc_activation)
super().build(input_shape)
def call(self, inputs, training=None):
if self._downsample:
inputs = self._dconv(inputs)
x = self._conv1(inputs)
x = self._conv2(x)
x = self._shortcut([x, inputs])
return self._activation_fn(x)
def get_config(self):
# used to store/share parameters to reconstruct the model
layer_config = {
'filters': self._filters,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'dilation_rate': self._dilation_rate,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'leaky_alpha': self._leaky_alpha,
'sc_activation': self._sc_activation,
'downsample': self._downsample,
}
layer_config.update(super().get_config())
return layer_config
class CSPTiny(tf.keras.layers.Layer):
"""CSP Tiny layer.
A small convolution block proposed in CSPNet. The layer uses
shortcuts, routing (concatenation), and feature grouping in order to improve
gradient variability and allow for high-efficiency, low-power residual
learning for small networks.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
"""
def __init__(self,
filters=1,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
dilation_rate=1,
use_sync_bn=False,
use_separable_conv=False,
group_id=1,
groups=2,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
downsample=True,
leaky_alpha=0.1,
**kwargs):
"""Initializer for CSPTiny block.
Args:
filters: integer for output depth, or the number of features to learn.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
bias_regularizer: string to indicate which function to use to regularizer
bias.
kernel_regularizer: string to indicate which function to use to
regularizer weights.
use_bn: boolean for whether to use batch normalization.
dilation_rate: `int`, dilation rate for conv layers.
use_sync_bn: boolean for whether to sync the batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
use_separable_conv: `bool` whether to use separable convs.
group_id: integer for which group of features to pass through the csp
tiny stack.
groups: integer for how many splits there should be in the convolution
feature stack output.
norm_momentum: float for moment to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for the activation function to use in the
layer; if None, activation is replaced by linear.
downsample: boolean; if the image input is larger than the layer output,
set downsample to True so the dimensions are forced to match.
leaky_alpha: float to use as alpha if activation function is leaky.
**kwargs: Keyword Arguments.
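
A shape sketch (illustrative; with downsample=True the first output
concatenates the full and partial paths, doubling the channel count):

block = CSPTiny(filters=64)
x, x_csp = block(tf.ones([1, 224, 224, 64]))
# x: [1, 112, 112, 128], x_csp: [1, 224, 224, 64]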
"""
# ConvBN params
self._filters = filters
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._dilation_rate = dilation_rate
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._kernel_regularizer = kernel_regularizer
self._groups = groups
self._group_id = group_id
self._downsample = downsample
# normalization params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
# activation params
self._conv_activation = activation
self._leaky_alpha = leaky_alpha
super().__init__(**kwargs)
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'leaky_alpha': self._leaky_alpha
}
self._convlayer1 = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._convlayer2 = ConvBN(
filters=self._filters // 2,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._convlayer3 = ConvBN(
filters=self._filters // 2,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
**dark_conv_args)
self._convlayer4 = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**dark_conv_args)
if self._downsample:
self._maxpool = tf.keras.layers.MaxPool2D(
pool_size=2, strides=2, padding='same', data_format=None)
super().build(input_shape)
def call(self, inputs, training=None):
x1 = self._convlayer1(inputs)
x1_group = tf.split(x1, self._groups, axis=-1)[self._group_id]
x2 = self._convlayer2(x1_group) # grouping
x3 = self._convlayer3(x2)
x4 = tf.concat([x3, x2], axis=-1) # csp partial using grouping
x5 = self._convlayer4(x4)
x = tf.concat([x1, x5], axis=-1) # csp connect
if self._downsample:
x = self._maxpool(x)
return x, x5
class CSPRoute(tf.keras.layers.Layer):
"""CSPRoute block.
Downsampling layer to take the place of the downsampling done in Residual
networks. This is the first of 2 layers needed to convert any Residual Network
model to a CSPNet. At the start of a new level change, this CSPRoute layer
creates a learned identity that will act as a cross stage connection,
which is used to inform the inputs to the next stage. It is called cross stage
partial because the number of filters required in every intermittent Residual
layer is reduced by half. The sister layer will take the partial generated by
this layer and concatenate it with the output of the final residual layer in
the stack to create a full feature level output. This concatenation merges the
partial blocks of 2 levels as input to the next, allowing the gradients of each
level to be more unique, and reducing the number of parameters required by
each level by 50% while keeping accuracy consistent.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
"""
def __init__(self,
filters,
filter_scale=2,
activation='mish',
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
dilation_rate=1,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
downsample=True,
leaky_alpha=0.1,
**kwargs):
"""CSPRoute layer initializer.
Args:
filters: integer for output depth, or the number of features to learn
filter_scale: integer scale dictating the number of filters
(filters // filter_scale) in the partial feature stack.
activation: string for activation function to use in layer.
kernel_initializer: string to indicate which function to use to
initialize weights.
bias_initializer: string to indicate which function to use to initialize
bias.
bias_regularizer: string to indicate which function to use to regularizer
bias.
kernel_regularizer: string to indicate which function to use to
regularizer weights.
dilation_rate: dilation rate for conv layers.
use_bn: boolean for whether to use batch normalization.
use_sync_bn: boolean for whether to sync the batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
use_separable_conv: `bool` whether to use separable convs.
norm_momentum: float for moment to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
downsample: down_sample the input.
leaky_alpha: `float`, for leaky alpha value.
**kwargs: Keyword Arguments.
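
A minimal usage sketch, paired with its sister layer CSPConnect (the
shapes shown are illustrative):

route = CSPRoute(filters=64)
connect = CSPConnect(filters=64)
x, x_csp = route(tf.ones([1, 224, 224, 64]))
y = connect([x, x_csp])  # y: [1, 112, 112, 64]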
"""
super().__init__(**kwargs)
# layer params
self._filters = filters
self._filter_scale = filter_scale
self._activation = activation
# convolution params
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._dilation_rate = dilation_rate
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._downsample = downsample
self._leaky_alpha = leaky_alpha
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
if self._downsample:
if self._dilation_rate > 1:
dilation_rate = 1
if self._dilation_rate // 2 > 0:
dilation_rate = self._dilation_rate // 2
down_stride = 1
else:
dilation_rate = 1
down_stride = 2
self._conv1 = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=down_stride,
dilation_rate=dilation_rate,
**dark_conv_args)
self._conv2 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=(1, 1),
strides=(1, 1),
**dark_conv_args)
self._conv3 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=(1, 1),
strides=(1, 1),
**dark_conv_args)
def call(self, inputs, training=None):
if self._downsample:
inputs = self._conv1(inputs)
y = self._conv2(inputs)
x = self._conv3(inputs)
return (x, y)
class CSPConnect(tf.keras.layers.Layer):
"""CSPConnect block.
Sister layer to the CSPRoute layer. Merges the partial feature stack
generated by the CSPRoute layer with the final output of the
residual stack. Suggested in the CSPNet paper.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
"""
def __init__(self,
filters,
filter_scale=2,
drop_final=False,
drop_first=False,
activation='mish',
kernel_size=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
dilation_rate=1,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
leaky_alpha=0.1,
**kwargs):
"""Initializer for CSPConnect block.
Args:
filters: integer for output depth, or the number of features to learn.
filter_scale: integer scale dictating the number of filters
(filters // filter_scale) in the partial feature stack.
drop_final: `bool`, whether to drop final conv layer.
drop_first: `bool`, whether to drop first conv layer.
activation: string for activation function to use in layer.
kernel_size: `Tuple`, kernel size for conv layers.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
bias_regularizer: string to indicate which function to use to regularizer
bias.
kernel_regularizer: string to indicate which function to use to
regularizer weights.
dilation_rate: `int`, dilation rate for conv layers.
use_bn: boolean for whether to use batch normalization.
use_sync_bn: boolean for whether to sync the batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
use_separable_conv: `bool` whether to use separable convs.
norm_momentum: float for moment to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
leaky_alpha: `float`, for leaky alpha value.
**kwargs: Keyword Arguments.
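
The inputs to `call` are a pair `[x_prev, x_csp]`: the output of the
residual stack and the partial produced by CSPRoute (see the usage
sketch in CSPRoute).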
"""
super().__init__(**kwargs)
# layer params
self._filters = filters
self._filter_scale = filter_scale
self._activation = activation
# convolution params
self._kernel_size = kernel_size
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._drop_final = drop_final
self._drop_first = drop_first
self._leaky_alpha = leaky_alpha
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
if not self._drop_first:
self._conv1 = ConvBN(
filters=self._filters // self._filter_scale,
kernel_size=self._kernel_size,
strides=(1, 1),
**dark_conv_args)
self._concat = tf.keras.layers.Concatenate(axis=-1)
if not self._drop_final:
self._conv2 = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
**dark_conv_args)
def call(self, inputs, training=None):
x_prev, x_csp = inputs
if not self._drop_first:
x_prev = self._conv1(x_prev)
x = self._concat([x_prev, x_csp])
# skipped if drop final is true
if not self._drop_final:
x = self._conv2(x)
return x
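# Illustrative sketch (not part of the original file): CSPRoute and the
# CSPConnect block above are designed to be used as a pair. A minimal
# assumed usage, with `dense_block` standing in for any intermediate layers:
#
#   route = CSPRoute(filters=128)
#   connect = CSPConnect(filters=128)
#   x, x_route = route(inputs)      # partial stack plus the skip branch
#   x = dense_block(x)              # hypothetical layers on the partial stack
#   out = connect([x, x_route])     # concat and project back to 128 channels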
class CSPStack(tf.keras.layers.Layer):
"""CSP Stack layer.
CSP full stack that combines the route and the connect, letting you
quickly wrap an existing callable or list of layers to make it a cross
stage partial. Added for ease of use. You should be able to wrap any
layer stack with a CSP independent of whether it belongs to the Darknet
family. If filter_scale = 2, then the blocks in the stack passed into the
CSP stack should also have filters = filters / filter_scale.
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu,
Ping-Yang Chen, Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
"""
def __init__(self,
filters,
model_to_wrap=None,
filter_scale=2,
activation='mish',
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
downsample=True,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""CSPStack layer initializer.
Args:
filters: filter size for conv layers.
model_to_wrap: callable Model or a list of callable objects that will
process the output of CSPRoute, and be input into CSPConnect. list will
be called sequentially.
filter_scale: integer divisor applied to `filters`; the partial feature
stack uses `filters // filter_scale` channels.
activation: string for activation function to use in layer.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
bias_regularizer: string to indicate which function to use to regularize
bias.
kernel_regularizer: string to indicate which function to use to
regularize weights.
downsample: whether to downsample the input.
use_bn: boolean for whether to use batch normalization.
use_sync_bn: boolean for whether to synchronize batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
use_separable_conv: `bool`, whether to use separable convs.
norm_momentum: float momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
**kwargs: Keyword Arguments.
Raises:
TypeError: if `model_to_wrap` is not a layer or a list of layers.
"""
super().__init__(**kwargs)
# layer params
self._filters = filters
self._filter_scale = filter_scale
self._activation = activation
self._downsample = downsample
# convolution params
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if model_to_wrap is None:
self._model_to_wrap = []
elif isinstance(model_to_wrap, Callable):
self._model_to_wrap = [model_to_wrap]
elif isinstance(model_to_wrap, List):
self._model_to_wrap = model_to_wrap
else:
raise TypeError(
'the input to the CSPStack must be a list of layers that we can '
'iterate through, or a callable')
def build(self, input_shape):
dark_conv_args = {
'filters': self._filters,
'filter_scale': self._filter_scale,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_regularizer': self._kernel_regularizer,
}
self._route = CSPRoute(downsample=self._downsample, **dark_conv_args)
self._connect = CSPConnect(**dark_conv_args)
def call(self, inputs, training=None):
x, x_route = self._route(inputs)
for layer in self._model_to_wrap:
x = layer(x)
x = self._connect([x, x_route])
return x
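# Illustrative sketch (assumed usage): wrapping a small stack of ConvBN
# blocks with CSPStack. With the default filter_scale=2, the wrapped blocks
# should emit filters // 2 = 64 channels, as the docstring notes.
#
#   blocks = [ConvBN(filters=64, kernel_size=(3, 3), strides=(1, 1),
#                    padding='same') for _ in range(2)]
#   csp = CSPStack(filters=128, model_to_wrap=blocks, downsample=True)
#   y = csp(x)  # x: [B, H, W, C] -> y: [B, H // 2, W // 2, 128]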
class PathAggregationBlock(tf.keras.layers.Layer):
"""Path Aggregation block."""
def __init__(self,
filters=1,
drop_final=True,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
inverted=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='leaky',
leaky_alpha=0.1,
downsample=False,
upsample=False,
upsample_size=2,
**kwargs):
"""Initializer for path aggregation block.
Args:
filters: integer for output depth, or the number of features to learn.
drop_final: do not create the last convolution block.
kernel_initializer: string to indicate which function to use to initialize
weights.
bias_initializer: string to indicate which function to use to initialize
bias.
bias_regularizer: string to indicate which function to use to regularize
bias.
kernel_regularizer: string to indicate which function to use to
regularize weights.
use_bn: boolean for whether to use batch normalization.
use_sync_bn: boolean for whether to synchronize batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
use_separable_conv: `bool`, whether to use separable convs.
inverted: boolean for inverting the order of the convolutions.
norm_momentum: float momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string or None for activation function to use in layer,
if None activation is replaced by linear.
leaky_alpha: float to use as alpha if activation function is leaky.
downsample: `bool` for whether to downsample and merge.
upsample: `bool` for whether to upsample and merge.
upsample_size: `int` for how much to upsample in order to match shapes.
**kwargs: Keyword Arguments.
"""
# Darkconv params
self._filters = filters
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._kernel_regularizer = kernel_regularizer
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
# Normal params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
# Activation params
self._conv_activation = activation
self._leaky_alpha = leaky_alpha
self._downsample = downsample
self._upsample = upsample
self._upsample_size = upsample_size
self._drop_final = drop_final
# Block params
self._inverted = inverted
super().__init__(**kwargs)
def _build_regular(self, input_shape, kwargs):
if self._downsample:
self._conv = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
**kwargs)
else:
self._conv = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
if not self._drop_final:
self._conv_concat = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
def _build_reversed(self, input_shape, kwargs):
if self._downsample:
self._conv_prev = ConvBN(
filters=self._filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
**kwargs)
else:
self._conv_prev = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
self._conv_route = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
if not self._drop_final:
self._conv_sync = ConvBN(
filters=self._filters,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
**kwargs)
def build(self, input_shape):
dark_conv_args = {
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._conv_activation,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
if self._inverted:
self._build_reversed(input_shape, dark_conv_args)
else:
self._build_regular(input_shape, dark_conv_args)
self._concat = tf.keras.layers.Concatenate()
super().build(input_shape)
def _call_regular(self, inputs, training=None):
input_to_convolve, input_to_concat = inputs
x_prev = self._conv(input_to_convolve)
if self._upsample:
x_prev = spatial_transform_ops.nearest_upsampling(x_prev,
self._upsample_size)
x = self._concat([x_prev, input_to_concat])
# used in csp conversion
if not self._drop_final:
x = self._conv_concat(x)
return x_prev, x
def _call_reversed(self, inputs, training=None):
x_route, x_prev = inputs
x_prev = self._conv_prev(x_prev)
if self._upsample:
x_prev = spatial_transform_ops.nearest_upsampling(x_prev,
self._upsample_size)
x_route = self._conv_route(x_route)
x = self._concat([x_route, x_prev])
if not self._drop_final:
x = self._conv_sync(x)
return x_prev, x
def call(self, inputs, training=None):
# done this way to prevent confusion in the auto graph
if self._inverted:
return self._call_reversed(inputs, training=training)
else:
return self._call_regular(inputs, training=training)
class SPP(tf.keras.layers.Layer):
"""Spatial Pyramid Pooling.
A non-aggregated SPP layer that uses pooling.
"""
def __init__(self, sizes, **kwargs):
self._sizes = list(reversed(sizes))
if not sizes:
raise ValueError('At least one maxpool size must be specified in SPP block')
super().__init__(**kwargs)
def build(self, input_shape):
maxpools = []
for size in self._sizes:
maxpools.append(
tf.keras.layers.MaxPool2D(
pool_size=(size, size),
strides=(1, 1),
padding='same',
data_format=None))
self._maxpools = maxpools
super().build(input_shape)
def call(self, inputs, training=None):
outputs = []
for maxpool in self._maxpools:
outputs.append(maxpool(inputs))
outputs.append(inputs)
concat_output = tf.keras.layers.concatenate(outputs)
return concat_output
def get_config(self):
layer_config = {'sizes': self._sizes}
layer_config.update(super().get_config())
return layer_config
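# Illustrative sketch (assumed shapes): every maxpool in SPP uses stride 1
# and 'same' padding, so the spatial dims are unchanged and only the channel
# dim grows by a factor of len(sizes) + 1 (the input is concatenated too).
#
#   spp = SPP(sizes=[5, 9, 13])
#   y = spp(x)  # x: [B, H, W, C] -> y: [B, H, W, 4 * C]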
class SAM(tf.keras.layers.Layer):
"""Spatial Attention Model.
[1] Sanghyun Woo, Jongchan Park, Joon-Young Lee, In So Kweon
CBAM: Convolutional Block Attention Module. arXiv:1807.06521
Implementation of the Spatial Attention Model (SAM).
"""
def __init__(self,
use_pooling=False,
filter_match=False,
filters=1,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
dilation_rate=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
use_sync_bn=True,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='sigmoid',
output_activation=None,
leaky_alpha=0.1,
**kwargs):
# use_pooling
self._use_pooling = use_pooling
self._filters = filters
self._output_activation = output_activation
self._leaky_alpha = leaky_alpha
self.dark_conv_args = {
'kernel_size': kernel_size,
'strides': strides,
'padding': padding,
'dilation_rate': dilation_rate,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'bias_regularizer': bias_regularizer,
'use_bn': use_bn,
'use_sync_bn': use_sync_bn,
'use_separable_conv': use_separable_conv,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'activation': activation,
'kernel_regularizer': kernel_regularizer,
'leaky_alpha': leaky_alpha
}
super().__init__(**kwargs)
def build(self, input_shape):
if self._filters == -1:
self._filters = input_shape[-1]
self._conv = ConvBN(filters=self._filters, **self.dark_conv_args)
if self._output_activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._output_activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._output_activation)
def call(self, inputs, training=None):
if self._use_pooling:
depth_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
depth_avg = tf.reduce_mean(inputs, axis=-1, keepdims=True)
input_maps = tf.concat([depth_avg, depth_max], axis=-1)
else:
input_maps = inputs
attention_mask = self._conv(input_maps)
return self._activation_fn(inputs * attention_mask)
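# Illustrative sketch (assumed usage): SAM computes a spatial attention mask
# with a single conv and multiplies it into the input. filters=-1 keeps the
# input depth; use_pooling=True builds the mask from max/avg channel pools.
#
#   sam = SAM(filters=-1, use_pooling=True)
#   y = sam(x)  # same shape as x, spatially re-weighted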
class CAM(tf.keras.layers.Layer):
"""Channel Attention Model.
[1] Sanghyun Woo, Jongchan Park, Joon-Young Lee, In So Kweon
CBAM: Convolutional Block Attention Module. arXiv:1807.06521
Implementation of the Channel Attention Model (CAM)
"""
def __init__(self,
reduction_ratio=1.0,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=False,
use_sync_bn=False,
use_bias=False,
norm_momentum=0.99,
norm_epsilon=0.001,
mlp_activation='linear',
activation='sigmoid',
leaky_alpha=0.1,
**kwargs):
self._reduction_ratio = reduction_ratio
if not use_bn:
self._bn = Identity
self._bn_args = {}
else:
self._bn = functools.partial(
tf.keras.layers.BatchNormalization, synchronized=use_sync_bn)
self._bn_args = {
'momentum': norm_momentum,
'epsilon': norm_epsilon,
}
self._mlp_args = {
'use_bias': use_bias,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'bias_regularizer': bias_regularizer,
'activation': mlp_activation,
'kernel_regularizer': kernel_regularizer,
}
self._leaky_alpha = leaky_alpha
self._activation = activation
super().__init__(**kwargs)
def build(self, input_shape):
self._filters = input_shape[-1]
self._mlp = tf.keras.Sequential([
tf.keras.layers.Dense(self._filters, **self._mlp_args),
self._bn(**self._bn_args),
tf.keras.layers.Dense(
int(self._filters * self._reduction_ratio), **self._mlp_args),
self._bn(**self._bn_args),
tf.keras.layers.Dense(self._filters, **self._mlp_args),
self._bn(**self._bn_args),
])
if self._activation == 'leaky':
self._activation_fn = tf.keras.layers.LeakyReLU(alpha=self._leaky_alpha)
elif self._activation == 'mish':
self._activation_fn = lambda x: x * tf.math.tanh(tf.math.softplus(x))
else:
self._activation_fn = tf_utils.get_activation(self._activation)
def call(self, inputs, training=None):
depth_max = self._mlp(tf.reduce_max(inputs, axis=(1, 2)))
depth_avg = self._mlp(tf.reduce_mean(inputs, axis=(1, 2)))
channel_mask = self._activation_fn(depth_avg + depth_max)
channel_mask = tf.expand_dims(channel_mask, axis=1)
attention_mask = tf.expand_dims(channel_mask, axis=1)
return inputs * attention_mask
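# Illustrative sketch (assumed usage): CAM squeezes the spatial dims with
# both max and average pooling, passes each through the shared MLP, and
# gates the input with the resulting [B, 1, 1, C] channel mask.
#
#   cam = CAM(reduction_ratio=0.5)
#   y = cam(x)  # same shape as x, channel-wise re-weighted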
class CBAM(tf.keras.layers.Layer):
"""Convolutional Block Attention Module.
[1] Sanghyun Woo, Jongchan Park, Joon-Young Lee, In So Kweon
CBAM: Convolutional Block Attention Module. arXiv:1807.06521
Implementation of the Convolutional Block Attention Module (CBAM).
"""
def __init__(self,
use_pooling=False,
filters=1,
reduction_ratio=1.0,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
dilation_rate=(1, 1),
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_bn=True,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
mlp_activation=None,
activation='sigmoid',
leaky_alpha=0.1,
**kwargs):
# use_pooling
self._sam_args = {
'use_pooling': use_pooling,
'filters': filters,
'kernel_size': kernel_size,
'strides': strides,
'padding': padding,
'dilation_rate': dilation_rate,
'use_separable_conv': use_separable_conv,
}
self._cam_args = {
'reduction_ratio': reduction_ratio,
'mlp_activation': mlp_activation
}
self._common_args = {
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'bias_regularizer': bias_regularizer,
'use_bn': use_bn,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'activation': activation,
'kernel_regularizer': kernel_regularizer,
'leaky_alpha': leaky_alpha
}
self._cam_args.update(self._common_args)
self._sam_args.update(self._common_args)
super().__init__(**kwargs)
def build(self, input_shape):
self._cam = CAM(**self._cam_args)
self._sam = SAM(**self._sam_args)
def call(self, inputs, training=None):
return self._sam(self._cam(inputs))
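# Illustrative sketch (assumed usage): CBAM applies channel attention (CAM)
# first and spatial attention (SAM) second, following the ordering in the
# CBAM paper.
#
#   cbam = CBAM(filters=-1, use_pooling=True, reduction_ratio=0.5)
#   y = cbam(x)  # same shape as x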
class DarkRouteProcess(tf.keras.layers.Layer):
"""Dark Route Process block.
Processes darknet outputs and connects the backbone to the head in a more
generalizable way. Abstracts the repetition of DarkConv objects that is
common in YOLO.
It is used like the following:
x = ConvBN(1024, (3, 3), (1, 1))(x)
proc = DarkRouteProcess(filters = 1024,
repetitions = 3,
insert_spp = False)(x)
"""
def __init__(self,
filters=2,
repetitions=2,
insert_spp=False,
insert_sam=False,
insert_cbam=False,
csp_stack=0,
csp_scale=2,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
bias_regularizer=None,
kernel_regularizer=None,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
block_invert=False,
activation='leaky',
leaky_alpha=0.1,
spp_keys=None,
**kwargs):
"""DarkRouteProcess initializer.
Args:
filters: the number of filters to be used in all subsequent layers.
filters should be the depth of the tensor input into this layer,
as no downsampling can be done within this layer object.
repetitions: number of times to repeat the processing nodes.
for tiny: 1 repetition, no spp allowed.
for spp: insert_spp = True, and allow for 6 repetitions.
for regular: insert_spp = False, and allow for 6 repetitions.
insert_spp: bool if true add the spatial pyramid pooling layer.
insert_sam: bool if true add spatial attention module to path.
insert_cbam: bool if true add convolutional block attention
module to path.
csp_stack: int for the number of sequential layers from 0
to <value> you would like to convert into a Cross Stage
Partial (csp) type.
csp_scale: int for how much to down scale the number of filters
only for the csp layers in the csp section of the processing
path. A value of 2 indicates that each layer in the CSP
stack will have filters = filters/2.
kernel_initializer: method to use to initialize kernel weights.
bias_initializer: method to use to initialize the bias of the conv
layers.
bias_regularizer: string to indicate which function to use to regularize
bias.
kernel_regularizer: string to indicate which function to use to
regularize weights.
use_sync_bn: bool if true use the sync batch normalization.
use_separable_conv: `bool`, whether to use separable convs.
norm_momentum: batch norm parameter, see Tensorflow documentation.
norm_epsilon: batch norm parameter, see Tensorflow documentation.
block_invert: bool used for switching between the even and odd
repetitions of layers. Usually the repetition is based on a
3x3 conv with filters, followed by a 1x1 with filters/2, with
an even number of repetitions to ensure each 3x3 gets a 1x1
squeeze. Block invert swaps the 3x3/1 1x1/2 to a 1x1/2 3x3/1
ordering, typically used when the model requires an odd number
of repetitions. All other parameters maintain their effects.
activation: activation function to use in processing.
leaky_alpha: if leaky activation function, the alpha to use in
processing the relu input.
spp_keys: List[int] of the sampling levels to be applied by
the Spatial Pyramid Pooling Layer. By default it is
[5, 9, 13], indicating a 5x5 pooling followed by 9x9,
followed by 13x13, then followed by the standard concatenation
and convolution.
**kwargs: Keyword Arguments.
"""
super().__init__(**kwargs)
# darkconv params
self._filters = filters
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
self._kernel_regularizer = kernel_regularizer
# normal params
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
# activation params
self._activation = activation
self._leaky_alpha = leaky_alpha
repetitions += (2 * int(insert_spp))
if repetitions == 1:
block_invert = True
self._repetitions = repetitions
self.layer_list, self.outputs = self._get_base_layers()
if csp_stack > 0:
self._csp_scale = csp_scale
csp_stack += (2 * int(insert_spp))
self._csp_filters = lambda x: x // csp_scale
self._convert_csp(self.layer_list, self.outputs, csp_stack)
block_invert = False
self._csp_stack = csp_stack
if block_invert:
self._conv1_filters = lambda x: x
self._conv2_filters = lambda x: x // 2
self._conv1_kernel = (3, 3)
self._conv2_kernel = (1, 1)
else:
self._conv1_filters = lambda x: x // 2
self._conv2_filters = lambda x: x
self._conv1_kernel = (1, 1)
self._conv2_kernel = (3, 3)
# insert SPP will always add to the total number of layers, never replace
if insert_spp:
self._spp_keys = spp_keys if spp_keys is not None else [5, 9, 13]
self.layer_list = self._insert_spp(self.layer_list)
if repetitions > 1:
self.outputs[-2] = True
if insert_sam:
self.layer_list = self._insert_sam(self.layer_list, self.outputs)
self._repetitions += 1
self.outputs[-1] = True
def _get_base_layers(self):
layer_list = []
outputs = []
for i in range(self._repetitions):
layers = ['conv1'] * ((i + 1) % 2) + ['conv2'] * (i % 2)
layer_list.extend(layers)
outputs = [False] + outputs
return layer_list, outputs
def _insert_spp(self, layer_list):
if len(layer_list) <= 3:
layer_list[1] = 'spp'
else:
layer_list[3] = 'spp'
return layer_list
def _convert_csp(self, layer_list, outputs, csp_stack_size):
layer_list[0] = 'csp_route'
layer_list.insert(csp_stack_size - 1, 'csp_connect')
outputs.insert(csp_stack_size - 1, False)
return layer_list, outputs
def _insert_sam(self, layer_list, outputs):
if len(layer_list) >= 2 and layer_list[-2] != 'spp':
layer_list.insert(-2, 'sam')
outputs.insert(-1, True)
else:
layer_list.insert(-1, 'sam')
outputs.insert(-1, False)
return layer_list
def _conv1(self, filters, kwargs, csp=False):
if csp:
filters_ = self._csp_filters
else:
filters_ = self._conv1_filters
x1 = ConvBN(
filters=filters_(filters),
kernel_size=self._conv1_kernel,
strides=(1, 1),
padding='same',
use_bn=True,
**kwargs)
return x1
def _conv2(self, filters, kwargs, csp=False):
if csp:
filters_ = self._csp_filters
else:
filters_ = self._conv2_filters
x1 = ConvBN(
filters=filters_(filters),
kernel_size=self._conv2_kernel,
strides=(1, 1),
padding='same',
use_bn=True,
**kwargs)
return x1
def _csp_route(self, filters, kwargs):
x1 = CSPRoute(
filters=filters,
filter_scale=self._csp_scale,
downsample=False,
**kwargs)
return x1
def _csp_connect(self, filters, kwargs):
x1 = CSPConnect(filters=filters, drop_final=True, drop_first=True, **kwargs)
return x1
def _spp(self, filters, kwargs):
x1 = SPP(self._spp_keys)
return x1
def _sam(self, filters, kwargs):
x1 = SAM(filters=-1, use_pooling=False, use_bn=True, **kwargs)
return x1
def build(self, input_shape):
dark_conv_args = {
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_regularizer': self._kernel_regularizer,
'leaky_alpha': self._leaky_alpha,
}
csp = False
self.layers = []
for layer in self.layer_list:
if layer == 'csp_route':
self.layers.append(self._csp_route(self._filters, dark_conv_args))
csp = True
elif layer == 'csp_connect':
self.layers.append(self._csp_connect(self._filters, dark_conv_args))
csp = False
elif layer == 'conv1':
self.layers.append(self._conv1(self._filters, dark_conv_args, csp=csp))
elif layer == 'conv2':
self.layers.append(self._conv2(self._filters, dark_conv_args, csp=csp))
elif layer == 'spp':
self.layers.append(self._spp(self._filters, dark_conv_args))
elif layer == 'sam':
self.layers.append(self._sam(-1, dark_conv_args))
self._lim = len(self.layers)
super().build(input_shape)
def _call_regular(self, inputs, training=None):
# check efficiency
x = inputs
x_prev = x
output_prev = True
for (layer, output) in zip(self.layers, self.outputs):
if output_prev:
x_prev = x
x = layer(x)
output_prev = output
return x_prev, x
def _call_csp(self, inputs, training=None):
# check efficiency
x = inputs
x_prev = x
output_prev = True
x_route = None
for i, (layer, output) in enumerate(zip(self.layers, self.outputs)):
if output_prev:
x_prev = x
if i == 0:
x, x_route = layer(x)
elif i == self._csp_stack - 1:
x = layer([x, x_route])
else:
x = layer(x)
output_prev = output
return x_prev, x
def call(self, inputs, training=None):
if self._csp_stack > 0:
return self._call_csp(inputs, training=training)
else:
return self._call_regular(inputs)
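# Illustrative sketch (assumed shapes): DarkRouteProcess returns a pair of
# tensors, the features captured one step before the end of the stack (used
# for routing to the next level) and the fully processed features.
#
#   proc = DarkRouteProcess(filters=1024, repetitions=3, insert_spp=True)
#   x_route, x = proc(x)  # both keep the input spatial resolution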
class Reorg(tf.keras.layers.Layer):
"""Splits a high resolution image into 4 lower resolution images.
Used in YOLOR to process very high resolution inputs efficiently.
for example an input image of [1280, 1280, 3] will become [640, 640, 12];
the image is subsampled in such a way that all spatial information is
retained across the channels.
"""
def call(self, x, training=None):
return tf.concat([
x[..., ::2, ::2, :], x[..., 1::2, ::2, :], x[..., ::2, 1::2, :],
x[..., 1::2, 1::2, :]
],
axis=-1)
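# Illustrative sketch: Reorg trades spatial resolution for channel depth
# without discarding any pixels, e.g. for a batched image tensor:
#
#   x = tf.ones([1, 1280, 1280, 3])
#   y = Reorg()(x)  # y.shape == [1, 640, 640, 12]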
class SPPCSPC(tf.keras.layers.Layer):
"""Cross-stage partial network with spatial pyramid pooling.
This module is used in YOLOv7 to process backbone features at the highest
level. SPPCSPC uses a fusion-first CSP block and applies SPP within
the dense block.
"""
def __init__(
self,
filters,
pool_sizes=(5, 9, 13),
scale=0.5,
kernel_initializer='VarianceScaling',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
use_separable_conv=False,
use_bn=True,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
**kwargs):
"""Initializes SPPCSPC block.
Args:
filters: an `int` for filters used in Conv2D.
pool_sizes: a tuple of `int` for maxpool layer used in the dense block.
scale: a `float` scale that applies on the filters to determine the
internal Conv2D filters within CSP block.
kernel_initializer: string to indicate which function to use to initialize
weights in Conv2D.
bias_initializer: string to indicate which function to use to initialize
bias.
kernel_regularizer: string to indicate which function to use to
regularize weights in Conv2D.
bias_regularizer: string to indicate which function to use to regularize
bias.
use_separable_conv: `bool`, whether to use separable convs.
use_bn: boolean for whether to use batch normalization.
use_sync_bn: boolean for whether to synchronize batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
norm_momentum: float momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
activation: string to indicate the activation function used after each
Conv2D.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._filters = filters
self._pool_sizes = pool_sizes
self._scale = scale
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_separable_conv = use_separable_conv
self._use_bn = use_bn
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
def build(self, input_shape):
filters = self._filters * 2 * self._scale
conv_op = functools.partial(
ConvBN,
activation=self._activation,
use_separable_conv=self._use_separable_conv,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_initializer,
bias_regularizer=self._bias_regularizer,
use_bn=self._use_bn,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
)
self._conv1_1 = conv_op(filters, kernel_size=1, strides=1)
self._conv1_2 = conv_op(filters, kernel_size=3, strides=1)
self._conv1_3 = conv_op(filters, kernel_size=1, strides=1)
self._poolings = [
tf.keras.layers.MaxPooling2D(pool_size, strides=1, padding='same')
for pool_size in self._pool_sizes
]
self._conv1_4 = conv_op(filters, kernel_size=1, strides=1)
self._conv1_5 = conv_op(filters, kernel_size=3, strides=1)
self._conv2_1 = conv_op(filters, kernel_size=1, strides=1)
self._merge_conv = conv_op(self._filters, kernel_size=1, strides=1)
super().build(input_shape)
def call(self, inputs, training=None):
x = self._conv1_3(self._conv1_2(self._conv1_1(inputs)))
x = self._conv1_5(
self._conv1_4(
tf.concat([x] + [pooling(x) for pooling in self._poolings], -1)
)
)
y = self._conv2_1(inputs)
return self._merge_conv(tf.concat([x, y], axis=-1))
def get_config(self):
# used to store/share parameters to reconstruct the model
layer_config = {
'filters': self._filters,
'pool_sizes': self._pool_sizes,
'scale': self._scale,
'kernel_initializer': self._kernel_initializer,
'bias_initializer': self._bias_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'use_bn': self._use_bn,
'use_sync_bn': self._use_sync_bn,
'use_separable_conv': self._use_separable_conv,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
}
layer_config.update(super().get_config())
return layer_config
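# Illustrative sketch (assumed shapes): SPPCSPC preserves the spatial
# resolution and maps the input to `filters` channels; the internal Conv2D
# widths scale with `scale` (filters * 2 * 0.5 = filters by default).
#
#   sppcspc = SPPCSPC(filters=512)
#   y = sppcspc(x)  # x: [B, H, W, C] -> y: [B, H, W, 512]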
class RepConv(tf.keras.layers.Layer):
"""Represented convolution.
https://arxiv.org/abs/2101.03697
"""
def __init__(
self,
filters,
kernel_size=3,
strides=1,
padding='same',
activation='swish',
use_separable_conv=False,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
**kwargs
):
"""Initializes RepConv layer.
Args:
filters: integer for output depth, or the number of features to learn.
kernel_size: integer or tuple for the shape of the weight matrix or kernel
to learn.
strides: integer or tuple for how much to move the kernel after each
kernel use.
padding: string 'valid' or 'same', if same, then pad the image, else do
not.
activation: string or None for activation function to use in layer, if
None activation is replaced by linear.
use_separable_conv: `bool`, whether to use separable convs.
use_sync_bn: boolean for whether to synchronize batch normalization
statistics of all batch norm layers to the model's global statistics
(across all input batches).
norm_momentum: float momentum to use for batch normalization.
norm_epsilon: float for batch normalization epsilon.
kernel_initializer: string to indicate which function to use to initialize
weights.
kernel_regularizer: string to indicate which function to use to
regularize weights.
bias_initializer: string to indicate which function to use to initialize
bias.
bias_regularizer: string to indicate which function to use to regularize
bias.
**kwargs: other keyword arguments.
"""
super().__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._padding = padding
self._activation = activation
self._use_separable_conv = use_separable_conv
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
# For deploy.
self._fuse = False
def build(self, input_shape):
conv_op = functools.partial(
tf.keras.layers.SeparableConv2D
if self._use_separable_conv
else tf.keras.layers.Conv2D,
filters=self._filters,
strides=self._strides,
padding=self._padding,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_initializer=self._bias_initializer,
bias_regularizer=self._bias_regularizer,
)
bn_op = functools.partial(
tf.keras.layers.BatchNormalization,
synchronized=self._use_sync_bn,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
)
self._activation_fn = tf_utils.get_activation(self._activation)
self._rbr_reparam = conv_op(kernel_size=self._kernel_size, use_bias=True)
if input_shape[-1] == self._filters and self._strides == 1:
self._rbr_identity = bn_op()
self._rbr_dense = conv_op(kernel_size=self._kernel_size, use_bias=False)
self._rbr_dense_bn = bn_op()
self._rbr_1x1 = conv_op(kernel_size=1, use_bias=False)
self._rbr_1x1_bn = bn_op()
def call(self, inputs, training=None):
if self._fuse:
return self._activation_fn(self._rbr_reparam(inputs))
id_out = 0
if hasattr(self, '_rbr_identity'):
id_out = self._rbr_identity(inputs)
x = self._rbr_dense_bn(self._rbr_dense(inputs))
y = self._rbr_1x1_bn(self._rbr_1x1(inputs))
return self._activation_fn(x + y + id_out)
def fuse(self):
if self._fuse:
return
# TODO(b/264495198): Implement fuse for RepConv.
raise NotImplementedError()
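# Illustrative sketch (assumed usage): at training time RepConv runs up to
# three parallel branches (a kxk conv, a 1x1 conv, and an identity BN branch
# when input depth and stride allow), which re-parameterization would later
# fold into the single `_rbr_reparam` conv for deployment.
#
#   rep = RepConv(filters=256, kernel_size=3, strides=1)
#   y = rep(x)  # x: [B, H, W, 256] -> y: [B, H, W, 256]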
| 68,760 | 34.153885 | 80 | py |
models | models-master/official/projects/yolo/modeling/backbones/yolov7.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains backbone architectures for YOLOv7 families.
The models are built with ELAN and E-ELAN.
ELAN was proposed in:
[1] Wang, Chien-Yao and Liao, Hong-Yuan Mark and Yeh, I-Hau
Designing Network Design Strategies Through Gradient Path Analysis
arXiv:2211.04800
E-ELAN was proposed in the YOLOv7 paper:
[1] Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark
YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time
object detectors
arXiv:2207.02696
"""
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.projects.yolo.ops import initializer_ops
from official.vision.modeling.backbones import factory
# Required block functions for YOLOv7 backbone families.
_BLOCK_FNS = {
'convbn': nn_blocks.ConvBN,
'maxpool2d': tf.keras.layers.MaxPooling2D,
'concat': tf.keras.layers.Concatenate,
}
# Names for key arguments needed by each block function.
_BLOCK_SPEC_SCHEMAS = {
'convbn': [
'block_fn',
'from',
'kernel_size',
'strides',
'filters',
'is_output',
],
'maxpool2d': [
'block_fn',
'from',
'pool_size',
'strides',
'padding',
'is_output',
],
'concat': [
'block_fn',
'from',
'axis',
'is_output',
]
}
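# Illustrative note: each row in the variant lists below is zipped with the
# matching schema above. For example, the first row of _YoloV7Tiny,
#   ['convbn', -1, 3, 2, 32, False]
# reads as a ConvBN block with kernel_size=3, strides=2 and filters=32,
# applied to the previous output (`from` = -1) and not exported as an
# output endpoint.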
# Define YOLOv7-tiny variant.
_YoloV7Tiny = [
['convbn', -1, 3, 2, 32, False], # 0-P1/2
['convbn', -1, 3, 2, 64, False], # 1-P2/4
['convbn', -1, 1, 1, 32, False],
['convbn', -2, 1, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['convbn', -1, 3, 1, 32, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 64, False], # 7
['maxpool2d', -1, 2, 2, 'same', False], # 8-P3/8
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 128, True], # 14
['maxpool2d', -1, 2, 2, 'same', False], # 15-P4/16
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 256, True], # 21
['maxpool2d', -1, 2, 2, 'same', False], # 22-P5/32
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -2, -3, -4], -1, False],
['convbn', -1, 1, 1, 512, True], # 28
]
# Define YOLOv7 variant.
_YoloV7 = [
['convbn', -1, 3, 1, 32, False], # 0
['convbn', -1, 3, 2, 64, False], # 1-P1/2
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 2, 128, False], # 3-P2/4
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 256, False], # 11
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 128, False],
['convbn', -3, 1, 1, 128, False],
['convbn', -1, 3, 2, 128, False],
['concat', [-1, -3], -1, False], # 16-P3/8
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 512, True], # 24
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 256, False],
['convbn', -3, 1, 1, 256, False],
['convbn', -1, 3, 2, 256, False],
['concat', [-1, -3], -1, False], # 29-P4/16
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 1024, True], # 37
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 512, False],
['convbn', -3, 1, 1, 512, False],
['convbn', -1, 3, 2, 512, False],
['concat', [-1, -3], -1, False], # 42-P5/32
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -6], -1, False],
['convbn', -1, 1, 1, 1024, True], # 50
]
_YoloV7X = [
['convbn', -1, 3, 1, 40, False], # 0
['convbn', -1, 3, 2, 80, False], # 1-P1/2
['convbn', -1, 3, 1, 80, False],
['convbn', -1, 3, 2, 160, False], # 3-P2/4
['convbn', -1, 1, 1, 64, False],
['convbn', -2, 1, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['convbn', -1, 3, 1, 64, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 320, False], # 13
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 160, False],
['convbn', -3, 1, 1, 160, False],
['convbn', -1, 3, 2, 160, False],
['concat', [-1, -3], -1, False], # 18-P3/8
['convbn', -1, 1, 1, 128, False],
['convbn', -2, 1, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['convbn', -1, 3, 1, 128, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 640, True], # 28
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 320, False],
['convbn', -3, 1, 1, 320, False],
['convbn', -1, 3, 2, 320, False],
['concat', [-1, -3], -1, False], # 33-P4/16
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 1280, True], # 43
['maxpool2d', -1, 2, 2, 'same', False],
['convbn', -1, 1, 1, 640, False],
['convbn', -3, 1, 1, 640, False],
['convbn', -1, 3, 2, 640, False],
['concat', [-1, -3], -1, False], # 48-P5/32
['convbn', -1, 1, 1, 256, False],
['convbn', -2, 1, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['convbn', -1, 3, 1, 256, False],
['concat', [-1, -3, -5, -7, -8], -1, False],
['convbn', -1, 1, 1, 1280, True], # 58
]
# Aggregates all variants for YOLOv7 backbones.
BACKBONES = {
'yolov7-tiny': _YoloV7Tiny,
'yolov7': _YoloV7,
'yolov7x': _YoloV7X,
}
class YoloV7(tf.keras.Model):
"""YOLOv7 backbone architecture."""
def __init__(
self,
model_id='yolov7',
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
**kwargs):
"""Initializes the YOLOv7 backbone.
Args:
model_id: a `str` represents the model variants.
input_specs: a `tf.keras.layers.InputSpec` of the input tensor.
use_sync_bn: if set to `True`, use synchronized batch normalization.
norm_momentum: a `float` of normalization momentum for the moving average.
norm_epsilon: a small `float` added to variance to avoid dividing by zero.
activation: a `str` name of the activation function.
kernel_initializer: a `str` for kernel initializer of convolutional
layers.
kernel_regularizer: a `tf.keras.regularizers.Regularizer` object for
Conv2D. Defaults to None.
bias_initializer: a `str` for bias initializer of convolutional layers.
bias_regularizer: a `tf.keras.regularizers.Regularizer` object for Conv2D.
Defaults to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._activation = activation
self._kernel_initializer = initializer_ops.pytorch_kernel_initializer(
kernel_initializer
)
self._kernel_regularizer = kernel_regularizer
self._bias_initializer = bias_initializer
self._bias_regularizer = bias_regularizer
inputs = tf.keras.layers.Input(shape=input_specs.shape[1:])
block_specs = BACKBONES[model_id.lower()]
outputs = []
endpoints = {}
level = 3
for spec in block_specs:
block_kwargs = dict(zip(_BLOCK_SPEC_SCHEMAS[spec[0]], spec))
block_fn_str = block_kwargs.pop('block_fn')
from_index = block_kwargs.pop('from')
is_output = block_kwargs.pop('is_output')
if not outputs:
x = inputs
elif isinstance(from_index, int):
x = outputs[from_index]
else:
x = [outputs[idx] for idx in from_index]
if block_fn_str in ['convbn']:
block_kwargs.update({
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
})
block_fn = _BLOCK_FNS[block_fn_str](**block_kwargs)
x = block_fn(x)
outputs.append(x)
if is_output:
endpoints[str(level)] = x
level += 1
self._output_specs = {k: v.get_shape() for k, v in endpoints.items()}
super().__init__(inputs=inputs, outputs=endpoints, **kwargs)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'activation': self._activation,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_initializer': self._bias_initializer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('yolov7')
def build_yolov7(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds YOLOv7."""
assert backbone_config.type == 'yolov7', (
f'Inconsistent backbone type {backbone_config.type}.')
backbone_config = backbone_config.get()
assert backbone_config.model_id in BACKBONES, (
f'Unsupported backbone {backbone_config.model_id}.')
model = YoloV7(
model_id=backbone_config.model_id,
input_specs=input_specs,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
activation=norm_activation_config.activation,
kernel_regularizer=l2_regularizer,
)
return model
| 12,875 | 32.185567 | 80 | py |
models | models-master/official/projects/yolo/modeling/backbones/yolov7_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolov7 backbone."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.backbones import yolov7
_INPUT_SIZE = (224, 224)
class YoloV7BackboneTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('yolov7',),
)
def test_network_creation(self, model_id):
"""Tests declaration of YOLOv7 backbone variants."""
tf.keras.backend.set_image_data_format('channels_last')
network = yolov7.YoloV7(model_id)
self.assertEqual(network.get_config()['model_id'], model_id)
inputs = tf.keras.Input(shape=(*_INPUT_SIZE, 3), batch_size=1)
outputs = network(inputs)
for level, level_output in outputs.items():
scale = 2**int(level)
input_size = (_INPUT_SIZE[0] // scale, _INPUT_SIZE[1] // scale)
self.assertAllEqual((1, *input_size), level_output.shape.as_list()[:-1])
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
)
)
def test_sync_bn_multiple_devices(self, strategy):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(1, *_INPUT_SIZE, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
network = yolov7.YoloV7(model_id='yolov7')
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='yolov7',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='swish',
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
)
network = yolov7.YoloV7(**kwargs)
# Create another network object from the first object's config.
new_network = yolov7.YoloV7.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,049 | 31.795699 | 79 | py |
models | models-master/official/projects/yolo/modeling/backbones/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/projects/yolo/modeling/backbones/darknet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolo."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.projects.yolo.modeling.backbones import darknet
class DarknetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(224, 'darknet53', 2, 1, True),
(224, 'darknettiny', 1, 2, False),
(224, 'cspdarknettiny', 1, 1, False),
(224, 'cspdarknet53', 2, 1, True),
)
def test_network_creation(self, input_size, model_id, endpoint_filter_scale,
scale_final, dilate):
"""Test creation of ResNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = darknet.Darknet(
model_id=model_id, min_level=3, max_level=5, dilate=dilate)
self.assertEqual(network.model_id, model_id)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
if dilate:
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale
], endpoints['3'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3, 256 * endpoint_filter_scale
], endpoints['4'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3,
512 * endpoint_filter_scale * scale_final
], endpoints['5'].shape.as_list())
else:
self.assertAllEqual([
1, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale
], endpoints['3'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale
], endpoints['4'].shape.as_list())
self.assertAllEqual([
1, input_size / 2**5, input_size / 2**5,
512 * endpoint_filter_scale * scale_final
], endpoints['5'].shape.as_list())
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
use_sync_bn=[False, True],
))
def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(1, 224, 224, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
network = darknet.Darknet(
model_id='darknet53',
min_level=3,
max_level=5,
use_sync_bn=use_sync_bn,
)
_ = network(inputs)
@parameterized.parameters(1, 3, 4)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = darknet.Darknet(
model_id='darknet53', min_level=3, max_level=5, input_specs=input_specs)
inputs = tf.keras.Input(shape=(224, 224, input_dim), batch_size=1)
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='darknet53',
min_level=3,
max_level=5,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = darknet.Darknet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = darknet.Darknet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 4,758 | 34.251852 | 80 | py |
models | models-master/official/projects/yolo/modeling/backbones/darknet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of Darknet Backbone Networks.
The models are inspired by ResNet and CSPNet.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Cross Stage Partial networks (CSPNets) were proposed in:
[1] Chien-Yao Wang, Hong-Yuan Mark Liao, I-Hau Yeh, Yueh-Hua Wu, Ping-Yang Chen,
Jun-Wei Hsieh
CSPNet: A New Backbone that can Enhance Learning Capability of CNN.
arXiv:1911.11929
Darknets are used mainly for object detection in:
[1] Joseph Redmon, Ali Farhadi
YOLOv3: An Incremental Improvement. arXiv:1804.02767
[2] Alexey Bochkovskiy, Chien-Yao Wang, Hong-Yuan Mark Liao
YOLOv4: Optimal Speed and Accuracy of Object Detection. arXiv:2004.10934
"""
import collections
import tensorflow as tf
from official.modeling import hyperparams
from official.projects.yolo.modeling.layers import nn_blocks
from official.vision.modeling.backbones import factory
class BlockConfig:
"""Class to store layer config to make code more readable."""
def __init__(self, layer, stack, reps, bottleneck, filters, pool_size,
kernel_size, strides, padding, activation, route, dilation_rate,
output_name, is_output):
"""Initializing method for BlockConfig.
Args:
layer: A `str` for layer name.
stack: A `str` for the type of layer ordering to use for this specific
level.
reps: An `int` for the number of times to repeat block.
bottleneck: A `bool` for whether this stack has a bottle neck layer.
filters: An `int` for the output depth of the level.
pool_size: An `int` for the pool_size of max pool layers.
kernel_size: An `int` for convolution kernel size.
strides: A `Union[int, tuple]` that indicates convolution strides.
padding: An `int` for the padding to apply to layers in this stack.
activation: A `str` for the activation to use for this stack.
route: An `int` for the level to route from to get the next input.
dilation_rate: An `int` for the scale used in dilated Darknet.
output_name: A `str` for the name to use for this output.
is_output: A `bool` for whether this layer is an output in the default
model.
"""
self.layer = layer
self.stack = stack
self.repetitions = reps
self.bottleneck = bottleneck
self.filters = filters
self.kernel_size = kernel_size
self.pool_size = pool_size
self.strides = strides
self.padding = padding
self.activation = activation
self.route = route
self.dilation_rate = dilation_rate
self.output_name = output_name
self.is_output = is_output
def build_block_specs(config):
specs = []
for layer in config:
specs.append(BlockConfig(*layer))
return specs
class LayerBuilder:
"""Layer builder class.
Class for quick look up of default layers used by darknet to
connect, introduce or exit a level. Used in place of an if condition
or switch to make adding new layers easier and to reduce redundant code.
"""
def __init__(self):
self._layer_dict = {
'ConvBN': (nn_blocks.ConvBN, self.conv_bn_config_todict),
'MaxPool': (tf.keras.layers.MaxPool2D, self.maxpool_config_todict)
}
def conv_bn_config_todict(self, config, kwargs):
dictvals = {
'filters': config.filters,
'kernel_size': config.kernel_size,
'strides': config.strides,
'padding': config.padding
}
dictvals.update(kwargs)
return dictvals
def darktiny_config_todict(self, config, kwargs):
dictvals = {'filters': config.filters, 'strides': config.strides}
dictvals.update(kwargs)
return dictvals
def maxpool_config_todict(self, config, kwargs):
return {
'pool_size': config.pool_size,
'strides': config.strides,
'padding': config.padding,
'name': kwargs['name']
}
def __call__(self, config, kwargs):
layer, get_param_dict = self._layer_dict[config.layer]
param_dict = get_param_dict(config, kwargs)
return layer(**param_dict)
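# Illustrative sketch (assumed usage): a raw config row is first turned into
# a BlockConfig (see build_block_specs above), then LayerBuilder dispatches
# on its `layer` field to build the actual Keras layer.
#
#   cfg = BlockConfig('ConvBN', None, 1, False, 32, None, 3, 1, 'same',
#                     'mish', -1, 1, 0, False)
#   layer = LayerBuilder()(cfg, {'name': 'stem'})  # -> nn_blocks.ConvBN(...)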
# model configs
LISTNAMES = [
'default_layer_name', 'level_type', 'number_of_layers_in_level',
'bottleneck', 'filters', 'kernal_size', 'pool_size', 'strides', 'padding',
'default_activation', 'route', 'dilation', 'level/name', 'is_output'
]
CSPDARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 106,
'neck_split': 132
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, True, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
CSPADARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'mish',
-1, 1, 1, False
],
[
'DarkRes', 'csp', 2, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 8, False, 256, None, None, None, None, 'mish', -1,
1, 3, True
],
[
'DarkRes', 'csp', 8, False, 512, None, None, None, None, 'mish', -1,
2, 4, True
],
[
'DarkRes', 'csp', 4, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
]
}
LARGECSP53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 100,
'neck_split': 135
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'mish', -1, 1, 0,
False
],
[
'DarkRes', 'csp', 1, False, 64, None, None, None, None, 'mish', -1,
1, 1, False
],
[
'DarkRes', 'csp', 3, False, 128, None, None, None, None, 'mish', -1,
1, 2, False
],
[
'DarkRes', 'csp', 15, False, 256, None, None, None, None, 'mish',
-1, 1, 3, True
],
[
'DarkRes', 'csp', 15, False, 512, None, None, None, None, 'mish',
-1, 2, 4, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 4, 5, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 8, 6, True
],
[
'DarkRes', 'csp', 7, False, 1024, None, None, None, None, 'mish',
-1, 16, 7, True
],
]
}
DARKNET53 = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 76
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkRes', 'residual', 1, True, 64, None, None, None, None, 'leaky',
-1, 1, 1, False
],
[
'DarkRes', 'residual', 2, False, 128, None, None, None, None,
'leaky', -1, 1, 2, False
],
[
'DarkRes', 'residual', 8, False, 256, None, None, None, None,
'leaky', -1, 1, 3, True
],
[
'DarkRes', 'residual', 8, False, 512, None, None, None, None,
'leaky', -1, 2, 4, True
],
[
'DarkRes', 'residual', 4, False, 1024, None, None, None, None,
'leaky', -1, 4, 5, True
],
]
}
CSPDARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 28
},
'backbone': [
[
'ConvBN', None, 1, False, 32, None, 3, 2, 'same', 'leaky', -1, 1, 0,
False
],
[
'ConvBN', None, 1, False, 64, None, 3, 2, 'same', 'leaky', -1, 1, 1,
False
],
[
'CSPTiny', 'csp_tiny', 1, False, 64, None, 3, 2, 'same', 'leaky',
-1, 1, 2, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 128, None, 3, 2, 'same', 'leaky',
-1, 1, 3, False
],
[
'CSPTiny', 'csp_tiny', 1, False, 256, None, 3, 2, 'same', 'leaky',
-1, 1, 4, True
],
[
'ConvBN', None, 1, False, 512, None, 3, 1, 'same', 'leaky', -1, 1,
5, True
],
]
}
DARKNETTINY = {
'list_names':
LISTNAMES,
'splits': {
'backbone_split': 14
},
'backbone': [
[
'ConvBN', None, 1, False, 16, None, 3, 1, 'same', 'leaky', -1, 1, 0,
False
],
[
'DarkTiny', 'tiny', 1, True, 32, None, 3, 2, 'same', 'leaky', -1, 1,
1, False
],
[
'DarkTiny', 'tiny', 1, True, 64, None, 3, 2, 'same', 'leaky', -1, 1,
2, False
],
[
'DarkTiny', 'tiny', 1, False, 128, None, 3, 2, 'same', 'leaky', -1,
1, 3, False
],
[
'DarkTiny', 'tiny', 1, False, 256, None, 3, 2, 'same', 'leaky', -1,
1, 4, True
],
[
'DarkTiny', 'tiny', 1, False, 512, None, 3, 2, 'same', 'leaky', -1,
1, 5, False
],
[
'DarkTiny', 'tiny', 1, False, 1024, None, 3, 1, 'same', 'leaky', -1,
1, 5, True
],
]
}
BACKBONES = {
'darknettiny': DARKNETTINY,
'darknet53': DARKNET53,
'cspdarknet53': CSPDARKNET53,
'altered_cspdarknet53': CSPADARKNET53,
'cspdarknettiny': CSPDARKNETTINY,
'csp-large': LARGECSP53,
}
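# Example (an illustrative sketch): backbone configs are looked up by
# lower-cased model id, e.g.
#   specs, splits = Darknet.get_model_config('cspdarknet53')
# where `specs` is a list of BlockConfig objects and `splits` gives the layer
# counts used to partition pretrained weights into backbone and neck pieces.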
class Darknet(tf.keras.Model):
"""The Darknet backbone architecture."""
def __init__(
self,
model_id='darknet53',
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
min_level=None,
max_level=5,
width_scale=1.0,
depth_scale=1.0,
use_reorg_input=False,
csp_level_mod=(),
activation=None,
use_sync_bn=False,
use_separable_conv=False,
norm_momentum=0.99,
norm_epsilon=0.001,
dilate=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
layer_specs, splits = Darknet.get_model_config(model_id)
self._model_name = model_id
self._splits = splits
self._input_specs = input_specs
self._registry = LayerBuilder()
    # Default layer lookup registry.
self._min_size = min_level
self._max_size = max_level
self._output_specs = None
self._csp_level_mod = set(csp_level_mod)
self._kernel_initializer = kernel_initializer
self._bias_regularizer = bias_regularizer
self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
self._use_sync_bn = use_sync_bn
self._use_separable_conv = use_separable_conv
self._activation = activation
self._kernel_regularizer = kernel_regularizer
self._dilate = dilate
self._width_scale = width_scale
self._depth_scale = depth_scale
self._use_reorg_input = use_reorg_input
self._default_dict = {
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
'use_separable_conv': self._use_separable_conv,
'dilation_rate': 1,
'name': None
}
inputs = tf.keras.Input(shape=input_specs.shape[1:])
output = self._build_struct(layer_specs, inputs)
super().__init__(
inputs=inputs, outputs=output, name=self._model_name, **kwargs
)
@property
def output_specs(self):
return self._output_specs
@property
def splits(self):
return self._splits
def _build_struct(self, net, inputs):
if self._use_reorg_input:
inputs = nn_blocks.Reorg()(inputs)
net[0].filters = net[1].filters
net[0].output_name = net[1].output_name
del net[1]
endpoints = collections.OrderedDict()
stack_outputs = [inputs]
for i, config in enumerate(net):
if config.output_name > self._max_size:
break
if config.output_name in self._csp_level_mod:
config.stack = 'residual'
config.filters = int(config.filters * self._width_scale)
config.repetitions = int(config.repetitions * self._depth_scale)
if config.stack is None:
x = self._build_block(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'residual':
x = self._residual_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp':
x = self._csp_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
elif config.stack == 'csp_tiny':
x_pass, x = self._csp_tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x_pass)
elif config.stack == 'tiny':
x = self._tiny_stack(
stack_outputs[config.route], config, name=f'{config.layer}_{i}')
stack_outputs.append(x)
if (config.is_output and self._min_size is None):
endpoints[str(config.output_name)] = x
elif (self._min_size is not None and
config.output_name >= self._min_size and
config.output_name <= self._max_size):
endpoints[str(config.output_name)] = x
    self._output_specs = {
        name: endpoint.get_shape() for name, endpoint in endpoints.items()
    }
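    # Illustrative note (a sketch, assuming default width/depth scales): for
    # 'darknet53' with min_level=3 and max_level=5 on a 448x448 input, the
    # returned endpoints are keyed by stringified level, with shapes roughly
    #   {'3': [N, 56, 56, 256], '4': [N, 28, 28, 512], '5': [N, 14, 14, 1024]}.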
return endpoints
def _get_activation(self, activation):
if self._activation is None:
return activation
return self._activation
def _csp_stack(self, inputs, config, name):
if config.bottleneck:
csp_filter_scale = 1
residual_filter_scale = 2
scale_filters = 1
else:
csp_filter_scale = 2
residual_filter_scale = 1
scale_filters = 2
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
degrid = int(tf.math.log(float(config.dilation_rate)) / tf.math.log(2.))
else:
self._default_dict['dilation_rate'] = 1
degrid = 0
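    # Worked example (illustrative): with dilation_rate=4, degrid =
    # log2(4) = 2, so the first `repetitions - 2` residual blocks below run
    # at rate 4 and the last two are "degridded" at halved rates (2, then 1).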
    # Swap in or add dilation for this stack as configured.
    x, x_route = nn_blocks.CSPRoute(
        filters=config.filters,
        filter_scale=csp_filter_scale,
        downsample=True,
        **self._default_dict,
    )(inputs)
dilated_reps = config.repetitions - degrid
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict,
)(x)
for i in range(dilated_reps, config.repetitions):
self._default_dict['dilation_rate'] = max(
1, self._default_dict['dilation_rate'] // 2
)
self._default_dict['name'] = (
f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
)
x = nn_blocks.DarkResidual(
filters=config.filters // scale_filters,
filter_scale=residual_filter_scale,
**self._default_dict,
)(x)
self._default_dict['name'] = f'{name}_csp_connect'
output = nn_blocks.CSPConnect(
filters=config.filters,
filter_scale=csp_filter_scale,
**self._default_dict,
)([x, x_route])
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return output
def _csp_tiny_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_csp_tiny'
    x, x_route = nn_blocks.CSPTiny(
        filters=config.filters, **self._default_dict
    )(inputs)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x, x_route
def _tiny_stack(self, inputs, config, name):
    x = tf.keras.layers.MaxPool2D(
        pool_size=2,
        strides=config.strides,
        padding='same',
        data_format=None,
        name=f'{name}_tiny/pool',
    )(inputs)
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_tiny/conv'
    x = nn_blocks.ConvBN(
        filters=config.filters,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding='same',
        **self._default_dict,
    )(x)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
def _residual_stack(self, inputs, config, name):
self._default_dict['activation'] = self._get_activation(config.activation)
self._default_dict['name'] = f'{name}_residual_down'
if self._dilate:
self._default_dict['dilation_rate'] = config.dilation_rate
if config.repetitions < 8:
config.repetitions += 2
else:
self._default_dict['dilation_rate'] = 1
x = nn_blocks.DarkResidual(
filters=config.filters, downsample=True, **self._default_dict
)(inputs)
dilated_reps = (
config.repetitions - self._default_dict['dilation_rate'] // 2 - 1
)
for i in range(dilated_reps):
self._default_dict['name'] = f'{name}_{i}'
x = nn_blocks.DarkResidual(filters=config.filters, **self._default_dict)(
x
)
for i in range(dilated_reps, config.repetitions - 1):
self._default_dict['dilation_rate'] = (
self._default_dict['dilation_rate'] // 2
)
self._default_dict['name'] = (
f"{name}_{i}_degridded_{self._default_dict['dilation_rate']}"
)
x = nn_blocks.DarkResidual(filters=config.filters, **self._default_dict)(
x
)
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
self._default_dict['dilation_rate'] = 1
return x
def _build_block(self, inputs, config, name):
x = inputs
i = 0
self._default_dict['activation'] = self._get_activation(config.activation)
while i < config.repetitions:
self._default_dict['name'] = f'{name}_{i}'
layer = self._registry(config, self._default_dict)
x = layer(x)
i += 1
self._default_dict['activation'] = self._activation
self._default_dict['name'] = None
return x
@staticmethod
def get_model_config(name):
name = name.lower()
backbone = BACKBONES[name]['backbone']
splits = BACKBONES[name]['splits']
return build_block_specs(backbone), splits
@property
def model_id(self):
return self._model_name
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def get_config(self):
layer_config = {
'model_id': self._model_name,
'min_level': self._min_size,
'max_level': self._max_size,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon,
'use_sync_bn': self._use_sync_bn,
'activation': self._activation,
}
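    # Example (an illustrative sketch): this config round-trips the model's
    # hyperparameters (not its weights) through Keras serialization, e.g.
    #   clone = Darknet.from_config(model.get_config())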
return layer_config
@factory.register_backbone_builder('darknet')
def build_darknet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None
) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds darknet."""
backbone_config = backbone_config.get()
model = Darknet(
model_id=backbone_config.model_id,
min_level=backbone_config.min_level,
max_level=backbone_config.max_level,
input_specs=input_specs,
dilate=backbone_config.dilate,
width_scale=backbone_config.width_scale,
depth_scale=backbone_config.depth_scale,
use_reorg_input=backbone_config.use_reorg_input,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
use_separable_conv=backbone_config.use_separable_conv,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
return model
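# Example usage (an illustrative sketch):
#   model = Darknet(model_id='darknet53', min_level=3, max_level=5)
#   feats = model(tf.ones([1, 416, 416, 3]))
#   # feats maps level names to tensors, e.g. feats['5'] has 1024 channels.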
| 22,015 | 30.317212 | 80 | py |
models | models-master/official/projects/yolo/modeling/heads/yolov7_head_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yolov7 heads."""
from absl.testing import parameterized
import tensorflow as tf
from official.projects.yolo.modeling.backbones import yolov7 as backbone
from official.projects.yolo.modeling.decoders import yolov7 as decoder
from official.projects.yolo.modeling.heads import yolov7_head as head
_INPUT_SIZE = (224, 224)
class YoloV7DetectionHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('yolov7',),
)
def test_network_creation(self, model_id):
"""Tests declaration of YOLOv7 detection head."""
tf.keras.backend.set_image_data_format('channels_last')
backbone_network = backbone.YoloV7(model_id)
decoder_network = decoder.YoloV7(backbone_network.output_specs, model_id)
head_network = head.YoloV7DetectionHead()
inputs = tf.keras.Input(shape=(*_INPUT_SIZE, 3), batch_size=1)
outputs = head_network(decoder_network(backbone_network(inputs)))
for level, level_output in outputs.items():
scale = 2 ** int(level)
input_size = (_INPUT_SIZE[0] // scale, _INPUT_SIZE[1] // scale)
head_config = head_network.get_config()
num_classes = head_config['num_classes']
num_anchors = head_config['num_anchors']
self.assertAllEqual(
(1, *input_size, num_anchors, num_classes + 5),
level_output.shape.as_list(),
)
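    # Illustrative note: for a 224x224 input, level '3' is stride 8, so its
    # grid is 28x28 and each cell predicts num_anchors boxes encoding
    # (num_classes + 5) values (4 box + 1 objectness + num_classes scores).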
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
num_classes=3,
min_level=3,
max_level=5,
num_anchors=3,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_initializer='zeros',
bias_regularizer=None,
)
network = head.YoloV7DetectionHead(**kwargs)
# Create another network object from the first object's config.
new_network = head.YoloV7DetectionHead.from_config(network.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,724 | 34.38961 | 79 | py |