repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
models | models-master/official/vision/utils/object_detection/preprocessor.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import numpy as np
import tensorflow as tf
from official.vision.utils.object_detection import box_list
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes
are in normalized form meaning their coordinates vary between [0, 1]. Each
row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
return flipped_boxes
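# Illustrative sketch (added for this write-up, not part of the original
# module): in normalized coordinates a left-right flip maps x -> 1 - x, so
# xmin and xmax swap roles. For example [ymin, xmin, ymax, xmax] =
# [0.1, 0.2, 0.5, 0.6] flips to [0.1, 0.4, 0.5, 0.8].
def _example_flip_boxes_left_right():
  boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]], dtype=tf.float32)
  return _flip_boxes_left_right(boxes)  # -> [[0.1, 0.4, 0.5, 0.8]]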
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints,
flip_point,
flip_permutation,
scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices to
the flipped keypoint indices. This is used primarily for keypoints that
are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
if not scope:
scope = 'FlipHorizontal'
with tf.name_scope(scope):
keypoints = tf.transpose(a=keypoints, perm=[1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(a=new_keypoints, perm=[1, 0, 2])
return new_keypoints
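# Hedged usage sketch (added for illustration; the keypoint names and values
# are assumptions, not from the original module): with keypoints ordered as
# ['head', 'right_eye', 'left_eye'], the permutation [0, 2, 1] swaps the two
# eyes, and each x coordinate is mirrored around flip_point (x -> 1 - x when
# flip_point is 0.5).
def _example_keypoint_flip_horizontal():
  keypoints = tf.constant(
      [[[0.2, 0.50],    # head      (y, x)
        [0.3, 0.40],    # right_eye
        [0.3, 0.65]]],  # left_eye
      dtype=tf.float32)
  return keypoint_flip_horizontal(
      keypoints, flip_point=0.5, flip_permutation=[0, 2, 1])
  # -> [[[0.2, 0.50], [0.3, 0.35], [0.3, 0.60]]]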
def keypoint_change_coordinate_frame(keypoints, window, scope=None):
"""Changes coordinate frame of the keypoints to be relative to window's frame.
Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
coordinates from keypoints of shape [num_instances, num_keypoints, 2]
to be relative to this window.
An example use case is data augmentation: where we are given groundtruth
keypoints and would like to randomly crop the image to some window. In this
case we need to change the coordinate frame of each groundtruth keypoint to be
relative to this new window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window we should change the coordinate frame to.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
if not scope:
scope = 'ChangeCoordinateFrame'
with tf.name_scope(scope):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
    # `keypoint_scale` (defined later in this module) rescales the shifted
    # keypoints into the window's normalized coordinate frame.
    new_keypoints = keypoint_scale(keypoints - [window[0], window[1]],
                                   1.0 / win_height, 1.0 / win_width)
return new_keypoints
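# Hedged worked example (added for illustration; the window and keypoint
# values are arbitrary): re-expressing keypoints relative to the crop window
# [y_min, x_min, y_max, x_max] = [0.25, 0.25, 0.75, 0.75]. A keypoint at
# (0.5, 0.5) stays at (0.5, 0.5) in the new frame ((0.5 - 0.25) / 0.5), while
# a keypoint at (0.25, 0.5) becomes (0.0, 0.5).
def _example_keypoint_change_coordinate_frame():
  keypoints = tf.constant([[[0.5, 0.5], [0.25, 0.5]]], dtype=tf.float32)
  window = tf.constant([0.25, 0.25, 0.75, 0.75], dtype=tf.float32)
  return keypoint_change_coordinate_frame(keypoints, window)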
def keypoint_prune_outside_window(keypoints, window, scope=None):
"""Prunes keypoints that fall outside a given window.
This function replaces keypoints that fall outside the given window with nan.
See also clip_to_window which clips any keypoints that fall outside the given
window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window outside of which the op should prune the keypoints.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
if not scope:
scope = 'PruneOutsideWindow'
with tf.name_scope(scope):
y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
valid_indices = tf.logical_and(
tf.logical_and(y >= win_y_min, y <= win_y_max),
tf.logical_and(x >= win_x_min, x <= win_x_max))
new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
new_keypoints = tf.concat([new_y, new_x], 2)
return new_keypoints
def random_horizontal_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
seed=None):
"""Randomly flips the image and detections horizontally.
The probability of flipping the image is 50%.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the
bounding boxes. Boxes are in normalized form meaning their coordinates
vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks. The masks are of the same height, width
as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape [num_instances,
num_keypoints, 2]. The keypoints are in y-x normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
seed: random seed
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
        'keypoints are provided but keypoint_flip_permutation is not provided')
with tf.name_scope('RandomHorizontalFlip'):
result = []
# random variable defining whether to do flip or not
do_a_flip_random = tf.greater(tf.random.uniform([], seed=seed), 0.5)
# flip image
image = tf.cond(
pred=do_a_flip_random,
true_fn=lambda: _flip_image(image),
false_fn=lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(
pred=do_a_flip_random,
true_fn=lambda: _flip_boxes_left_right(boxes),
false_fn=lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(
pred=do_a_flip_random,
true_fn=lambda: _flip_masks_left_right(masks),
false_fn=lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
pred=do_a_flip_random,
true_fn=lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),
false_fn=lambda: keypoints)
result.append(keypoints)
return tuple(result)
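# Hedged usage sketch (added for illustration; the shapes, box, keypoints and
# seed are arbitrary): flipping an image together with its boxes and
# keypoints. The permutation [0, 2, 1] assumes keypoints ordered as
# ['head', 'right_eye', 'left_eye'].
def _example_random_horizontal_flip():
  image = tf.zeros([32, 32, 3], dtype=tf.float32)
  boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]], dtype=tf.float32)
  keypoints = tf.constant(
      [[[0.2, 0.5], [0.3, 0.4], [0.3, 0.6]]], dtype=tf.float32)
  return random_horizontal_flip(
      image,
      boxes=boxes,
      keypoints=keypoints,
      keypoint_flip_permutation=[0, 2, 1],
      seed=1)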
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = int(round(orig_height * small_scale_factor))
small_width = int(round(orig_width * small_scale_factor))
small_size = [small_height, small_width]
new_size = large_size
if max(large_size) > max_dimension:
new_size = small_size
else:
new_size = large_size
return tf.constant(new_size + [num_channels])
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
"""Compute new dynamic shape for resize_to_range method."""
image_shape = tf.shape(input=image)
orig_height = tf.cast(image_shape[0], dtype=tf.float32)
orig_width = tf.cast(image_shape[1], dtype=tf.float32)
num_channels = image_shape[2]
orig_min_dim = tf.minimum(orig_height, orig_width)
# Calculates the larger of the possible sizes
min_dimension = tf.constant(min_dimension, dtype=tf.float32)
large_scale_factor = min_dimension / orig_min_dim
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = tf.cast(
tf.round(orig_height * large_scale_factor), dtype=tf.int32)
large_width = tf.cast(
tf.round(orig_width * large_scale_factor), dtype=tf.int32)
large_size = tf.stack([large_height, large_width])
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = tf.maximum(orig_height, orig_width)
max_dimension = tf.constant(max_dimension, dtype=tf.float32)
small_scale_factor = max_dimension / orig_max_dim
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = tf.cast(
tf.round(orig_height * small_scale_factor), dtype=tf.int32)
small_width = tf.cast(
tf.round(orig_width * small_scale_factor), dtype=tf.int32)
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
pred=tf.cast(tf.reduce_max(input_tensor=large_size), dtype=tf.float32) >
max_dimension,
true_fn=lambda: small_size,
false_fn=lambda: large_size)
else:
new_size = large_size
return tf.stack(tf.unstack(new_size) + [num_channels])
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size of the larger image
dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input and
output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros so
the resulting image is of the spatial size [max_dimension, max_dimension].
If masks are included they are padded similarly.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeToRange'):
if image.get_shape().is_fully_defined():
new_size = _compute_new_static_size(image, min_dimension, max_dimension)
else:
new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)
new_image = tf.image.resize(image, new_size[:-1], method=method)
if pad_to_max_dimension:
new_image = tf.image.pad_to_bounding_box(new_image, 0, 0, max_dimension,
max_dimension)
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
new_masks = tf.squeeze(new_masks, 3)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(new_masks, 0, 0, max_dimension,
max_dimension)
result.append(new_masks)
result.append(new_size)
return result
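# Hedged usage sketch (added for illustration): 800/1333 are common detection
# settings, not values required by this function. A 600x1200 image is first
# scaled so its short side is 800 (giving 800x1600); that exceeds
# max_dimension, so the long side is scaled to 1333 instead (roughly
# 666x1333).
def _example_resize_to_range():
  image = tf.zeros([600, 1200, 3], dtype=tf.float32)
  resized_image, resized_shape = resize_to_range(
      image, min_dimension=800, max_dimension=1333)
  return resized_image, resized_shape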
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def box_list_scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
if not scope:
scope = 'Scale'
with tf.name_scope(scope):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def keypoint_scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
if not scope:
scope = 'Scale'
with tf.name_scope(scope):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form [ymin, xmin,
ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape [num_instances,
num_keypoints, 2]. The keypoints are in y-x normalized coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(input=image)[0]
image_width = tf.shape(input=image)[1]
scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
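# Hedged worked example (added for illustration; the values are arbitrary): on
# a 100x200 (height x width) image, the normalized box [0.1, 0.2, 0.5, 0.6]
# scales to the pixel box [10.0, 40.0, 50.0, 120.0].
def _example_scale_boxes_to_pixel_coordinates():
  image = tf.zeros([100, 200, 3], dtype=tf.float32)
  boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]], dtype=tf.float32)
  return scale_boxes_to_pixel_coordinates(image, boxes)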
| 20,356 | 38.148077 | 80 | py |
models | models-master/official/vision/utils/object_detection/box_coder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import tensorflow as tf
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(object):
"""Abstract base class for box coder."""
__metaclass__ = ABCMeta
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overriden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overriden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
coder_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
encoded_boxes.get_shape().assert_has_rank(3)
  if encoded_boxes.get_shape()[1] != anchors.num_boxes_static():
raise ValueError(
'The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
        (encoded_boxes.get_shape()[1], anchors.num_boxes_static()))
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
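# Hedged sketch (added for illustration, not a coder that ships with this
# module): a minimal concrete BoxCoder that encodes each box as the raw
# corner offsets from its anchor. Real coders such as the Faster R-CNN coder
# use center/size parameterizations, but the override pattern is the same.
class _ExampleOffsetBoxCoder(BoxCoder):
  """Encodes boxes as (box corners - anchor corners)."""
  @property
  def code_size(self):
    return 4
  def _encode(self, boxes, anchors):
    return boxes.get() - anchors.get()
  def _decode(self, rel_codes, anchors):
    # Local import keeps this sketch self-contained; the real module does not
    # depend on box_list.
    from official.vision.utils.object_detection import box_list
    return box_list.BoxList(rel_codes + anchors.get())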
| 4,776 | 30.846667 | 80 | py |
models | models-master/official/vision/utils/object_detection/box_list.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow as tf
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(input=self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.
"""
return self.data['boxes'].get_shape().dims[0].value
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
    This function returns the tensor stored under the specified field name;
    pass 'boxes' (or use the `get` method) to retrieve the box coordinates.
    Args:
      field: a string specifying the field to be accessed; the field must
        already exist in the BoxList.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
if not scope:
scope = 'get_center_coordinates_and_sizes'
with tf.name_scope(scope):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(a=box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
if not scope:
scope = 'transpose_coordinates'
with tf.name_scope(scope):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary. If None
(default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
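# Hedged usage sketch (added for illustration; the box and score values are
# arbitrary): building a BoxList, attaching an extra per-box field, and
# reading back center/size coordinates.
def _example_box_list_usage():
  boxes = tf.constant(
      [[0.1, 0.2, 0.5, 0.6], [0.0, 0.0, 1.0, 1.0]], dtype=tf.float32)
  boxlist = BoxList(boxes)
  boxlist.add_field('scores', tf.constant([0.9, 0.3], dtype=tf.float32))
  ycenter, xcenter, height, width = (
      boxlist.get_center_coordinates_and_sizes())
  return boxlist.get_field('scores'), ycenter, xcenter, height, width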
| 6,739 | 30.943128 | 80 | py |
models | models-master/official/vision/data/fake_feature_generator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates fake feature for testing and validation."""
import collections
from typing import Optional, Tuple, Union
import numpy as np
_RGB_CHANNELS = 3
def generate_image_np(height: int,
width: int,
num_channels: int = _RGB_CHANNELS) -> np.ndarray:
"""Returns a fake numpy image matrix array."""
return np.reshape(
np.mod(np.arange(height * width * num_channels), 255).astype(np.uint8),
newshape=(height, width, num_channels))
def generate_normalized_boxes_np(num_boxes: int) -> np.ndarray:
"""Returns a fake numpy normalized boxes array."""
xmins = np.reshape(np.arange(num_boxes) / (2 * num_boxes), (num_boxes, 1))
ymins = np.reshape(np.arange(num_boxes) / (2 * num_boxes), (num_boxes, 1))
xmaxs = xmins + .5
ymaxs = ymins + .5
return np.concatenate((ymins, xmins, ymaxs, xmaxs), axis=-1)
def generate_boxes_np(height: int, width: int, num_boxes: int) -> np.ndarray:
"""Returns a fake numpy absolute boxes array."""
normalized_boxes = generate_normalized_boxes_np(num_boxes)
normalized_boxes[:, 1::2] *= height
normalized_boxes[:, 0::2] *= width
return normalized_boxes
def generate_classes_np(num_classes: int,
size: Optional[int] = None) -> Union[int, np.ndarray]:
"""Returns a fake class or a fake numpy classes array."""
if size is None:
return num_classes - 1
return np.arange(size) % num_classes
def generate_confidences_np(
size: Optional[int] = None) -> Union[float, np.ndarray]:
"""Returns a fake confidence score or a fake numpy confidence score array."""
if size is None:
return 0.5
return np.arange(size) / size
def generate_instance_masks_np(height: int,
width: int,
boxes_np: np.ndarray,
normalized: bool = True) -> np.ndarray:
"""Returns a fake numpy instance mask matrices array."""
num_boxes = len(boxes_np)
instance_masks_np = np.zeros((num_boxes, height, width, 1))
if normalized:
boxes_np[:, 1::2] *= height
boxes_np[:, ::2] *= width
xmins = boxes_np[:, 0].astype(int)
ymins = boxes_np[:, 1].astype(int)
box_widths = boxes_np[:, 2].astype(int) - xmins
box_heights = boxes_np[:, 3].astype(int) - ymins
for i, (x, y, w, h) in enumerate(zip(xmins, ymins, box_widths, box_heights)):
instance_masks_np[i, y:y + h, x:x + w, :] = np.reshape(
np.mod(np.arange(h * w), 2).astype(np.uint8), newshape=(h, w, 1))
return instance_masks_np
def generate_semantic_mask_np(height: int, width: int,
num_classes: int) -> np.ndarray:
"""Returns a fake numpy semantic mask array."""
return generate_image_np(height, width, num_channels=1) % num_classes
def generate_panoptic_masks_np(
semantic_mask: np.ndarray, instance_masks: np.ndarray,
instance_classes: np.ndarray,
stuff_classes_offset: int) -> Tuple[np.ndarray, np.ndarray]:
"""Returns fake numpy panoptic category and instance mask arrays."""
panoptic_category_mask = np.zeros_like(semantic_mask)
panoptic_instance_mask = np.zeros_like(semantic_mask)
instance_ids = collections.defaultdict(int)
for instance_mask, instance_class in zip(instance_masks, instance_classes):
if instance_class == 0:
continue
instance_ids[instance_class] += 1
# If a foreground pixel is labelled previously, replace the old category
# class and instance ID with the new one.
foreground_indices = np.where(np.equal(instance_mask, 1))
# Note that instance class start from index 1.
panoptic_category_mask[foreground_indices] = instance_class + 1
panoptic_instance_mask[foreground_indices] = instance_ids[instance_class]
  # If any pixels remain unlabelled (i.e. still marked as background), the
  # semantic labels are used instead (when available).
# Note that in panoptic FPN, the panoptic labels are expected in this order,
# 0 (background), 1 ..., N (stuffs), N + 1, ..., N + M - 2 (things)
# N classes for stuff classes, without background class, and M classes for
# thing classes, with 0 representing the background class and 1 representing
# all stuff classes.
background_indices = np.where(np.equal(panoptic_category_mask, 0))
panoptic_category_mask[background_indices] = (
semantic_mask[background_indices] + stuff_classes_offset)
return panoptic_category_mask, panoptic_instance_mask
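# Hedged usage sketch (added for illustration; the sizes are arbitrary):
# composing the generators above to build a consistent fake detection sample
# (image, boxes, classes and instance masks), the way a test fixture might.
# Note that generate_instance_masks_np scales its boxes argument in place when
# normalized=True, so a copy is passed here.
def _example_generate_fake_detection_features(
    height=64, width=64, num_boxes=3, num_classes=5):
  image_np = generate_image_np(height, width)
  boxes_np = generate_normalized_boxes_np(num_boxes)
  classes_np = generate_classes_np(num_classes, size=num_boxes)
  masks_np = generate_instance_masks_np(
      height, width, np.copy(boxes_np), normalized=True)
  return image_np, boxes_np, classes_np, masks_np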
| 5,039 | 39 | 79 | py |
models | models-master/official/vision/data/create_coco_tf_record.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert raw COCO dataset to TFRecord format.
This scripts follows the label map decoder format and supports detection
boxes, instance masks and captions.
Example usage:
python create_coco_tf_record.py --logtostderr \
--image_dir="${TRAIN_IMAGE_DIR}" \
--image_info_file="${TRAIN_IMAGE_INFO_FILE}" \
--object_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--caption_annotations_file="${CAPTION_ANNOTATIONS_FILE}" \
--output_file_prefix="${OUTPUT_DIR/FILE_PREFIX}" \
--num_shards=100
"""
import collections
import json
import logging
import os
from absl import app # pylint:disable=unused-import
from absl import flags
import numpy as np
from pycocotools import mask
import tensorflow as tf
import multiprocessing as mp
from official.vision.data import tfrecord_lib
flags.DEFINE_boolean(
'include_masks', False, 'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: False.')
flags.DEFINE_multi_string('image_dir', '', 'Directory containing images.')
flags.DEFINE_string(
'image_info_file', '', 'File containing image information. '
'Tf Examples in the output files correspond to the image '
'info entries in this file. If this file is not provided '
'object_annotations_file is used if present. Otherwise, '
'caption_annotations_file is used to get image info.')
flags.DEFINE_string(
'object_annotations_file', '', 'File containing object '
'annotations - boxes and instance masks.')
flags.DEFINE_string('caption_annotations_file', '', 'File containing image '
'captions.')
flags.DEFINE_string('panoptic_annotations_file', '', 'File containing panoptic '
'annotations.')
flags.DEFINE_string('panoptic_masks_dir', '',
'Directory containing panoptic masks annotations.')
flags.DEFINE_boolean(
'include_panoptic_masks', False, 'Whether to include category and '
'instance masks in the result. These are required to run the PQ evaluator '
'default: False.')
flags.DEFINE_boolean(
'panoptic_skip_crowd', False, 'Whether to skip crowd or not for panoptic '
'annotations. default: False.')
flags.DEFINE_string('output_file_prefix', '/tmp/train', 'Path to output file')
flags.DEFINE_integer('num_shards', 32, 'Number of shards for output file.')
_NUM_PROCESSES = flags.DEFINE_integer(
'num_processes', None,
('Number of parallel processes to use. '
'If set to 0, disables multi-processing.'))
FLAGS = flags.FLAGS
logger = tf.get_logger()
logger.setLevel(logging.INFO)
_VOID_LABEL = 0
_VOID_INSTANCE_ID = 0
_THING_CLASS_ID = 1
_STUFF_CLASSES_OFFSET = 90
def coco_segmentation_to_mask_png(segmentation, height, width, is_crowd):
"""Encode a COCO mask segmentation as PNG string."""
run_len_encoding = mask.frPyObjects(segmentation, height, width)
binary_mask = mask.decode(run_len_encoding)
if not is_crowd:
binary_mask = np.amax(binary_mask, axis=2)
return tfrecord_lib.encode_mask_as_png(binary_mask)
def generate_coco_panoptics_masks(segments_info, mask_path,
include_panoptic_masks,
is_category_thing):
"""Creates masks for panoptic segmentation task.
Args:
segments_info: a list of dicts, where each dict has keys: [u'id',
u'category_id', u'area', u'bbox', u'iscrowd'], detailing information for
each segment in the panoptic mask.
mask_path: path to the panoptic mask.
include_panoptic_masks: bool, when set to True, category and instance
masks are included in the outputs. Set this to True, when using
the Panoptic Quality evaluator.
is_category_thing: a dict with category ids as keys and, 0/1 as values to
represent "stuff" and "things" classes respectively.
Returns:
A dict with keys: [u'semantic_segmentation_mask', u'category_mask',
u'instance_mask']. The dict contains 'category_mask' and 'instance_mask'
only if `include_panoptic_eval_masks` is set to True.
"""
rgb_mask = tfrecord_lib.read_image(mask_path)
r, g, b = np.split(rgb_mask, 3, axis=-1)
# decode rgb encoded panoptic mask to get segments ids
# refer https://cocodataset.org/#format-data
segments_encoded_mask = (r + g * 256 + b * (256**2)).squeeze()
semantic_segmentation_mask = np.ones_like(
segments_encoded_mask, dtype=np.uint8) * _VOID_LABEL
if include_panoptic_masks:
category_mask = np.ones_like(
segments_encoded_mask, dtype=np.uint8) * _VOID_LABEL
instance_mask = np.ones_like(
segments_encoded_mask, dtype=np.uint8) * _VOID_INSTANCE_ID
for idx, segment in enumerate(segments_info):
segment_id = segment['id']
category_id = segment['category_id']
is_crowd = segment['iscrowd']
if FLAGS.panoptic_skip_crowd and is_crowd:
continue
if is_category_thing[category_id]:
encoded_category_id = _THING_CLASS_ID
instance_id = idx + 1
else:
encoded_category_id = category_id - _STUFF_CLASSES_OFFSET
instance_id = _VOID_INSTANCE_ID
segment_mask = (segments_encoded_mask == segment_id)
semantic_segmentation_mask[segment_mask] = encoded_category_id
if include_panoptic_masks:
category_mask[segment_mask] = category_id
instance_mask[segment_mask] = instance_id
outputs = {
'semantic_segmentation_mask': tfrecord_lib.encode_mask_as_png(
semantic_segmentation_mask)
}
if include_panoptic_masks:
outputs.update({
'category_mask': tfrecord_lib.encode_mask_as_png(category_mask),
'instance_mask': tfrecord_lib.encode_mask_as_png(instance_mask)
})
return outputs
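# Hedged worked example (added for illustration): COCO panoptic PNGs encode a
# segment id in the RGB channels as id = R + G * 256 + B * 256**2, so a pixel
# with (R, G, B) = (26, 3, 0) belongs to segment id 26 + 3 * 256 = 794.
def _example_decode_panoptic_segment_id(r=26, g=3, b=0):
  return r + g * 256 + b * (256 ** 2)  # -> 794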
def coco_annotations_to_lists(bbox_annotations, id_to_name_map,
image_height, image_width, include_masks):
"""Converts COCO annotations to feature lists."""
data = dict((k, list()) for k in
['xmin', 'xmax', 'ymin', 'ymax', 'is_crowd',
'category_id', 'category_names', 'area'])
if include_masks:
data['encoded_mask_png'] = []
num_annotations_skipped = 0
for object_annotations in bbox_annotations:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
data['xmin'].append(float(x) / image_width)
data['xmax'].append(float(x + width) / image_width)
data['ymin'].append(float(y) / image_height)
data['ymax'].append(float(y + height) / image_height)
data['is_crowd'].append(object_annotations['iscrowd'])
category_id = int(object_annotations['category_id'])
data['category_id'].append(category_id)
data['category_names'].append(id_to_name_map[category_id].encode('utf8'))
data['area'].append(object_annotations['area'])
if include_masks:
data['encoded_mask_png'].append(
coco_segmentation_to_mask_png(object_annotations['segmentation'],
image_height, image_width,
object_annotations['iscrowd'])
)
return data, num_annotations_skipped
def bbox_annotations_to_feature_dict(
bbox_annotations, image_height, image_width, id_to_name_map, include_masks):
"""Convert COCO annotations to an encoded feature dict."""
data, num_skipped = coco_annotations_to_lists(
bbox_annotations, id_to_name_map, image_height, image_width,
include_masks)
feature_dict = {}
if len(bbox_annotations) != num_skipped:
feature_dict = {
'image/object/bbox/xmin': tfrecord_lib.convert_to_feature(data['xmin']),
'image/object/bbox/xmax': tfrecord_lib.convert_to_feature(data['xmax']),
'image/object/bbox/ymin': tfrecord_lib.convert_to_feature(data['ymin']),
'image/object/bbox/ymax': tfrecord_lib.convert_to_feature(data['ymax']),
'image/object/class/text': tfrecord_lib.convert_to_feature(
data['category_names']
),
'image/object/class/label': tfrecord_lib.convert_to_feature(
data['category_id']
),
'image/object/is_crowd': tfrecord_lib.convert_to_feature(
data['is_crowd']
),
'image/object/area': tfrecord_lib.convert_to_feature(
data['area'], 'float_list'
),
}
if include_masks:
feature_dict['image/object/mask'] = tfrecord_lib.convert_to_feature(
data['encoded_mask_png']
)
return feature_dict, num_skipped
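# Hedged worked example (added for illustration; the bbox and image size are
# arbitrary): COCO stores bboxes as absolute [x, y, width, height]. On a
# 200-wide, 100-high image the bbox [20, 10, 40, 30] converts to the
# normalized corners used above: xmin = 20/200 = 0.1, xmax = 60/200 = 0.3,
# ymin = 10/100 = 0.1, ymax = 40/100 = 0.4.
def _example_normalize_coco_bbox(
    bbox=(20, 10, 40, 30), image_width=200, image_height=100):
  x, y, width, height = bbox
  xmin, xmax = x / image_width, (x + width) / image_width
  ymin, ymax = y / image_height, (y + height) / image_height
  return ymin, xmin, ymax, xmax  # -> (0.1, 0.1, 0.4, 0.3)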
def encode_caption_annotations(caption_annotations):
captions = []
for caption_annotation in caption_annotations:
captions.append(caption_annotation['caption'].encode('utf8'))
return captions
def create_tf_example(image,
image_dirs,
panoptic_masks_dir=None,
bbox_annotations=None,
id_to_name_map=None,
caption_annotations=None,
panoptic_annotation=None,
is_category_thing=None,
include_panoptic_masks=False,
include_masks=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
image_dirs: list of directories containing the image files.
panoptic_masks_dir: `str` of the panoptic masks directory.
bbox_annotations:
list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
coordinates in the official COCO dataset are given as [x, y, width,
height] tuples using absolute coordinates where x, y represent the
top-left (0-indexed) corner. This function converts to the format
      expected by the TensorFlow Object Detection API (which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to image
size).
id_to_name_map: a dict mapping category IDs to string names.
caption_annotations:
      list of dicts with keys: [u'id', u'image_id', u'caption'].
panoptic_annotation: dict with keys: [u'image_id', u'file_name',
u'segments_info']. Where the value for segments_info is a list of dicts,
with each dict containing information for a single segment in the mask.
is_category_thing: `bool`, whether it is a category thing.
include_panoptic_masks: `bool`, whether to include panoptic masks.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG,
does not exist, or is not unique across image directories.
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
if len(image_dirs) > 1:
full_paths = [os.path.join(image_dir, filename) for image_dir in image_dirs]
full_existing_paths = [p for p in full_paths if tf.io.gfile.exists(p)]
if not full_existing_paths:
raise ValueError(
'{} does not exist across image directories.'.format(filename))
if len(full_existing_paths) > 1:
raise ValueError(
'{} is not unique across image directories'.format(filename))
full_path, = full_existing_paths
# If there is only one image directory, it's not worth checking for existence,
# since trying to open the file will raise an informative error message if it
# does not exist.
else:
image_dir, = image_dirs
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
feature_dict = tfrecord_lib.image_info_to_feature_dict(
image_height, image_width, filename, image_id, encoded_jpg, 'jpg')
num_annotations_skipped = 0
if bbox_annotations:
box_feature_dict, num_skipped = bbox_annotations_to_feature_dict(
bbox_annotations, image_height, image_width, id_to_name_map,
include_masks)
num_annotations_skipped += num_skipped
feature_dict.update(box_feature_dict)
if caption_annotations:
encoded_captions = encode_caption_annotations(caption_annotations)
feature_dict.update(
{'image/caption': tfrecord_lib.convert_to_feature(encoded_captions)})
if panoptic_annotation:
segments_info = panoptic_annotation['segments_info']
panoptic_mask_filename = os.path.join(
panoptic_masks_dir,
panoptic_annotation['file_name'])
encoded_panoptic_masks = generate_coco_panoptics_masks(
segments_info, panoptic_mask_filename, include_panoptic_masks,
is_category_thing)
feature_dict.update(
{'image/segmentation/class/encoded': tfrecord_lib.convert_to_feature(
encoded_panoptic_masks['semantic_segmentation_mask'])})
if include_panoptic_masks:
feature_dict.update({
'image/panoptic/category_mask': tfrecord_lib.convert_to_feature(
encoded_panoptic_masks['category_mask']),
'image/panoptic/instance_mask': tfrecord_lib.convert_to_feature(
encoded_panoptic_masks['instance_mask'])
})
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example, num_annotations_skipped
def _load_object_annotations(object_annotations_file):
"""Loads object annotation JSON file."""
with tf.io.gfile.GFile(object_annotations_file, 'r') as fid:
obj_annotations = json.load(fid)
images = obj_annotations['images']
id_to_name_map = dict((element['id'], element['name']) for element in
obj_annotations['categories'])
img_to_obj_annotation = collections.defaultdict(list)
logging.info('Building bounding box index.')
for annotation in obj_annotations['annotations']:
image_id = annotation['image_id']
img_to_obj_annotation[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in img_to_obj_annotation:
missing_annotation_count += 1
logging.info('%d images are missing bboxes.', missing_annotation_count)
return img_to_obj_annotation, id_to_name_map
def _load_caption_annotations(caption_annotations_file):
"""Loads caption annotation JSON file."""
with tf.io.gfile.GFile(caption_annotations_file, 'r') as fid:
caption_annotations = json.load(fid)
img_to_caption_annotation = collections.defaultdict(list)
logging.info('Building caption index.')
for annotation in caption_annotations['annotations']:
image_id = annotation['image_id']
img_to_caption_annotation[image_id].append(annotation)
missing_annotation_count = 0
images = caption_annotations['images']
for image in images:
image_id = image['id']
if image_id not in img_to_caption_annotation:
missing_annotation_count += 1
logging.info('%d images are missing captions.', missing_annotation_count)
return img_to_caption_annotation
def _load_panoptic_annotations(panoptic_annotations_file):
"""Loads panoptic annotation from file."""
with tf.io.gfile.GFile(panoptic_annotations_file, 'r') as fid:
panoptic_annotations = json.load(fid)
img_to_panoptic_annotation = dict()
logging.info('Building panoptic index.')
for annotation in panoptic_annotations['annotations']:
image_id = annotation['image_id']
img_to_panoptic_annotation[image_id] = annotation
is_category_thing = dict()
for category_info in panoptic_annotations['categories']:
is_category_thing[category_info['id']] = category_info['isthing'] == 1
missing_annotation_count = 0
images = panoptic_annotations['images']
for image in images:
image_id = image['id']
if image_id not in img_to_panoptic_annotation:
missing_annotation_count += 1
logging.info(
'%d images are missing panoptic annotations.', missing_annotation_count)
return img_to_panoptic_annotation, is_category_thing
def _load_images_info(images_info_file):
with tf.io.gfile.GFile(images_info_file, 'r') as fid:
info_dict = json.load(fid)
return info_dict['images']
def generate_annotations(images, image_dirs,
panoptic_masks_dir=None,
img_to_obj_annotation=None,
img_to_caption_annotation=None,
img_to_panoptic_annotation=None,
is_category_thing=None,
id_to_name_map=None,
include_panoptic_masks=False,
include_masks=False):
"""Generator for COCO annotations."""
for image in images:
object_annotation = (img_to_obj_annotation.get(image['id'], None) if
img_to_obj_annotation else None)
    caption_annotation = (img_to_caption_annotation.get(image['id'], None) if
img_to_caption_annotation else None)
panoptic_annotation = (img_to_panoptic_annotation.get(image['id'], None) if
img_to_panoptic_annotation else None)
yield (image, image_dirs, panoptic_masks_dir, object_annotation,
           id_to_name_map, caption_annotation, panoptic_annotation,
is_category_thing, include_panoptic_masks, include_masks)
def _create_tf_record_from_coco_annotations(images_info_file,
image_dirs,
output_path,
num_shards,
object_annotations_file=None,
caption_annotations_file=None,
panoptic_masks_dir=None,
panoptic_annotations_file=None,
include_panoptic_masks=False,
include_masks=False):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
images_info_file: JSON file containing image info. The number of tf.Examples
in the output tf Record files is exactly equal to the number of image info
entries in this file. This can be any of train/val/test annotation json
files Eg. 'image_info_test-dev2017.json',
'instance_annotations_train2017.json',
'caption_annotations_train2017.json', etc.
image_dirs: List of directories containing the image files.
output_path: Path to output tf.Record file.
num_shards: Number of output files to create.
object_annotations_file: JSON file containing bounding box annotations.
caption_annotations_file: JSON file containing caption annotations.
panoptic_masks_dir: Directory containing panoptic masks.
panoptic_annotations_file: JSON file containing panoptic annotations.
include_panoptic_masks: Whether to include 'category_mask'
and 'instance_mask', which is required by the panoptic quality evaluator.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
"""
logging.info('writing to output path: %s', output_path)
images = _load_images_info(images_info_file)
img_to_obj_annotation = None
img_to_caption_annotation = None
id_to_name_map = None
img_to_panoptic_annotation = None
is_category_thing = None
if object_annotations_file:
img_to_obj_annotation, id_to_name_map = (
_load_object_annotations(object_annotations_file))
if caption_annotations_file:
img_to_caption_annotation = (
_load_caption_annotations(caption_annotations_file))
if panoptic_annotations_file:
img_to_panoptic_annotation, is_category_thing = (
_load_panoptic_annotations(panoptic_annotations_file))
coco_annotations_iter = generate_annotations(
images=images,
image_dirs=image_dirs,
panoptic_masks_dir=panoptic_masks_dir,
img_to_obj_annotation=img_to_obj_annotation,
img_to_caption_annotation=img_to_caption_annotation,
img_to_panoptic_annotation=img_to_panoptic_annotation,
is_category_thing=is_category_thing,
id_to_name_map=id_to_name_map,
include_panoptic_masks=include_panoptic_masks,
include_masks=include_masks)
num_skipped = tfrecord_lib.write_tf_record_dataset(
output_path, coco_annotations_iter, create_tf_example, num_shards,
multiple_processes=_NUM_PROCESSES.value)
logging.info('Finished writing, skipped %d annotations.', num_skipped)
def main(_):
assert FLAGS.image_dir, '`image_dir` missing.'
assert (FLAGS.image_info_file or FLAGS.object_annotations_file or
FLAGS.caption_annotations_file), ('All annotation files are '
'missing.')
if FLAGS.image_info_file:
images_info_file = FLAGS.image_info_file
elif FLAGS.object_annotations_file:
images_info_file = FLAGS.object_annotations_file
else:
images_info_file = FLAGS.caption_annotations_file
directory = os.path.dirname(FLAGS.output_file_prefix)
if not tf.io.gfile.isdir(directory):
tf.io.gfile.makedirs(directory)
_create_tf_record_from_coco_annotations(images_info_file, FLAGS.image_dir,
FLAGS.output_file_prefix,
FLAGS.num_shards,
FLAGS.object_annotations_file,
FLAGS.caption_annotations_file,
FLAGS.panoptic_masks_dir,
FLAGS.panoptic_annotations_file,
FLAGS.include_panoptic_masks,
FLAGS.include_masks)
if __name__ == '__main__':
app.run(main)
| 22,732 | 38.952548 | 80 | py |
models | models-master/official/vision/data/tf_example_builder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_example_builder."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.data import fake_feature_generator
from official.vision.data import image_utils
from official.vision.data import tf_example_builder
class TfExampleBuilderTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('RGB_PNG', 128, 64, 3, 'PNG', 3),
('RGB_RAW', 128, 128, 3, 'RAW', 0),
('RGB_JPEG', 64, 128, 3, 'JPEG', [2, 5]))
def test_add_image_matrix_feature_success(self, height, width, num_channels,
image_format, label):
# Prepare test data.
image_np = fake_feature_generator.generate_image_np(height, width,
num_channels)
expected_image_bytes = image_utils.encode_image(image_np, image_format)
hashed_image = bytes('10242048', 'ascii')
# Run code logic.
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_image_matrix_feature(
image_np, image_format, hashed_image, label=label)
example = example_builder.example
# Verify outputs.
# Prefer to use string literal for feature keys to directly display the
# structure of the expected tf.train.Example.
if isinstance(label, int):
expected_labels = [label]
else:
expected_labels = label
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[expected_image_bytes])),
'image/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
'image/height':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[height])),
'image/width':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[width])),
'image/channels':
tf.train.Feature(
int64_list=tf.train.Int64List(
value=[num_channels])),
'image/source_id':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[hashed_image])),
'image/class/label':
tf.train.Feature(
int64_list=tf.train.Int64List(
value=expected_labels)),
})), example)
def test_add_image_matrix_feature_with_feature_prefix_success(self):
height = 64
width = 64
num_channels = 1
image_format = 'PNG'
feature_prefix = 'depth'
label = 8
image_np = fake_feature_generator.generate_image_np(height, width,
num_channels)
expected_image_bytes = image_utils.encode_image(image_np, image_format)
hashed_image = bytes('10242048', 'ascii')
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_image_matrix_feature(
image_np,
image_format,
hashed_image,
feature_prefix=feature_prefix,
label=label)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'depth/image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[expected_image_bytes])),
'depth/image/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
'depth/image/height':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[height])),
'depth/image/width':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[width])),
'depth/image/channels':
tf.train.Feature(
int64_list=tf.train.Int64List(
value=[num_channels])),
'depth/image/source_id':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[hashed_image])),
'depth/image/class/label':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[label]))
})), example)
def test_add_encoded_raw_image_feature_success(self):
height = 128
width = 128
num_channels = 3
image_format = 'RAW'
expected_image_bytes = bytes('image', 'ascii')
hashed_image = bytes('16188651', 'ascii')
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_encoded_image_feature(expected_image_bytes, 'RAW',
height, width, num_channels)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[expected_image_bytes])),
'image/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
'image/height':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[height])),
'image/width':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[width])),
'image/channels':
tf.train.Feature(
int64_list=tf.train.Int64List(
value=[num_channels])),
'image/source_id':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[hashed_image]))
})), example)
def test_add_encoded_raw_image_feature_valueerror(self):
image_format = 'RAW'
    image_dtype = tf.bfloat16.as_numpy_dtype
    image_np = fake_feature_generator.generate_image_np(1, 1, 1)
    image_np = image_np.astype(image_dtype)
expected_image_bytes = image_utils.encode_image(image_np, image_format)
example_builder = tf_example_builder.TfExampleBuilder()
with self.assertRaises(ValueError):
example_builder.add_encoded_image_feature(expected_image_bytes,
image_format)
@parameterized.product(
miss_image_format=(True, False),
miss_height=(True, False),
miss_width=(True, False),
miss_num_channels=(True, False),
miss_label=(True, False))
def test_add_encoded_image_feature_success(self, miss_image_format,
miss_height, miss_width,
miss_num_channels,
miss_label):
height = 64
width = 64
num_channels = 3
image_format = 'PNG'
image_np = fake_feature_generator.generate_image_np(height, width,
num_channels)
image_bytes = image_utils.encode_image(image_np, image_format)
    # We pass a fixed image_source_id instead of testing the auto-generated one
    # because the encoding process is non-deterministic.
hashed_image = bytes('10242048', 'ascii')
label = 5
image_format = None if miss_image_format else image_format
height = None if miss_height else height
width = None if miss_width else width
num_channels = None if miss_num_channels else num_channels
label = None if miss_label else label
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_encoded_image_feature(
image_bytes,
image_format=image_format,
height=height,
width=width,
num_channels=num_channels,
image_source_id=hashed_image,
label=label)
example = example_builder.example
expected_features = {
'image/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image_bytes])),
'image/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes('PNG', 'ascii')])),
'image/height':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[64])),
'image/width':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[64])),
'image/channels':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[3])),
'image/source_id':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[hashed_image]))}
if not miss_label:
expected_features.update({
'image/class/label':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[label]))})
self.assertProtoEquals(
tf.train.Example(features=tf.train.Features(feature=expected_features)),
example)
@parameterized.named_parameters(('no_box', 0), ('10_boxes', 10))
def test_add_normalized_boxes_feature(self, num_boxes):
normalized_boxes_np = fake_feature_generator.generate_normalized_boxes_np(
num_boxes)
ymins, xmins, ymaxs, xmaxs = normalized_boxes_np.T.tolist()
labels = fake_feature_generator.generate_classes_np(
2, size=num_boxes).tolist()
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_boxes_feature(
xmins, xmaxs, ymins, ymaxs, labels=labels, normalized=True)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/object/bbox/xmin':
tf.train.Feature(
float_list=tf.train.FloatList(value=xmins)),
'image/object/bbox/ymin':
tf.train.Feature(
float_list=tf.train.FloatList(value=ymins)),
'image/object/bbox/xmax':
tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs)),
'image/object/bbox/ymax':
tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs)),
'image/object/class/label':
tf.train.Feature(
int64_list=tf.train.Int64List(value=labels)),
})), example)
@parameterized.named_parameters(('no_box', 0), ('10_boxes', 10))
def test_add_box_pixels_feature(self, num_boxes):
height, width = 10, 10
boxes_np = fake_feature_generator.generate_boxes_np(height, width,
num_boxes)
ymins, xmins, ymaxs, xmaxs = boxes_np.T.tolist()
labels = fake_feature_generator.generate_classes_np(
2, size=num_boxes).tolist()
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_boxes_feature(
xmins, xmaxs, ymins, ymaxs, labels=labels, normalized=False)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/object/bbox/xmin_pixels':
tf.train.Feature(
float_list=tf.train.FloatList(value=xmins)),
'image/object/bbox/ymin_pixels':
tf.train.Feature(
float_list=tf.train.FloatList(value=ymins)),
'image/object/bbox/xmax_pixels':
tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs)),
'image/object/bbox/ymax_pixels':
tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs)),
'image/object/class/label':
tf.train.Feature(
int64_list=tf.train.Int64List(value=labels)),
})), example)
@parameterized.named_parameters(('no_box', 0), ('10_boxes', 10))
def test_add_normalized_boxes_feature_with_confidence_and_prefix(
self, num_boxes):
normalized_boxes_np = fake_feature_generator.generate_normalized_boxes_np(
num_boxes)
ymins, xmins, ymaxs, xmaxs = normalized_boxes_np.T.tolist()
labels = fake_feature_generator.generate_classes_np(
2, size=num_boxes).tolist()
confidences = fake_feature_generator.generate_confidences_np(
size=num_boxes).tolist()
feature_prefix = 'predicted'
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_boxes_feature(
xmins,
xmaxs,
ymins,
ymaxs,
labels=labels,
confidences=confidences,
normalized=True,
feature_prefix=feature_prefix)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'predicted/image/object/bbox/xmin':
tf.train.Feature(
float_list=tf.train.FloatList(value=xmins)),
'predicted/image/object/bbox/ymin':
tf.train.Feature(
float_list=tf.train.FloatList(value=ymins)),
'predicted/image/object/bbox/xmax':
tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs)),
'predicted/image/object/bbox/ymax':
tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs)),
'predicted/image/object/class/label':
tf.train.Feature(
int64_list=tf.train.Int64List(value=labels)),
'predicted/image/object/bbox/confidence':
tf.train.Feature(
float_list=tf.train.FloatList(value=confidences)),
})), example)
@parameterized.named_parameters(('no_mask', 128, 64, 0),
('10_masks', 64, 128, 10))
def test_add_instance_mask_matrices_feature_success(self, height, width,
num_masks):
# Prepare test data.
instance_masks_np = fake_feature_generator.generate_instance_masks_np(
height,
width,
boxes_np=fake_feature_generator.generate_boxes_np(
height, width, num_masks),
normalized=False)
expected_instance_masks_bytes = list(
map(lambda x: image_utils.encode_image(x, 'PNG'), instance_masks_np))
# Run code logic.
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_instance_mask_matrices_feature(instance_masks_np)
example = example_builder.example
# Verify outputs.
# Prefer to use string literal for feature keys to directly display the
# structure of the expected tf.train.Example.
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/object/mask':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=expected_instance_masks_bytes)),
'image/object/area':
# The box area is 4x smaller than the image, and the
# mask area is 2x smaller than the box.
tf.train.Feature(
float_list=tf.train.FloatList(
value=[height * width / 8] * num_masks))
})), example)
@parameterized.named_parameters(('with_mask_areas', True),
('without_mask_areas', False))
def test_add_encoded_instance_masks_feature_success(self, has_mask_areas):
height = 64
width = 64
image_format = 'PNG'
mask_np = fake_feature_generator.generate_semantic_mask_np(height, width, 2)
mask_bytes = image_utils.encode_image(mask_np, image_format)
test_masks = [mask_bytes for _ in range(2)]
mask_areas = [2040., 2040.] if has_mask_areas else None
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_encoded_instance_masks_feature(
test_masks, mask_areas=mask_areas)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/object/mask':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=test_masks)),
'image/object/area':
tf.train.Feature(
float_list=tf.train.FloatList(
value=[2040., 2040.])),
})), example)
@parameterized.named_parameters(
('with_visualization_mask', 128, 64, True),
('without_visualization_mask', 64, 128, False))
def test_add_semantic_mask_matrices_feature_success(self, height, width,
has_visualization_mask):
# Prepare test data.
semantic_mask_np = fake_feature_generator.generate_semantic_mask_np(
height, width, 2)
image_format = 'PNG'
expected_feature_dict = {
'image/segmentation/class/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[
image_utils.encode_image(semantic_mask_np, image_format)
])),
'image/segmentation/class/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
}
visualization_mask_np = None
if has_visualization_mask:
visualization_mask_np = fake_feature_generator.generate_image_np(
height, width)
expected_feature_dict.update({
'image/segmentation/class/visualization/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[
image_utils.encode_image(visualization_mask_np,
image_format)
])),
'image/segmentation/class/visualization/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
})
# Run code logic.
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_semantic_mask_matrix_feature(semantic_mask_np,
image_format,
visualization_mask_np,
image_format)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(feature=expected_feature_dict)), example)
@parameterized.named_parameters(('with_visualization_mask', True),
('without_visualization_mask', False))
def test_add_encoded_semantic_mask_feature_success(self,
has_visualization_mask):
height, width = 64, 64
semantic_mask_np = fake_feature_generator.generate_semantic_mask_np(
height, width, 2)
image_format = 'PNG'
encoded_semantic_mask = image_utils.encode_image(semantic_mask_np,
image_format)
expected_feature_dict = {
'image/segmentation/class/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[encoded_semantic_mask])),
'image/segmentation/class/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
}
encoded_visualization_mask = None
if has_visualization_mask:
visualization_mask_np = fake_feature_generator.generate_image_np(
height, width)
encoded_visualization_mask = image_utils.encode_image(
visualization_mask_np, image_format)
expected_feature_dict.update({
'image/segmentation/class/visualization/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[encoded_visualization_mask])),
'image/segmentation/class/visualization/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
})
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_encoded_semantic_mask_feature(
encoded_semantic_mask, image_format, encoded_visualization_mask,
image_format)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(feature=expected_feature_dict)), example)
def test_add_panoptic_mask_matrices_feature_success(self):
# Prepare test data.
height, width, num_instances = 64, 64, 10
num_thing_classes, num_semantic_segmentation_classes = 3, 6
image_format = 'PNG'
normalized_boxes_np = fake_feature_generator.generate_normalized_boxes_np(
num_instances)
instance_masks_np = fake_feature_generator.generate_instance_masks_np(
height, width, normalized_boxes_np)
instance_classes_np = fake_feature_generator.generate_classes_np(
num_thing_classes, num_instances)
semantic_mask_np = fake_feature_generator.generate_semantic_mask_np(
height, width, num_semantic_segmentation_classes)
panoptic_category_mask_np, panoptic_instance_mask_np = (
fake_feature_generator.generate_panoptic_masks_np(
semantic_mask_np, instance_masks_np, instance_classes_np,
num_thing_classes - 1))
# Run code logic.
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_panoptic_mask_matrix_feature(panoptic_category_mask_np,
panoptic_instance_mask_np,
image_format,
image_format)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/panoptic/category/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[
image_utils.encode_image(
panoptic_category_mask_np, image_format)
])),
'image/panoptic/category/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
'image/panoptic/instance/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[
image_utils.encode_image(
panoptic_instance_mask_np, image_format)
])),
'image/panoptic/instance/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
})), example)
def test_add_encoded_panoptic_mask_feature_success(self):
# Prepare test data.
height, width, num_instances = 64, 64, 10
num_thing_classes, num_semantic_segmentation_classes = 3, 6
image_format = 'PNG'
normalized_boxes_np = fake_feature_generator.generate_normalized_boxes_np(
num_instances)
instance_masks_np = fake_feature_generator.generate_instance_masks_np(
height, width, normalized_boxes_np)
instance_classes_np = fake_feature_generator.generate_classes_np(
num_thing_classes, num_instances)
semantic_mask_np = fake_feature_generator.generate_semantic_mask_np(
height, width, num_semantic_segmentation_classes)
panoptic_category_mask_np, panoptic_instance_mask_np = (
fake_feature_generator.generate_panoptic_masks_np(
semantic_mask_np, instance_masks_np, instance_classes_np,
num_thing_classes - 1))
encoded_panoptic_category_mask = image_utils.encode_image(
panoptic_category_mask_np, image_format)
encoded_panoptic_instance_mask = image_utils.encode_image(
panoptic_instance_mask_np, image_format)
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_encoded_panoptic_mask_feature(
encoded_panoptic_category_mask, encoded_panoptic_instance_mask,
image_format, image_format)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'image/panoptic/category/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[encoded_panoptic_category_mask])),
'image/panoptic/category/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
'image/panoptic/instance/encoded':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[encoded_panoptic_instance_mask])),
'image/panoptic/instance/format':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[bytes(image_format, 'ascii')])),
})), example)
if __name__ == '__main__':
tf.test.main()
| 27,976 | 42.577882 | 80 | py |
models | models-master/official/vision/data/image_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image-related utilities that are useful to prepare dataset."""
import dataclasses
import imghdr
import io
from typing import Optional, Tuple
import numpy as np
from PIL import Image
@dataclasses.dataclass
class ImageFormat:
"""Supported image formats.
For model development, this library should support the same image formats as
`tf.io.decode_image`[1].
[1]: https://www.tensorflow.org/api_docs/python/tf/io/decode_image
"""
bmp: str = 'BMP'
png: str = 'PNG'
jpeg: str = 'JPEG'
raw: str = 'RAW'
def validate_image_format(format_str: str) -> str:
"""Validates `format_str` and returns canonical format.
  This function accepts an image format in lower case and returns the
  upper-case string as the canonical format.
Args:
format_str: Image format string.
Returns:
Canonical image format string.
Raises:
ValueError: If the canonical format is not listed in `ImageFormat`.
"""
canonical_format = format_str.upper()
if canonical_format in dataclasses.asdict(ImageFormat()).values():
return canonical_format
raise ValueError(f'Image format is invalid: {format_str}')
def encode_image(image_np: np.ndarray, image_format: str) -> bytes:
"""Encodes `image_np` specified by `image_format`.
Args:
image_np: Numpy image array.
    image_format: Image format string used to encode the image, e.g. 'PNG'.
Returns:
Encoded image string.
"""
if image_format == 'RAW':
return image_np.tobytes()
if len(image_np.shape) > 2 and image_np.shape[2] == 1:
image_pil = Image.fromarray(np.squeeze(image_np), 'L')
else:
image_pil = Image.fromarray(image_np)
with io.BytesIO() as output:
image_pil.save(output, format=validate_image_format(image_format))
return output.getvalue()
def decode_image(image_bytes: bytes,
image_format: Optional[str] = None,
image_dtype: str = 'uint8') -> np.ndarray:
"""Decodes image_bytes into numpy array."""
if image_format == 'RAW':
return np.frombuffer(image_bytes, dtype=image_dtype)
image_pil = Image.open(io.BytesIO(image_bytes))
image_np = np.array(image_pil)
if len(image_np.shape) < 3:
image_np = image_np[..., np.newaxis]
return image_np
def decode_image_metadata(image_bytes: bytes) -> Tuple[int, int, int, str]:
"""Decodes image metadata from encoded image string.
Note that if the image is encoded in RAW format, the metadata cannot be
inferred from the image bytes.
Args:
image_bytes: Encoded image string.
Returns:
A tuple of height, width, number of channels, and encoding format.
"""
image_np = decode_image(image_bytes)
# https://pillow.readthedocs.io/en/stable/reference/Image.html#image-attributes
height, width, num_channels = image_np.shape
image_format = imghdr.what(file=None, h=image_bytes)
return height, width, num_channels, validate_image_format(image_format)
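# Illustrative round-trip sketch (an addition for clarity, not part of the
# library API); the synthetic random image below is a stand-in for real data.
# PNG encoding is lossless, so the decoded matrix matches the input and the
# metadata can be recovered from the encoded bytes alone.
if __name__ == '__main__':
  _image_np = np.random.randint(0, 256, size=(64, 32, 3), dtype=np.uint8)
  _encoded = encode_image(_image_np, 'PNG')
  _decoded = decode_image(_encoded)
  assert (_decoded == _image_np).all()
  assert decode_image_metadata(_encoded) == (64, 32, 3, 'PNG')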
| 3,503 | 29.736842 | 81 | py |
models | models-master/official/vision/data/image_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_utils."""
import imghdr
from unittest import mock
from absl.testing import parameterized
import tensorflow as tf
from official.vision.data import fake_feature_generator
from official.vision.data import image_utils
class ImageUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
('RGB_PNG', 128, 64, 3, 'PNG'), ('RGB_JPEG', 2, 1, 3, 'JPEG'),
('GREY_BMP', 32, 32, 1, 'BMP'), ('GREY_PNG', 128, 128, 1, 'png'))
def test_encode_image_then_decode_image(self, height, width, num_channels,
image_format):
image_np = fake_feature_generator.generate_image_np(height, width,
num_channels)
image_str = image_utils.encode_image(image_np, image_format)
actual_image_np = image_utils.decode_image(image_str)
    # JPEG encoding is lossy, so pixel values are not preserved exactly.
if image_format != 'JPEG':
self.assertAllClose(actual_image_np, image_np)
self.assertEqual(actual_image_np.shape, image_np.shape)
@parameterized.named_parameters(
('RGB_RAW', 128, 64, 3, tf.bfloat16.as_numpy_dtype),
('GREY_RAW', 32, 32, 1, tf.uint8.as_numpy_dtype))
def test_encode_raw_image_then_decode_raw_image(self, height, width,
num_channels, image_dtype):
image_np = fake_feature_generator.generate_image_np(height, width,
num_channels)
image_np = image_np.astype(image_dtype)
image_str = image_utils.encode_image(image_np, 'RAW')
actual_image_np = image_utils.decode_image(image_str, 'RAW', image_dtype)
actual_image_np = actual_image_np.reshape([height, width, num_channels])
self.assertAllClose(actual_image_np, image_np)
self.assertEqual(actual_image_np.shape, image_np.shape)
@parameterized.named_parameters(
('RGB_PNG', 128, 64, 3, 'PNG'), ('RGB_JPEG', 64, 128, 3, 'JPEG'),
('GREY_BMP', 32, 32, 1, 'BMP'), ('GREY_PNG', 128, 128, 1, 'png'))
def test_encode_image_then_decode_image_metadata(self, height, width,
num_channels, image_format):
image_np = fake_feature_generator.generate_image_np(height, width,
num_channels)
image_str = image_utils.encode_image(image_np, image_format)
(actual_height, actual_width, actual_num_channels, actual_format) = (
image_utils.decode_image_metadata(image_str))
self.assertEqual(actual_height, height)
self.assertEqual(actual_width, width)
self.assertEqual(actual_num_channels, num_channels)
self.assertEqual(actual_format, image_format.upper())
def test_encode_image_raise_error_with_invalid_image_format(self):
with self.assertRaisesRegex(ValueError, 'Image format is invalid: foo'):
image_np = fake_feature_generator.generate_image_np(2, 2, 1)
image_utils.encode_image(image_np, 'foo')
@mock.patch.object(imghdr, 'what', return_value='foo', autospec=True)
def test_decode_image_raise_error_with_invalid_image_format(self, _):
image_np = fake_feature_generator.generate_image_np(1, 1, 3)
image_str = image_utils.encode_image(image_np, 'PNG')
with self.assertRaisesRegex(ValueError, 'Image format is invalid: foo'):
image_utils.decode_image_metadata(image_str)
if __name__ == '__main__':
tf.test.main()
| 4,061 | 45.159091 | 79 | py |
models | models-master/official/vision/data/tfrecord_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfrecord_lib."""
import os
from absl import flags
from absl.testing import parameterized
import tensorflow as tf
from official.vision.data import create_coco_tf_record as create_coco_tf_record_lib
from official.vision.data import tfrecord_lib
FLAGS = flags.FLAGS
def process_sample(x):
d = {'x': x}
return tf.train.Example(features=tf.train.Features(feature=d)), 0
def parse_function(example_proto):
feature_description = {
'x': tf.io.FixedLenFeature([], tf.int64, default_value=-1)
}
return tf.io.parse_single_example(example_proto, feature_description)
class TfrecordLibTest(parameterized.TestCase):
def test_write_tf_record_dataset(self):
data = [(tfrecord_lib.convert_to_feature(i),) for i in range(17)]
path = os.path.join(FLAGS.test_tmpdir, 'train')
tfrecord_lib.write_tf_record_dataset(
path, data, process_sample, 3, multiple_processes=0)
tfrecord_files = tf.io.gfile.glob(path + '*')
self.assertLen(tfrecord_files, 3)
dataset = tf.data.TFRecordDataset(tfrecord_files)
dataset = dataset.map(parse_function)
read_values = set(d['x'] for d in dataset.as_numpy_iterator())
self.assertSetEqual(read_values, set(range(17)))
def test_convert_to_feature_float(self):
proto = tfrecord_lib.convert_to_feature(0.0)
self.assertEqual(proto.float_list.value[0], 0.0)
def test_convert_to_feature_int(self):
proto = tfrecord_lib.convert_to_feature(0)
self.assertEqual(proto.int64_list.value[0], 0)
def test_convert_to_feature_bytes(self):
proto = tfrecord_lib.convert_to_feature(b'123')
self.assertEqual(proto.bytes_list.value[0], b'123')
def test_convert_to_feature_float_list(self):
proto = tfrecord_lib.convert_to_feature([0.0, 1.0])
self.assertSequenceAlmostEqual(proto.float_list.value, [0.0, 1.0])
def test_convert_to_feature_int_list(self):
proto = tfrecord_lib.convert_to_feature([0, 1])
self.assertSequenceAlmostEqual(proto.int64_list.value, [0, 1])
def test_convert_to_feature_bytes_list(self):
proto = tfrecord_lib.convert_to_feature([b'123', b'456'])
self.assertSequenceAlmostEqual(proto.bytes_list.value, [b'123', b'456'])
def test_obj_annotation_tf_example(self):
images = [
{
'id': 0,
'file_name': 'example1.jpg',
'height': 512,
'width': 512,
},
{
'id': 1,
'file_name': 'example2.jpg',
'height': 512,
'width': 512,
},
]
img_to_obj_annotation = {
0: [{
'id': 0,
'image_id': 0,
'category_id': 1,
'bbox': [3, 1, 511, 510],
'area': 260610.00,
'segmentation': [],
'iscrowd': 0,
}],
1: [{
'id': 1,
'image_id': 1,
'category_id': 1,
'bbox': [1, 1, 100, 150],
'area': 15000.00,
'segmentation': [],
'iscrowd': 0,
}],
}
id_to_name_map = {
0: 'Super-Class',
1: 'Class-1',
}
temp_dir = FLAGS.test_tmpdir
image_dir = os.path.join(temp_dir, 'data')
if not os.path.exists(image_dir):
os.mkdir(image_dir)
for image in images:
image_path = os.path.join(image_dir, image['file_name'])
tf.keras.utils.save_img(
image_path,
tf.ones(shape=(image['height'], image['width'], 3)).numpy(),
)
output_path = os.path.join(image_dir, 'train')
coco_annotations_iter = create_coco_tf_record_lib.generate_annotations(
images=images,
image_dirs=[image_dir],
panoptic_masks_dir=None,
img_to_obj_annotation=img_to_obj_annotation,
img_to_caption_annotation=None,
img_to_panoptic_annotation=None,
is_category_thing=None,
id_to_name_map=id_to_name_map,
include_panoptic_masks=False,
include_masks=False,
)
tfrecord_lib.write_tf_record_dataset(
output_path,
coco_annotations_iter,
create_coco_tf_record_lib.create_tf_example,
1,
multiple_processes=0,
)
tfrecord_files = tf.io.gfile.glob(output_path + '*')
self.assertLen(tfrecord_files, 1)
ds = tf.data.TFRecordDataset(tfrecord_files)
assertion_count = 0
for _ in ds:
assertion_count += 1
self.assertEqual(assertion_count, 2)
if __name__ == '__main__':
tf.test.main()
| 5,071 | 27.494382 | 83 | py |
models | models-master/official/vision/data/tf_example_feature_key.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data classes for tf.Example proto feature keys in vision tasks.
Feature keys are grouped by feature types. Key names follow conventions in
go/tf-example.
"""
import dataclasses
import functools
from official.core import tf_example_feature_key
# Disable init function to use the one defined in base class.
dataclass = functools.partial(dataclasses.dataclass(init=False))
@dataclass
class EncodedImageFeatureKey(tf_example_feature_key.TfExampleFeatureKeyBase):
"""Feature keys for a single encoded image.
The image matrix is expected to be in the shape of (height, width,
num_channels).
Attributes:
encoded: encoded image bytes.
format: format string, e.g. 'PNG'.
height: number of rows.
width: number of columns.
num_channels: number of channels.
source_id: Unique string ID to identify the image.
label: the label or a list of labels for the image.
"""
encoded: str = 'image/encoded'
format: str = 'image/format'
height: str = 'image/height'
width: str = 'image/width'
num_channels: str = 'image/channels'
source_id: str = 'image/source_id'
label: str = 'image/class/label'
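# Illustrative note (not part of the library): the base class constructor
# accepts an optional feature prefix, which is prepended to every key. Assuming
# the behavior exercised in tf_example_builder.py and its tests:
#
#   EncodedImageFeatureKey().encoded        -> 'image/encoded'
#   EncodedImageFeatureKey('depth').encoded -> 'depth/image/encoded'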
@dataclass
class BoxFeatureKey(tf_example_feature_key.TfExampleFeatureKeyBase):
"""Feature keys for normalized boxes representing objects in an image.
Each box is defined by ((ymin, xmin), (ymax, xmax)).
The origin point of an image matrix is top left.
  Note: The coordinate values are normalized to [0, 1]; this convention is
  adopted by most model implementations.
Attributes:
xmin: The x coordinate (column) of top-left corner.
xmax: The x coordinate (column) of bottom-right corner.
ymin: The y coordinate (row) of top-left corner.
ymax: The y coordinate (row) of bottom-right corner.
label: The class id.
confidence: The confidence score of the box, could be prior score (for
training) or predicted score (for prediction).
"""
xmin: str = 'image/object/bbox/xmin'
xmax: str = 'image/object/bbox/xmax'
ymin: str = 'image/object/bbox/ymin'
ymax: str = 'image/object/bbox/ymax'
label: str = 'image/object/class/label'
confidence: str = 'image/object/bbox/confidence'
@dataclass
class BoxPixelFeatureKey(tf_example_feature_key.TfExampleFeatureKeyBase):
"""Feature keys for boxes in pixel values representing objects in an image.
Each box is defined by ((ymin, xmin), (ymax, xmax)).
Note: The coordinate values are in the scale of the context image. The image
size is usually stored in `EncodedImageFeatureKey`.
Attributes:
xmin: The x coordinate (column) of top-left corner.
xmax: The x coordinate (column) of bottom-right corner.
ymin: The y coordinate (row) of top-left corner.
ymax: The y coordinate (row) of bottom-right corner.
label: The class id.
confidence: The confidence score of the box, could be prior score (for
training) or predicted score (for prediction).
"""
xmin: str = 'image/object/bbox/xmin_pixels'
xmax: str = 'image/object/bbox/xmax_pixels'
ymin: str = 'image/object/bbox/ymin_pixels'
ymax: str = 'image/object/bbox/ymax_pixels'
label: str = 'image/object/class/label'
confidence: str = 'image/object/bbox/confidence'
@dataclass
class EncodedInstanceMaskFeatureKey(
tf_example_feature_key.TfExampleFeatureKeyBase):
"""Feature keys for a single encoded instance mask.
The instance mask matrices are expected to be in the shape of (num_instances,
  height, width, 1) or (num_instances, height, width). The height and width
  correspond to the image height and width. For each instance mask, the pixel
  value is either 0, representing the background, or 1, representing the
  object.
TODO(b/223653024): Add keys for visualization mask as well.
Attributes:
mask: Encoded instance mask bytes.
area: Total number of pixels that are marked as objects.
"""
mask: str = 'image/object/mask'
area: str = 'image/object/area'
@dataclass
class EncodedSemanticMaskFeatureKey(
tf_example_feature_key.TfExampleFeatureKeyBase):
"""Feature keys for a encoded semantic mask and its associated images.
The semantic mask matrix is expected to be in the shape of (height, width, 1)
or (height, width). The visualization mask matrix is expected to be in the
shape of (height, width, 3). The height and width correspond to the image
  height and width. Each pixel in the semantic mask represents a class.
Attributes:
mask: Encoded semantic mask bytes.
mask_format: Format string for semantic mask, e.g. 'PNG'.
visualization_mask: Encoded visualization mask bytes.
visualization_mask_format: Format string for visualization mask, e.g.
'PNG'.
"""
mask: str = 'image/segmentation/class/encoded'
mask_format: str = 'image/segmentation/class/format'
visualization_mask: str = 'image/segmentation/class/visualization/encoded'
visualization_mask_format: str = 'image/segmentation/class/visualization/format'
@dataclass
class EncodedPanopticMaskFeatureKey(
tf_example_feature_key.TfExampleFeatureKeyBase):
"""Feature keys for encoded panoptic category and instance masks.
Both panoptic mask matrices are expected to be in the shape of (height, width,
1) or (height, width). The height and width correspond to the image height and
width. For category mask, each pixel represents a class ID, and for instance
mask, each pixel represents an instance ID.
TODO(b/223653024): Add keys for visualization mask as well.
Attributes:
category_mask: Encoded panoptic category mask bytes.
category_mask_format: Format string for panoptic category mask, e.g.
'PNG'.
instance_mask: Encoded panoptic instance mask bytes.
instance_mask_format: Format string for panoptic instance mask, e.g.
'PNG'.
"""
category_mask: str = 'image/panoptic/category/encoded'
category_mask_format: str = 'image/panoptic/category/format'
instance_mask: str = 'image/panoptic/instance/encoded'
instance_mask_format: str = 'image/panoptic/instance/format'
| 6,677 | 37.16 | 82 | py |
models | models-master/official/vision/data/tf_example_builder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder class for preparing tf.train.Example in vision tasks."""
# https://www.python.org/dev/peps/pep-0563/#enabling-the-future-behavior-in-python-3-7
from __future__ import annotations
import hashlib
from typing import Optional, Sequence, Union
import numpy as np
from official.core import tf_example_builder
from official.vision.data import image_utils
from official.vision.data import tf_example_feature_key
BytesValueType = Union[bytes, Sequence[bytes], str, Sequence[str]]
_to_array = lambda v: [v] if not isinstance(v, (list, np.ndarray)) else v
_to_bytes = lambda v: v.encode() if isinstance(v, str) else v
_to_bytes_array = lambda v: list(map(_to_bytes, _to_array(v)))
class TfExampleBuilder(tf_example_builder.TfExampleBuilder):
"""Builder class for preparing tf.train.Example in vision task.
Read API doc at https://www.tensorflow.org/api_docs/python/tf/train/Example.
"""
def add_image_matrix_feature(
self,
image_matrix: np.ndarray,
image_format: str = 'PNG',
image_source_id: Optional[bytes] = None,
feature_prefix: Optional[str] = None,
label: Optional[Union[int, Sequence[int]]] = None) -> 'TfExampleBuilder':
"""Encodes and adds image features to the example.
See `tf_example_feature_key.EncodedImageFeatureKey` for list of feature keys
that will be added to the example.
Example usages:
>>> example_builder = TfExampleBuilder()
* For adding RGB image feature with PNG encoding:
>>> example_builder.add_image_matrix_feature(image_matrix)
* For adding RGB image feature with a pre-generated source ID.
>>> example_builder.add_image_matrix_feature(
image_matrix, image_source_id=image_source_id)
* For adding single-channel depth image feature with JPEG encoding:
>>> example_builder.add_image_matrix_feature(
image_matrix, image_format=ImageFormat.JPEG,
feature_prefix='depth')
Args:
image_matrix: Numpy image matrix with shape (height, width, channels)
image_format: Image format string, defaults to 'PNG'.
image_source_id: Unique string ID to identify the image. Hashed image will
be used if the field is not provided.
feature_prefix: Feature prefix for image features.
label: the label or a list of labels for the image.
Returns:
The builder object for subsequent method calls.
"""
encoded_image = image_utils.encode_image(image_matrix, image_format)
height, width, num_channels = image_matrix.shape
return self.add_encoded_image_feature(encoded_image, image_format, height,
width, num_channels, image_source_id,
feature_prefix, label)
def add_encoded_image_feature(
self,
encoded_image: bytes,
image_format: Optional[str] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_channels: Optional[int] = None,
image_source_id: Optional[bytes] = None,
feature_prefix: Optional[str] = None,
label: Optional[Union[int, Sequence[int]]] = None) -> 'TfExampleBuilder':
"""Adds encoded image features to the example.
See `tf_example_feature_key.EncodedImageFeatureKey` for list of feature keys
that will be added to the example.
Image format, height, width, and channels are inferred from the encoded
image bytes if any of them is not provided. Hashed image will be used if
pre-generated source ID is not provided.
Example usages:
>>> example_builder = TfExampleBuilder()
* For adding RGB image feature:
>>> example_builder.add_encoded_image_feature(image_bytes)
* For adding RGB image feature with pre-generated source ID:
>>> example_builder.add_encoded_image_feature(
image_bytes, image_source_id=image_source_id)
* For adding single-channel depth image feature:
>>> example_builder.add_encoded_image_feature(
image_bytes, feature_prefix='depth')
Args:
encoded_image: Encoded image string.
image_format: Image format string.
height: Number of rows.
width: Number of columns.
num_channels: Number of channels.
image_source_id: Unique string ID to identify the image.
feature_prefix: Feature prefix for image features.
label: the label or a list of labels for the image.
Returns:
The builder object for subsequent method calls.
"""
if image_format == 'RAW':
if not (height and width and num_channels):
raise ValueError('For raw image feature, height, width and '
'num_channels fields are required.')
if not all((height, width, num_channels, image_format)):
(height, width, num_channels, image_format) = (
image_utils.decode_image_metadata(encoded_image))
else:
image_format = image_utils.validate_image_format(image_format)
feature_key = tf_example_feature_key.EncodedImageFeatureKey(feature_prefix)
# If source ID is not provided, we use hashed encoded image as the source
# ID. Note that we only keep 24 bits to be consistent with the Model Garden
# requirement, which will transform the source ID into float32.
if not image_source_id:
hashed_image = int(hashlib.blake2s(encoded_image).hexdigest(), 16)
image_source_id = _to_bytes(str(hashed_image % ((1 << 24) + 1)))
if label is not None:
self.add_ints_feature(feature_key.label, label)
return (
self.add_bytes_feature(feature_key.encoded, encoded_image)
.add_bytes_feature(feature_key.format, image_format)
.add_ints_feature(feature_key.height, [height])
.add_ints_feature(feature_key.width, [width])
.add_ints_feature(feature_key.num_channels, num_channels)
.add_bytes_feature(feature_key.source_id, image_source_id))
def add_boxes_feature(
self,
xmins: Sequence[float],
xmaxs: Sequence[float],
ymins: Sequence[float],
ymaxs: Sequence[float],
labels: Sequence[int],
confidences: Optional[Sequence[float]] = None,
normalized: bool = True,
feature_prefix: Optional[str] = None) -> 'TfExampleBuilder':
"""Adds box and label features to the example.
Four features will be generated for xmin, ymin, xmax, and ymax. One feature
will be generated for label. Different feature keys will be used for
normalized boxes and pixel-value boxes, depending on the value of
`normalized`.
Example usages:
>>> example_builder = TfExampleBuilder()
>>> example_builder.add_boxes_feature(xmins, xmaxs, ymins, ymaxs, labels)
Args:
xmins: A list of minimum X coordinates.
xmaxs: A list of maximum X coordinates.
ymins: A list of minimum Y coordinates.
ymaxs: A list of maximum Y coordinates.
labels: The labels of added boxes.
confidences: The confidences of added boxes.
normalized: Indicate if the coordinates of boxes are normalized.
feature_prefix: Feature prefix for added box features.
Returns:
The builder object for subsequent method calls.
"""
if normalized:
feature_key = tf_example_feature_key.BoxFeatureKey(feature_prefix)
else:
feature_key = tf_example_feature_key.BoxPixelFeatureKey(feature_prefix)
self.add_floats_feature(feature_key.xmin, xmins)
self.add_floats_feature(feature_key.xmax, xmaxs)
self.add_floats_feature(feature_key.ymin, ymins)
self.add_floats_feature(feature_key.ymax, ymaxs)
self.add_ints_feature(feature_key.label, labels)
if confidences is not None:
self.add_floats_feature(feature_key.confidence, confidences)
return self
def _compute_mask_areas(
self, instance_mask_matrices: np.ndarray) -> Sequence[float]:
return np.sum(
instance_mask_matrices, axis=(1, 2, 3),
dtype=float).flatten().tolist()
def add_instance_mask_matrices_feature(
self,
instance_mask_matrices: np.ndarray,
feature_prefix: Optional[str] = None) -> 'TfExampleBuilder':
"""Encodes and adds instance mask features to the example.
See `tf_example_feature_key.EncodedInstanceMaskFeatureKey` for list of
feature keys that will be added to the example. Please note that all masks
will be encoded as PNG images.
Example usages:
>>> example_builder = TfExampleBuilder()
>>> example_builder.add_instance_mask_matrices_feature(
instance_mask_matrices)
TODO(b/223653024): Provide a way to generate visualization mask from
feature mask.
Args:
instance_mask_matrices: Numpy instance mask matrices with shape
      (num_instances, height, width, 1) or (num_instances, height, width).
feature_prefix: Feature prefix for instance mask features.
Returns:
The builder object for subsequent method calls.
"""
if len(instance_mask_matrices.shape) == 3:
instance_mask_matrices = instance_mask_matrices[..., np.newaxis]
mask_areas = self._compute_mask_areas(instance_mask_matrices)
encoded_instance_masks = list(
map(lambda x: image_utils.encode_image(x, 'PNG'),
instance_mask_matrices))
return self.add_encoded_instance_masks_feature(encoded_instance_masks,
mask_areas, feature_prefix)
def add_encoded_instance_masks_feature(
self,
encoded_instance_masks: Sequence[bytes],
mask_areas: Optional[Sequence[float]] = None,
feature_prefix: Optional[str] = None) -> 'TfExampleBuilder':
"""Adds encoded instance mask features to the example.
See `tf_example_feature_key.EncodedInstanceMaskFeatureKey` for list of
feature keys that will be added to the example.
Image area is inferred from the encoded instance mask bytes if not provided.
Example usages:
>>> example_builder = TfExampleBuilder()
>>> example_builder.add_encoded_instance_masks_feature(
instance_mask_bytes)
TODO(b/223653024): Provide a way to generate visualization mask from
feature mask.
Args:
      encoded_instance_masks: A list of encoded instance mask strings. Note
        that the encoding is not changed in this function; the masks are always
        assumed to be in "PNG" format.
mask_areas: Areas for each instance masks.
feature_prefix: Feature prefix for instance mask features.
Returns:
The builder object for subsequent method calls.
"""
encoded_instance_masks = _to_bytes_array(encoded_instance_masks)
if mask_areas is None:
instance_mask_matrices = np.array(
list(map(image_utils.decode_image, encoded_instance_masks)))
mask_areas = self._compute_mask_areas(instance_mask_matrices)
feature_key = tf_example_feature_key.EncodedInstanceMaskFeatureKey(
feature_prefix)
return (
self.add_bytes_feature(feature_key.mask, encoded_instance_masks)
.add_floats_feature(feature_key.area, mask_areas))
def add_semantic_mask_matrix_feature(
self,
mask_matrix: np.ndarray,
mask_format: str = 'PNG',
visualization_mask_matrix: Optional[np.ndarray] = None,
visualization_mask_format: str = 'PNG',
feature_prefix: Optional[str] = None) -> 'TfExampleBuilder':
"""Encodes and adds semantic mask features to the example.
See `tf_example_feature_key.EncodedSemanticMaskFeatureKey` for list of
feature keys that will be added to the example.
Example usages:
>>> example_builder = TfExampleBuilder()
* For adding semantic mask feature:
>>> example_builder.add_semantic_mask_matrix_feature(
semantic_mask_matrix)
* For adding semantic mask feature and visualization mask feature:
>>> example_builder.add_semantic_mask_matrix_feature(
semantic_mask_matrix,
visualization_mask_matrix=visualization_mask_matrix)
* For adding predicted semantic mask feature with visualization mask:
>>> example_builder.add_encoded_semantic_mask_feature(
predicted_mask_matrix,
visualization_mask_matrix=predicted_visualization_mask_matrix,
feature_prefix='predicted')
TODO(b/223653024): Provide a way to generate visualization mask from
feature mask.
Args:
mask_matrix: Numpy semantic mask matrix with shape (height, width, 1) or
(height, width).
mask_format: Mask format string, defaults to 'PNG'.
visualization_mask_matrix: Numpy visualization mask matrix for semantic
mask with shape (height, width, 3).
visualization_mask_format: Visualization mask format string, defaults to
'PNG'.
feature_prefix: Feature prefix for semantic mask features.
Returns:
The builder object for subsequent method calls.
"""
if len(mask_matrix.shape) == 2:
mask_matrix = mask_matrix[..., np.newaxis]
encoded_mask = image_utils.encode_image(mask_matrix, mask_format)
encoded_visualization_mask = None
if visualization_mask_matrix is not None:
encoded_visualization_mask = image_utils.encode_image(
visualization_mask_matrix, visualization_mask_format)
return self.add_encoded_semantic_mask_feature(encoded_mask, mask_format,
encoded_visualization_mask,
visualization_mask_format,
feature_prefix)
def add_encoded_semantic_mask_feature(
self, encoded_mask: bytes,
mask_format: str = 'PNG',
encoded_visualization_mask: Optional[bytes] = None,
visualization_mask_format: str = 'PNG',
feature_prefix: Optional[str] = None) -> 'TfExampleBuilder':
"""Adds encoded semantic mask features to the example.
See `tf_example_feature_key.EncodedSemanticMaskFeatureKey` for list of
feature keys that will be added to the example.
Example usages:
>>> example_builder = TfExampleBuilder()
* For adding semantic mask feature:
>>> example_builder.add_encoded_semantic_mask_feature(semantic_mask_bytes)
* For adding semantic mask feature and visualization mask feature:
>>> example_builder.add_encoded_semantic_mask_feature(
semantic_mask_bytes,
encoded_visualization_mask=visualization_mask_bytes)
* For adding predicted semantic mask feature with visualization mask:
>>> example_builder.add_encoded_semantic_mask_feature(
predicted_mask_bytes,
encoded_visualization_mask=predicted_visualization_mask_bytes,
feature_prefix='predicted')
TODO(b/223653024): Provide a way to generate visualization mask from
feature mask.
Args:
encoded_mask: Encoded semantic mask string.
mask_format: Semantic mask format string, defaults to 'PNG'.
encoded_visualization_mask: Encoded visualization mask string.
visualization_mask_format: Visualization mask format string, defaults to
'PNG'.
feature_prefix: Feature prefix for semantic mask features.
Returns:
The builder object for subsequent method calls.
"""
feature_key = tf_example_feature_key.EncodedSemanticMaskFeatureKey(
feature_prefix)
example_builder = (
self.add_bytes_feature(feature_key.mask, encoded_mask)
.add_bytes_feature(feature_key.mask_format, mask_format))
if encoded_visualization_mask is not None:
example_builder = (
example_builder.add_bytes_feature(
feature_key.visualization_mask, encoded_visualization_mask)
.add_bytes_feature(
feature_key.visualization_mask_format, visualization_mask_format))
return example_builder
def add_panoptic_mask_matrix_feature(
self,
panoptic_category_mask_matrix: np.ndarray,
panoptic_instance_mask_matrix: np.ndarray,
panoptic_category_mask_format: str = 'PNG',
panoptic_instance_mask_format: str = 'PNG',
feature_prefix: Optional[str] = None) -> 'TfExampleBuilder':
"""Encodes and adds panoptic mask features to the example.
See `tf_example_feature_key.EncodedPanopticMaskFeatureKey` for list of
feature keys that will be added to the example.
Example usages:
>>> example_builder = TfExampleBuilder()
>>> example_builder.add_panoptic_mask_matrix_feature(
panoptic_category_mask_matrix, panoptic_instance_mask_matrix)
TODO(b/223653024): Provide a way to generate visualization mask from
feature mask.
Args:
panoptic_category_mask_matrix: Numpy panoptic category mask matrix with
shape (height, width, 1) or (height, width).
panoptic_instance_mask_matrix: Numpy panoptic instance mask matrix with
shape (height, width, 1) or (height, width).
panoptic_category_mask_format: Panoptic category mask format string,
defaults to 'PNG'.
panoptic_instance_mask_format: Panoptic instance mask format string,
defaults to 'PNG'.
feature_prefix: Feature prefix for panoptic mask features.
Returns:
The builder object for subsequent method calls.
"""
if len(panoptic_category_mask_matrix.shape) == 2:
panoptic_category_mask_matrix = (
panoptic_category_mask_matrix[..., np.newaxis])
if len(panoptic_instance_mask_matrix.shape) == 2:
panoptic_instance_mask_matrix = (
panoptic_instance_mask_matrix[..., np.newaxis])
encoded_panoptic_category_mask = image_utils.encode_image(
panoptic_category_mask_matrix, panoptic_category_mask_format)
encoded_panoptic_instance_mask = image_utils.encode_image(
panoptic_instance_mask_matrix, panoptic_instance_mask_format)
return self.add_encoded_panoptic_mask_feature(
encoded_panoptic_category_mask, encoded_panoptic_instance_mask,
panoptic_category_mask_format, panoptic_instance_mask_format,
feature_prefix)
def add_encoded_panoptic_mask_feature(
self,
encoded_panoptic_category_mask: bytes,
encoded_panoptic_instance_mask: bytes,
panoptic_category_mask_format: str = 'PNG',
panoptic_instance_mask_format: str = 'PNG',
feature_prefix: Optional[str] = None) -> 'TfExampleBuilder':
"""Adds encoded panoptic mask features to the example.
See `tf_example_feature_key.EncodedPanopticMaskFeatureKey` for list of
feature keys that will be added to the example.
Example usages:
>>> example_builder = TfExampleBuilder()
>>> example_builder.add_encoded_panoptic_mask_feature(
encoded_panoptic_category_mask, encoded_panoptic_instance_mask)
TODO(b/223653024): Provide a way to generate visualization mask from
feature mask.
Args:
encoded_panoptic_category_mask: Encoded panoptic category mask string.
encoded_panoptic_instance_mask: Encoded panoptic instance mask string.
panoptic_category_mask_format: Panoptic category mask format string,
defaults to 'PNG'.
panoptic_instance_mask_format: Panoptic instance mask format string,
defaults to 'PNG'.
feature_prefix: Feature prefix for panoptic mask features.
Returns:
The builder object for subsequent method calls.
"""
feature_key = tf_example_feature_key.EncodedPanopticMaskFeatureKey(
feature_prefix)
return (
self.add_bytes_feature(
feature_key.category_mask, encoded_panoptic_category_mask)
.add_bytes_feature(
feature_key.category_mask_format, panoptic_category_mask_format)
.add_bytes_feature(
feature_key.instance_mask, encoded_panoptic_instance_mask)
.add_bytes_feature(
feature_key.instance_mask_format, panoptic_instance_mask_format))
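# Illustrative end-to-end sketch (an addition for clarity, not part of the
# library): build one detection example with a synthetic image, one normalized
# box, and a label, then serialize it for a TFRecord writer. The random image
# and the box values are stand-ins for real data.
if __name__ == '__main__':
  _image_np = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
  _builder = TfExampleBuilder()
  _builder.add_image_matrix_feature(_image_np, image_format='PNG')
  _builder.add_boxes_feature(
      xmins=[0.1], xmaxs=[0.5], ymins=[0.2], ymaxs=[0.6], labels=[3],
      normalized=True)
  _serialized = _builder.example.SerializeToString()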
| 20,494 | 40.572008 | 86 | py |
models | models-master/official/vision/data/tfrecord_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for creating TFRecord datasets."""
import hashlib
import io
import itertools
from absl import logging
import numpy as np
from PIL import Image
import tensorflow as tf
import multiprocessing as mp
LOG_EVERY = 100
def convert_to_feature(value, value_type=None):
"""Converts the given python object to a tf.train.Feature.
Args:
value: int, float, bytes or a list of them.
value_type: optional, if specified, forces the feature to be of the given
type. Otherwise, type is inferred automatically. Can be one of
['bytes', 'int64', 'float', 'bytes_list', 'int64_list', 'float_list']
Returns:
feature: A tf.train.Feature object.
"""
if value_type is None:
element = value[0] if isinstance(value, list) else value
if isinstance(element, bytes):
value_type = 'bytes'
elif isinstance(element, (int, np.integer)):
value_type = 'int64'
elif isinstance(element, (float, np.floating)):
value_type = 'float'
else:
raise ValueError('Cannot convert type {} to feature'.
format(type(element)))
if isinstance(value, list):
value_type = value_type + '_list'
if value_type == 'int64':
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
elif value_type == 'int64_list':
value = np.asarray(value).astype(np.int64).reshape(-1)
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
elif value_type == 'float':
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
elif value_type == 'float_list':
value = np.asarray(value).astype(np.float32).reshape(-1)
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
elif value_type == 'bytes':
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
elif value_type == 'bytes_list':
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
else:
raise ValueError('Unknown value_type parameter - {}'.format(value_type))
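# Illustrative type-inference examples (mirroring tfrecord_lib_test.py):
#   convert_to_feature(0)              -> int64_list {value: [0]}
#   convert_to_feature(0.0)            -> float_list {value: [0.0]}
#   convert_to_feature(b'123')         -> bytes_list {value: ['123']}
#   convert_to_feature([0, 1])         -> int64_list {value: [0, 1]}
#   convert_to_feature([b'a', b'b'])   -> bytes_list {value: ['a', 'b']}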
def image_info_to_feature_dict(height, width, filename, image_id,
encoded_str, encoded_format):
"""Convert image information to a dict of features."""
key = hashlib.sha256(encoded_str).hexdigest()
return {
'image/height': convert_to_feature(height),
'image/width': convert_to_feature(width),
'image/filename': convert_to_feature(filename.encode('utf8')),
'image/source_id': convert_to_feature(str(image_id).encode('utf8')),
'image/key/sha256': convert_to_feature(key.encode('utf8')),
'image/encoded': convert_to_feature(encoded_str),
'image/format': convert_to_feature(encoded_format.encode('utf8')),
}
def read_image(image_path):
pil_image = Image.open(image_path)
return np.asarray(pil_image)
def encode_mask_as_png(mask):
pil_image = Image.fromarray(mask)
output_io = io.BytesIO()
pil_image.save(output_io, format='PNG')
return output_io.getvalue()
def write_tf_record_dataset(output_path, annotation_iterator,
process_func, num_shards,
multiple_processes=None, unpack_arguments=True):
"""Iterates over annotations, processes them and writes into TFRecords.
Args:
output_path: The prefix path to create TF record files.
annotation_iterator: An iterator of tuples containing details about the
dataset.
process_func: A function which takes the elements from the tuples of
annotation_iterator as arguments and returns a tuple of (tf.train.Example,
int). The integer indicates the number of annotations that were skipped.
num_shards: int, the number of shards to write for the dataset.
    multiple_processes: integer, the number of parallel processes to use. If
      None, uses multi-processing with the number of processes equal to
      `os.cpu_count()`, which is Python's default behavior. If set to 0,
      multi-processing is disabled.
    unpack_arguments: Whether to unpack the tuples from annotation_iterator as
      individual arguments to process_func or to pass each tuple as a single
      argument.
Returns:
num_skipped: The total number of skipped annotations.
"""
writers = [
tf.io.TFRecordWriter(
output_path + '-%05d-of-%05d.tfrecord' % (i, num_shards))
for i in range(num_shards)
]
total_num_annotations_skipped = 0
if multiple_processes is None or multiple_processes > 0:
pool = mp.Pool(
processes=multiple_processes)
if unpack_arguments:
tf_example_iterator = pool.starmap(process_func, annotation_iterator)
else:
tf_example_iterator = pool.imap(process_func, annotation_iterator)
else:
if unpack_arguments:
tf_example_iterator = itertools.starmap(process_func, annotation_iterator)
else:
tf_example_iterator = map(process_func, annotation_iterator)
for idx, (tf_example, num_annotations_skipped) in enumerate(
tf_example_iterator):
if idx % LOG_EVERY == 0:
logging.info('On image %d', idx)
total_num_annotations_skipped += num_annotations_skipped
writers[idx % num_shards].write(tf_example.SerializeToString())
if multiple_processes is None or multiple_processes > 0:
pool.close()
pool.join()
for writer in writers:
writer.close()
logging.info('Finished writing, skipped %d annotations.',
total_num_annotations_skipped)
return total_num_annotations_skipped
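# Example: writing a tiny classification-style dataset with
# `write_tf_record_dataset`. Illustrative sketch; the annotation tuples and
# output prefix are hypothetical, and multi-processing is disabled so that the
# locally defined `_process` function does not need to be picklable.
def _example_write_records(output_prefix):
  def _process(image_id, label):
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/source_id': convert_to_feature(
                    str(image_id).encode('utf8')),
                'image/class/label': convert_to_feature(label),
            }))
    return example, 0  # No annotations skipped.
  annotations = [(1, 0), (2, 1), (3, 0)]
  return write_tf_record_dataset(
      output_prefix, annotations, _process, num_shards=1,
      multiple_processes=0)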
def check_and_make_dir(directory):
"""Creates the directory if it doesn't exist."""
if not tf.io.gfile.isdir(directory):
tf.io.gfile.makedirs(directory)
| 6,316 | 32.42328 | 80 | py |
models | models-master/official/vision/data/process_coco_few_shot_json_files.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processes the JSON files for COCO few-shot.
We assume that `workdir` mirrors the contents of
http://dl.yf.io/fs-det/datasets/cocosplit/, which contains the official JSON
files for the few-shot COCO evaluation procedure that Wang et al. (2020)'s
"Frustratingly Simple Few-Shot Object Detection" paper uses.
"""
import collections
import itertools
import json
import logging
import os
from absl import app
from absl import flags
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.INFO)
flags.DEFINE_string('workdir', None, 'Working directory.')
FLAGS = flags.FLAGS
CATEGORIES = ['airplane', 'apple', 'backpack', 'banana', 'baseball bat',
'baseball glove', 'bear', 'bed', 'bench', 'bicycle', 'bird',
'boat', 'book', 'bottle', 'bowl', 'broccoli', 'bus', 'cake',
'car', 'carrot', 'cat', 'cell phone', 'chair', 'clock', 'couch',
'cow', 'cup', 'dining table', 'dog', 'donut', 'elephant',
'fire hydrant', 'fork', 'frisbee', 'giraffe', 'hair drier',
'handbag', 'horse', 'hot dog', 'keyboard', 'kite', 'knife',
'laptop', 'microwave', 'motorcycle', 'mouse', 'orange', 'oven',
'parking meter', 'person', 'pizza', 'potted plant',
'refrigerator', 'remote', 'sandwich', 'scissors', 'sheep',
'sink', 'skateboard', 'skis', 'snowboard', 'spoon', 'sports ball',
'stop sign', 'suitcase', 'surfboard', 'teddy bear',
'tennis racket', 'tie', 'toaster', 'toilet', 'toothbrush',
'traffic light', 'train', 'truck', 'tv', 'umbrella', 'vase',
'wine glass', 'zebra']
SEEDS = list(range(10))
SHOTS = [1, 3, 5, 10, 30]
FILE_SUFFIXES = collections.defaultdict(list)
for _seed, _shots in itertools.product(SEEDS, SHOTS):
for _category in CATEGORIES:
FILE_SUFFIXES[(_seed, _shots)].append(
'{}full_box_{}shot_{}_trainval.json'.format(
# http://dl.yf.io/fs-det/datasets/cocosplit/ is organized like so:
#
# datasplit/
# trainvalno5k.json
# 5k.json
# full_box_{1,2,3,5,10,30}shot_{category}_trainval.json
# seed{1-9}/
# full_box_{1,2,3,5,10,30}shot_{category}_trainval.json
#
# This means that the JSON files for seed0 are located in the root
# directory rather than in a `seed?/` subdirectory, hence the
# conditional expression below.
'' if _seed == 0 else 'seed{}/'.format(_seed),
_shots,
_category))
# Base class IDs, as defined in
# https://github.com/ucbdrive/few-shot-object-detection/blob/master/fsdet/evaluation/coco_evaluation.py#L60-L65
BASE_CLASS_IDS = [8, 10, 11, 13, 14, 15, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 65, 70, 73, 74, 75,
76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def main(unused_argv):
workdir = FLAGS.workdir
# Filter novel class annotations from the training and validation sets.
for name in ('trainvalno5k', '5k'):
file_path = os.path.join(workdir, 'datasplit', '{}.json'.format(name))
with tf.io.gfile.GFile(file_path, 'r') as f:
json_dict = json.load(f)
json_dict['annotations'] = [a for a in json_dict['annotations']
if a['category_id'] in BASE_CLASS_IDS]
output_path = os.path.join(
workdir, 'datasplit', '{}_base.json'.format(name))
with tf.io.gfile.GFile(output_path, 'w') as f:
json.dump(json_dict, f)
for seed, shots in itertools.product(SEEDS, SHOTS):
# Retrieve all examples for a given seed and shots setting.
file_paths = [os.path.join(workdir, suffix)
for suffix in FILE_SUFFIXES[(seed, shots)]]
json_dicts = []
for file_path in file_paths:
with tf.io.gfile.GFile(file_path, 'r') as f:
json_dicts.append(json.load(f))
# Make sure that all JSON files for a given seed and shots setting have the
# same metadata. We count on this to fuse them later on.
metadata_dicts = [{'info': d['info'], 'licenses': d['licenses'],
'categories': d['categories']} for d in json_dicts]
if not all(d == metadata_dicts[0] for d in metadata_dicts[1:]):
raise RuntimeError(
'JSON files for {} shots (seed {}) '.format(shots, seed) +
          'have different info, licenses, or categories fields')
# Retrieve images across all JSON files.
images = sum((d['images'] for d in json_dicts), [])
# Remove duplicate image entries.
images = list({image['id']: image for image in images}.values())
output_dict = {
'info': json_dicts[0]['info'],
'licenses': json_dicts[0]['licenses'],
'categories': json_dicts[0]['categories'],
'images': images,
'annotations': sum((d['annotations'] for d in json_dicts), [])
}
output_path = os.path.join(workdir,
'{}shot_seed{}.json'.format(shots, seed))
with tf.io.gfile.GFile(output_path, 'w') as f:
json.dump(output_dict, f)
logger.info('Processed %d shots (seed %d) and saved to %s',
shots, seed, output_path)
if __name__ == '__main__':
flags.mark_flag_as_required('workdir')
app.run(main)
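# Example invocation (illustrative; the working directory is hypothetical and
# must mirror the contents of http://dl.yf.io/fs-det/datasets/cocosplit/):
#
#   python process_coco_few_shot_json_files.py --workdir=/tmp/cocosplit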
| 6,051 | 40.737931 | 111 | py |
models | models-master/official/vision/data/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/vision/modeling/video_classification_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build video classification models."""
from typing import Any, Mapping, Optional, Union, List, Text
import tensorflow as tf
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class VideoClassificationModel(tf.keras.Model):
"""A video classification class builder."""
def __init__(
self,
backbone: tf.keras.Model,
num_classes: int,
input_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None,
dropout_rate: float = 0.0,
aggregate_endpoints: bool = False,
kernel_initializer: str = 'random_uniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
require_endpoints: Optional[List[Text]] = None,
**kwargs):
"""Video Classification initialization function.
Args:
backbone: a 3d backbone network.
num_classes: `int` number of classes in classification task.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
dropout_rate: `float` rate for dropout regularization.
      aggregate_endpoints: `bool` aggregate all endpoints or only use the
        final endpoint.
kernel_initializer: kernel initializer for the dense layer.
kernel_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
bias_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
require_endpoints: the required endpoints for prediction. If None or
empty, then only uses the final endpoint.
**kwargs: keyword arguments to be passed.
"""
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'input_specs': input_specs,
'dropout_rate': dropout_rate,
'aggregate_endpoints': aggregate_endpoints,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'require_endpoints': require_endpoints,
}
self._input_specs = input_specs
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._backbone = backbone
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
endpoints = backbone(inputs['image'])
if aggregate_endpoints:
pooled_feats = []
for endpoint in endpoints.values():
x_pool = tf.keras.layers.GlobalAveragePooling3D()(endpoint)
pooled_feats.append(x_pool)
x = tf.concat(pooled_feats, axis=1)
else:
if not require_endpoints:
# Uses the last endpoint for prediction.
x = endpoints[max(endpoints.keys())]
x = tf.keras.layers.GlobalAveragePooling3D()(x)
else:
# Concats all the required endpoints for prediction.
outputs = []
for name in require_endpoints:
x = endpoints[name]
x = tf.keras.layers.GlobalAveragePooling3D()(x)
outputs.append(x)
x = tf.concat(outputs, axis=1)
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.Dense(
num_classes, kernel_initializer=kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
super(VideoClassificationModel, self).__init__(
inputs=inputs, outputs=x, **kwargs)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
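# Example: wrapping a small 3D backbone in `VideoClassificationModel`. This is
# an illustrative sketch; `toy_backbone` below is a stand-in for a real 3D
# backbone (e.g. one from `official.vision.modeling.backbones`) and only needs
# to map a video tensor to a dict of 5D endpoint tensors.
def _build_toy_video_classifier(num_classes: int = 10) -> tf.keras.Model:
  video = tf.keras.Input(shape=(8, 64, 64, 3))
  features = tf.keras.layers.Conv3D(8, 3, strides=2, padding='same')(video)
  toy_backbone = tf.keras.Model(video, {'1': features})
  return VideoClassificationModel(
      backbone=toy_backbone,
      num_classes=num_classes,
      input_specs={'image': layers.InputSpec(shape=[None, 8, 64, 64, 3])},
      dropout_rate=0.2)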
| 4,703 | 35.465116 | 78 | py |
models | models-master/official/vision/modeling/classification_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build classification models."""
from typing import Any, Mapping, Optional
# Import libraries
import tensorflow as tf
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class ClassificationModel(tf.keras.Model):
"""A classification class builder."""
def __init__(
self,
backbone: tf.keras.Model,
num_classes: int,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, 3]),
dropout_rate: float = 0.0,
kernel_initializer: str = 'random_uniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
add_head_batch_norm: bool = False,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
skip_logits_layer: bool = False,
**kwargs):
"""Classification initialization function.
Args:
backbone: a backbone network.
num_classes: `int` number of classes in classification task.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
dropout_rate: `float` rate for dropout regularization.
kernel_initializer: kernel initializer for the dense layer.
kernel_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
bias_regularizer: tf.keras.regularizers.Regularizer object. Default to
None.
add_head_batch_norm: `bool` whether to add a batch normalization layer
before pool.
use_sync_bn: `bool` if True, use synchronized batch normalization.
norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
skip_logits_layer: `bool`, whether to skip the prediction layer.
**kwargs: keyword arguments to be passed.
"""
norm = tf.keras.layers.BatchNormalization
axis = -1 if tf.keras.backend.image_data_format() == 'channels_last' else 1
inputs = tf.keras.Input(shape=input_specs.shape[1:], name=input_specs.name)
endpoints = backbone(inputs)
x = endpoints[max(endpoints.keys())]
if add_head_batch_norm:
x = norm(
axis=axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn,
)(x)
    # Depending on the backbone type, the backbone's output can be
    # [batch_size, height, width, channel_size] or
    # [batch_size, token_size, hidden_size].
if len(x.shape) == 4:
x = tf.keras.layers.GlobalAveragePooling2D()(x)
elif len(x.shape) == 3:
x = tf.keras.layers.GlobalAveragePooling1D()(x)
if not skip_logits_layer:
x = tf.keras.layers.Dropout(dropout_rate)(x)
x = tf.keras.layers.Dense(
num_classes,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(
x)
super(ClassificationModel, self).__init__(
inputs=inputs, outputs=x, **kwargs)
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'input_specs': input_specs,
'dropout_rate': dropout_rate,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'add_head_batch_norm': add_head_batch_norm,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
}
self._input_specs = input_specs
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._backbone = backbone
self._norm = norm
@property
def checkpoint_items(self) -> Mapping[str, tf.keras.Model]:
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
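# Example: assembling a ResNet-50 classifier. Illustrative sketch; it assumes
# the ResNet backbone from `official.vision.modeling.backbones` is available.
def _build_example_resnet_classifier(num_classes: int = 1000):
  # Local import keeps this sketch self-contained.
  from official.vision.modeling.backbones import resnet
  backbone = resnet.ResNet(model_id=50)
  return ClassificationModel(
      backbone=backbone,
      num_classes=num_classes,
      input_specs=layers.InputSpec(shape=[None, 224, 224, 3]),
      dropout_rate=0.2)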
| 4,857 | 35.526316 | 79 | py |
models | models-master/official/vision/modeling/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.configs import backbones
from official.vision.configs import backbones_3d
from official.vision.configs import image_classification as classification_cfg
from official.vision.configs import maskrcnn as maskrcnn_cfg
from official.vision.configs import retinanet as retinanet_cfg
from official.vision.configs import video_classification as video_classification_cfg
from official.vision.modeling import factory
from official.vision.modeling import factory_3d
class ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('resnet', (224, 224), 5e-5),
('resnet', (224, 224), None),
('resnet', (None, None), 5e-5),
('resnet', (None, None), None),
)
def test_builder(self, backbone_type, input_size, weight_decay):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model_config = classification_cfg.ImageClassificationModel(
num_classes=num_classes,
backbone=backbones.Backbone(type=backbone_type))
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
_ = factory.build_classification_model(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
class MaskRCNNBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('resnet', (640, 640)),
('resnet', (None, None)),
)
def test_builder(self, backbone_type, input_size):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model_config = maskrcnn_cfg.MaskRCNN(
num_classes=num_classes,
backbone=backbones.Backbone(type=backbone_type))
l2_regularizer = tf.keras.regularizers.l2(5e-5)
_ = factory.build_maskrcnn(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
class RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('resnet', (640, 640), False),
('resnet', (None, None), True),
)
def test_builder(self, backbone_type, input_size, has_att_heads):
num_classes = 2
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
if has_att_heads:
attribute_heads_config = [
retinanet_cfg.AttributeHead(name='att1'),
retinanet_cfg.AttributeHead(
name='att2', type='classification', size=2),
]
else:
attribute_heads_config = None
model_config = retinanet_cfg.RetinaNet(
num_classes=num_classes,
backbone=backbones.Backbone(type=backbone_type),
head=retinanet_cfg.RetinaNetHead(
attribute_heads=attribute_heads_config))
l2_regularizer = tf.keras.regularizers.l2(5e-5)
_ = factory.build_retinanet(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
if has_att_heads:
self.assertEqual(
model_config.head.attribute_heads[0].as_dict(),
dict(
name='att1',
type='regression',
size=1,
prediction_tower_name='',
num_convs=None,
num_filters=None,
),
)
self.assertEqual(
model_config.head.attribute_heads[1].as_dict(),
dict(
name='att2',
type='classification',
size=2,
prediction_tower_name='',
num_convs=None,
num_filters=None,
),
)
class VideoClassificationModelBuilderTest(parameterized.TestCase,
tf.test.TestCase):
@parameterized.parameters(
('resnet_3d', (8, 224, 224), 5e-5),
('resnet_3d', (None, None, None), 5e-5),
)
def test_builder(self, backbone_type, input_size, weight_decay):
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], input_size[2], 3])
model_config = video_classification_cfg.VideoClassificationModel(
backbone=backbones_3d.Backbone3D(type=backbone_type))
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
_ = factory_3d.build_video_classification_model(
input_specs=input_specs,
model_config=model_config,
num_classes=2,
l2_regularizer=l2_regularizer)
if __name__ == '__main__':
tf.test.main()
| 5,286 | 34.246667 | 84 | py |
models | models-master/official/vision/modeling/maskrcnn_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for maskrcnn_model.py."""
import os
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.vision.modeling import maskrcnn_model
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.heads import instance_heads
from official.vision.modeling.layers import detection_generator
from official.vision.modeling.layers import mask_sampler
from official.vision.modeling.layers import roi_aligner
from official.vision.modeling.layers import roi_generator
from official.vision.modeling.layers import roi_sampler
from official.vision.ops import anchor
class MaskRCNNModelTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(
combinations.combine(
include_mask=[True, False],
use_separable_conv=[True, False],
build_anchor_boxes=[True, False],
use_outer_boxes=[True, False],
is_training=[True, False]))
def test_build_model(self, include_mask, use_separable_conv,
build_anchor_boxes, use_outer_boxes, is_training):
num_classes = 3
min_level = 3
max_level = 7
num_scales = 3
aspect_ratios = [1.0]
anchor_size = 3
resnet_model_id = 50
num_anchors_per_location = num_scales * len(aspect_ratios)
image_size = 384
images = np.random.rand(2, image_size, image_size, 3)
image_shape = np.array([[image_size, image_size], [image_size, image_size]])
if build_anchor_boxes:
anchor_boxes = anchor.Anchor(
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=3,
image_size=(image_size, image_size)).multilevel_boxes
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1])
else:
anchor_boxes = None
backbone = resnet.ResNet(model_id=resnet_model_id)
decoder = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level,
use_separable_conv=use_separable_conv)
rpn_head = dense_prediction_heads.RPNHead(
min_level=min_level,
max_level=max_level,
num_anchors_per_location=num_anchors_per_location,
num_convs=1)
detection_head = instance_heads.DetectionHead(num_classes=num_classes)
roi_generator_obj = roi_generator.MultilevelROIGenerator()
roi_sampler_obj = roi_sampler.ROISampler()
roi_aligner_obj = roi_aligner.MultilevelROIAligner()
detection_generator_obj = detection_generator.DetectionGenerator()
if include_mask:
mask_head = instance_heads.MaskHead(
num_classes=num_classes, upsample_factor=2)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=28, num_sampled_masks=1)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14)
else:
mask_head = None
mask_sampler_obj = None
mask_roi_aligner_obj = None
model = maskrcnn_model.MaskRCNNModel(
backbone,
decoder,
rpn_head,
detection_head,
roi_generator_obj,
roi_sampler_obj,
roi_aligner_obj,
detection_generator_obj,
mask_head,
mask_sampler_obj,
mask_roi_aligner_obj,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size)
gt_boxes = np.array(
[[[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]],
[[100, 100, 150, 150], [-1, -1, -1, -1], [-1, -1, -1, -1]]],
dtype=np.float32)
gt_outer_boxes = None
if use_outer_boxes:
gt_outer_boxes = np.array(
[[[11, 11, 16.5, 16.5], [2.75, 2.75, 8.25, 8.25], [-1, -1, -1, -1]],
[[110, 110, 165, 165], [-1, -1, -1, -1], [-1, -1, -1, -1]]],
dtype=np.float32)
gt_classes = np.array([[2, 1, -1], [1, -1, -1]], dtype=np.int32)
if include_mask:
gt_masks = np.ones((2, 3, 100, 100))
else:
gt_masks = None
# Results will be checked in test_forward.
_ = model(
images,
image_shape,
anchor_boxes,
gt_boxes,
gt_classes,
gt_masks,
gt_outer_boxes,
training=is_training)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
include_mask=[True, False],
build_anchor_boxes=[True, False],
use_cascade_heads=[True, False],
training=[True, False],
))
def test_forward(self, strategy, include_mask, build_anchor_boxes, training,
use_cascade_heads):
num_classes = 3
min_level = 3
max_level = 4
num_scales = 3
aspect_ratios = [1.0]
anchor_size = 3
if use_cascade_heads:
cascade_iou_thresholds = [0.6]
class_agnostic_bbox_pred = True
cascade_class_ensemble = True
else:
cascade_iou_thresholds = None
class_agnostic_bbox_pred = False
cascade_class_ensemble = False
image_size = (256, 256)
images = np.random.rand(2, image_size[0], image_size[1], 3)
image_shape = np.array([[224, 100], [100, 224]])
with strategy.scope():
if build_anchor_boxes:
anchor_boxes = anchor.Anchor(
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size,
image_size=image_size).multilevel_boxes
else:
anchor_boxes = None
num_anchors_per_location = len(aspect_ratios) * num_scales
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3])
backbone = resnet.ResNet(model_id=50, input_specs=input_specs)
decoder = fpn.FPN(
min_level=min_level,
max_level=max_level,
input_specs=backbone.output_specs)
rpn_head = dense_prediction_heads.RPNHead(
min_level=min_level,
max_level=max_level,
num_anchors_per_location=num_anchors_per_location)
detection_head = instance_heads.DetectionHead(
num_classes=num_classes,
class_agnostic_bbox_pred=class_agnostic_bbox_pred)
roi_generator_obj = roi_generator.MultilevelROIGenerator()
roi_sampler_cascade = []
roi_sampler_obj = roi_sampler.ROISampler()
roi_sampler_cascade.append(roi_sampler_obj)
if cascade_iou_thresholds:
for iou in cascade_iou_thresholds:
roi_sampler_obj = roi_sampler.ROISampler(
mix_gt_boxes=False,
foreground_iou_threshold=iou,
background_iou_high_threshold=iou,
background_iou_low_threshold=0.0,
skip_subsampling=True)
roi_sampler_cascade.append(roi_sampler_obj)
roi_aligner_obj = roi_aligner.MultilevelROIAligner()
detection_generator_obj = detection_generator.DetectionGenerator()
if include_mask:
mask_head = instance_heads.MaskHead(
num_classes=num_classes, upsample_factor=2)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=28, num_sampled_masks=1)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14)
else:
mask_head = None
mask_sampler_obj = None
mask_roi_aligner_obj = None
model = maskrcnn_model.MaskRCNNModel(
backbone,
decoder,
rpn_head,
detection_head,
roi_generator_obj,
roi_sampler_obj,
roi_aligner_obj,
detection_generator_obj,
mask_head,
mask_sampler_obj,
mask_roi_aligner_obj,
class_agnostic_bbox_pred=class_agnostic_bbox_pred,
cascade_class_ensemble=cascade_class_ensemble,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size)
gt_boxes = np.array(
[[[10, 10, 15, 15], [2.5, 2.5, 7.5, 7.5], [-1, -1, -1, -1]],
[[100, 100, 150, 150], [-1, -1, -1, -1], [-1, -1, -1, -1]]],
dtype=np.float32)
gt_outer_boxes = np.array(
[[[11, 11, 16.5, 16.5], [2.75, 2.75, 8.25, 8.25], [-1, -1, -1, -1]],
[[110, 110, 165, 165], [-1, -1, -1, -1], [-1, -1, -1, -1]]],
dtype=np.float32)
gt_classes = np.array([[2, 1, -1], [1, -1, -1]], dtype=np.int32)
if include_mask:
gt_masks = np.ones((2, 3, 100, 100))
else:
gt_masks = None
results = model(
images,
image_shape,
anchor_boxes,
gt_boxes,
gt_classes,
gt_masks,
gt_outer_boxes,
training=training)
self.assertIn('rpn_boxes', results)
self.assertIn('rpn_scores', results)
if training:
self.assertIn('class_targets', results)
self.assertIn('box_targets', results)
self.assertIn('class_outputs', results)
self.assertIn('box_outputs', results)
if include_mask:
self.assertIn('mask_outputs', results)
else:
self.assertIn('detection_boxes', results)
self.assertIn('detection_scores', results)
self.assertIn('detection_classes', results)
self.assertIn('num_detections', results)
if include_mask:
self.assertIn('detection_masks', results)
@parameterized.parameters(
(False,),
(True,),
)
def test_serialize_deserialize(self, include_mask):
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3])
backbone = resnet.ResNet(model_id=50, input_specs=input_specs)
decoder = fpn.FPN(
min_level=3, max_level=7, input_specs=backbone.output_specs)
rpn_head = dense_prediction_heads.RPNHead(
min_level=3, max_level=7, num_anchors_per_location=3)
detection_head = instance_heads.DetectionHead(num_classes=2)
roi_generator_obj = roi_generator.MultilevelROIGenerator()
roi_sampler_obj = roi_sampler.ROISampler()
roi_aligner_obj = roi_aligner.MultilevelROIAligner()
detection_generator_obj = detection_generator.DetectionGenerator()
if include_mask:
mask_head = instance_heads.MaskHead(num_classes=2, upsample_factor=2)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=28, num_sampled_masks=1)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14)
else:
mask_head = None
mask_sampler_obj = None
mask_roi_aligner_obj = None
model = maskrcnn_model.MaskRCNNModel(
backbone,
decoder,
rpn_head,
detection_head,
roi_generator_obj,
roi_sampler_obj,
roi_aligner_obj,
detection_generator_obj,
mask_head,
mask_sampler_obj,
mask_roi_aligner_obj,
min_level=3,
max_level=7,
num_scales=3,
aspect_ratios=[1.0],
anchor_size=3)
config = model.get_config()
new_model = maskrcnn_model.MaskRCNNModel.from_config(config)
    # Validate that the config can be serialized to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
@parameterized.parameters(
(False,),
(True,),
)
def test_checkpoint(self, include_mask):
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, 3])
backbone = resnet.ResNet(model_id=50, input_specs=input_specs)
decoder = fpn.FPN(
min_level=3, max_level=7, input_specs=backbone.output_specs)
rpn_head = dense_prediction_heads.RPNHead(
min_level=3, max_level=7, num_anchors_per_location=3)
detection_head = instance_heads.DetectionHead(num_classes=2)
roi_generator_obj = roi_generator.MultilevelROIGenerator()
roi_sampler_obj = roi_sampler.ROISampler()
roi_aligner_obj = roi_aligner.MultilevelROIAligner()
detection_generator_obj = detection_generator.DetectionGenerator()
if include_mask:
mask_head = instance_heads.MaskHead(num_classes=2, upsample_factor=2)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=28, num_sampled_masks=1)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(crop_size=14)
else:
mask_head = None
mask_sampler_obj = None
mask_roi_aligner_obj = None
model = maskrcnn_model.MaskRCNNModel(
backbone,
decoder,
rpn_head,
detection_head,
roi_generator_obj,
roi_sampler_obj,
roi_aligner_obj,
detection_generator_obj,
mask_head,
mask_sampler_obj,
mask_roi_aligner_obj,
min_level=3,
max_level=7,
num_scales=3,
aspect_ratios=[1.0],
anchor_size=3)
expect_checkpoint_items = dict(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=[detection_head])
if include_mask:
expect_checkpoint_items['mask_head'] = mask_head
self.assertAllEqual(expect_checkpoint_items, model.checkpoint_items)
# Test save and load checkpoints.
ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)
save_dir = self.create_tempdir().full_path
ckpt.save(os.path.join(save_dir, 'ckpt'))
partial_ckpt = tf.train.Checkpoint(backbone=backbone)
partial_ckpt.read(tf.train.latest_checkpoint(
save_dir)).expect_partial().assert_existing_objects_matched()
if include_mask:
partial_ckpt_mask = tf.train.Checkpoint(
backbone=backbone, mask_head=mask_head)
partial_ckpt_mask.restore(tf.train.latest_checkpoint(
save_dir)).expect_partial().assert_existing_objects_matched()
if __name__ == '__main__':
tf.test.main()
| 14,834 | 35.094891 | 80 | py |
models | models-master/official/vision/modeling/maskrcnn_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R-CNN(-RS) models."""
from typing import Any, List, Mapping, Optional, Tuple, Union
import tensorflow as tf
from official.vision.ops import anchor
from official.vision.ops import box_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class MaskRCNNModel(tf.keras.Model):
"""The Mask R-CNN(-RS) and Cascade RCNN-RS models."""
def __init__(self,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
rpn_head: tf.keras.layers.Layer,
detection_head: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_generator: tf.keras.layers.Layer,
roi_sampler: Union[tf.keras.layers.Layer,
List[tf.keras.layers.Layer]],
roi_aligner: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
mask_head: Optional[tf.keras.layers.Layer] = None,
mask_sampler: Optional[tf.keras.layers.Layer] = None,
mask_roi_aligner: Optional[tf.keras.layers.Layer] = None,
class_agnostic_bbox_pred: bool = False,
cascade_class_ensemble: bool = False,
min_level: Optional[int] = None,
max_level: Optional[int] = None,
num_scales: Optional[int] = None,
aspect_ratios: Optional[List[float]] = None,
anchor_size: Optional[float] = None,
outer_boxes_scale: float = 1.0,
**kwargs):
"""Initializes the R-CNN(-RS) model.
Args:
backbone: `tf.keras.Model`, the backbone network.
decoder: `tf.keras.Model`, the decoder network.
rpn_head: the RPN head.
detection_head: the detection head or a list of heads.
roi_generator: the ROI generator.
roi_sampler: a single ROI sampler or a list of ROI samplers for cascade
detection heads.
roi_aligner: the ROI aligner.
detection_generator: the detection generator.
mask_head: the mask head.
mask_sampler: the mask sampler.
      mask_roi_aligner: the ROI aligner for mask prediction.
class_agnostic_bbox_pred: if True, perform class agnostic bounding box
prediction. Needs to be `True` for Cascade RCNN models.
cascade_class_ensemble: if True, ensemble classification scores over all
detection heads.
min_level: Minimum level in output feature maps.
max_level: Maximum level in output feature maps.
      num_scales: A number representing intermediate scales added on each level.
        For instance, num_scales=2 adds one additional intermediate anchor
        scale, resulting in scales [2^0, 2^0.5] on each level.
      aspect_ratios: A list representing the aspect ratio anchors added on each
        level. The number indicates the ratio of width to height. For instance,
        aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each scale level.
      anchor_size: A number representing the scale of the base anchor size
        relative to the feature stride 2^level.
outer_boxes_scale: a float to scale up the bounding boxes to generate
more inclusive masks. The scale is expected to be >=1.0.
**kwargs: keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'rpn_head': rpn_head,
'detection_head': detection_head,
'roi_generator': roi_generator,
'roi_sampler': roi_sampler,
'roi_aligner': roi_aligner,
'detection_generator': detection_generator,
'outer_boxes_scale': outer_boxes_scale,
'mask_head': mask_head,
'mask_sampler': mask_sampler,
'mask_roi_aligner': mask_roi_aligner,
'class_agnostic_bbox_pred': class_agnostic_bbox_pred,
'cascade_class_ensemble': cascade_class_ensemble,
'min_level': min_level,
'max_level': max_level,
'num_scales': num_scales,
'aspect_ratios': aspect_ratios,
'anchor_size': anchor_size,
}
self.backbone = backbone
self.decoder = decoder
self.rpn_head = rpn_head
if not isinstance(detection_head, (list, tuple)):
self.detection_head = [detection_head]
else:
self.detection_head = detection_head
self.roi_generator = roi_generator
if not isinstance(roi_sampler, (list, tuple)):
self.roi_sampler = [roi_sampler]
else:
self.roi_sampler = roi_sampler
if len(self.roi_sampler) > 1 and not class_agnostic_bbox_pred:
raise ValueError(
'`class_agnostic_bbox_pred` needs to be True if multiple detection heads are specified.'
)
self.roi_aligner = roi_aligner
self.detection_generator = detection_generator
self._include_mask = mask_head is not None
if outer_boxes_scale < 1.0:
raise ValueError('`outer_boxes_scale` should be a value >= 1.0.')
self.outer_boxes_scale = outer_boxes_scale
self.mask_head = mask_head
if self._include_mask and mask_sampler is None:
raise ValueError('`mask_sampler` is not provided in Mask R-CNN.')
self.mask_sampler = mask_sampler
if self._include_mask and mask_roi_aligner is None:
raise ValueError('`mask_roi_aligner` is not provided in Mask R-CNN.')
self.mask_roi_aligner = mask_roi_aligner
# Weights for the regression losses for each FRCNN layer.
# TODO(jiageng): Make the weights configurable.
self._cascade_layer_to_weights = [
[10.0, 10.0, 5.0, 5.0],
[20.0, 20.0, 10.0, 10.0],
[30.0, 30.0, 15.0, 15.0],
]
def call( # pytype: disable=signature-mismatch # overriding-parameter-count-checks
self,
images: tf.Tensor,
image_shape: tf.Tensor,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
gt_boxes: Optional[tf.Tensor] = None,
gt_classes: Optional[tf.Tensor] = None,
gt_masks: Optional[tf.Tensor] = None,
gt_outer_boxes: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> Mapping[str, Optional[tf.Tensor]]:
call_box_outputs_kwargs = {
'images': images,
'image_shape': image_shape,
'anchor_boxes': anchor_boxes,
'gt_boxes': gt_boxes,
'gt_classes': gt_classes,
'training': training,
}
if self.outer_boxes_scale > 1.0:
call_box_outputs_kwargs['gt_outer_boxes'] = gt_outer_boxes
model_outputs, intermediate_outputs = self._call_box_outputs(
**call_box_outputs_kwargs)
if not self._include_mask:
return model_outputs
if self.outer_boxes_scale == 1.0:
current_rois = intermediate_outputs['current_rois']
matched_gt_boxes = intermediate_outputs['matched_gt_boxes']
else:
current_rois = box_ops.compute_outer_boxes(
intermediate_outputs['current_rois'],
tf.expand_dims(image_shape, axis=1), self.outer_boxes_scale)
matched_gt_boxes = intermediate_outputs['matched_gt_outer_boxes']
model_mask_outputs = self._call_mask_outputs(
model_box_outputs=model_outputs,
features=model_outputs['decoder_features'],
current_rois=current_rois,
matched_gt_indices=intermediate_outputs['matched_gt_indices'],
matched_gt_boxes=matched_gt_boxes,
matched_gt_classes=intermediate_outputs['matched_gt_classes'],
gt_masks=gt_masks,
training=training)
model_outputs.update(model_mask_outputs) # pytype: disable=attribute-error # dynamic-method-lookup
return model_outputs
def _get_backbone_and_decoder_features(self, images):
backbone_features = self.backbone(images)
if self.decoder:
features = self.decoder(backbone_features)
else:
features = backbone_features
return backbone_features, features
def _call_box_outputs(
self,
images: tf.Tensor,
image_shape: tf.Tensor,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
gt_boxes: Optional[tf.Tensor] = None,
gt_classes: Optional[tf.Tensor] = None,
training: Optional[bool] = None,
gt_outer_boxes: Optional[tf.Tensor] = None,
) -> Tuple[Mapping[str, Any], Mapping[str, Any]]:
"""Implementation of the Faster-RCNN logic for boxes."""
model_outputs = {}
# Feature extraction.
(backbone_features,
decoder_features) = self._get_backbone_and_decoder_features(images)
# Region proposal network.
rpn_scores, rpn_boxes = self.rpn_head(decoder_features)
model_outputs.update({
'backbone_features': backbone_features,
'decoder_features': decoder_features,
'rpn_boxes': rpn_boxes,
'rpn_scores': rpn_scores
})
# Generate anchor boxes for this batch if not provided.
if anchor_boxes is None:
_, image_height, image_width, _ = images.get_shape().as_list()
anchor_boxes = anchor.Anchor(
min_level=self._config_dict['min_level'],
max_level=self._config_dict['max_level'],
num_scales=self._config_dict['num_scales'],
aspect_ratios=self._config_dict['aspect_ratios'],
anchor_size=self._config_dict['anchor_size'],
image_size=(image_height, image_width)).multilevel_boxes
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0),
[tf.shape(images)[0], 1, 1, 1])
# Generate RoIs.
current_rois, _ = self.roi_generator(rpn_boxes, rpn_scores, anchor_boxes,
image_shape, training)
next_rois = current_rois
all_class_outputs = []
for cascade_num in range(len(self.roi_sampler)):
# In cascade RCNN we want the higher layers to have different regression
# weights as the predicted deltas become smaller and smaller.
regression_weights = self._cascade_layer_to_weights[cascade_num]
current_rois = next_rois
if self.outer_boxes_scale == 1.0:
(class_outputs, box_outputs, model_outputs, matched_gt_boxes,
matched_gt_classes, matched_gt_indices,
current_rois) = self._run_frcnn_head(
features=decoder_features,
rois=current_rois,
gt_boxes=gt_boxes,
gt_classes=gt_classes,
training=training,
model_outputs=model_outputs,
cascade_num=cascade_num,
regression_weights=regression_weights)
else:
(class_outputs, box_outputs, model_outputs,
(matched_gt_boxes, matched_gt_outer_boxes), matched_gt_classes,
matched_gt_indices, current_rois) = self._run_frcnn_head(
features=decoder_features,
rois=current_rois,
gt_boxes=gt_boxes,
gt_outer_boxes=gt_outer_boxes,
gt_classes=gt_classes,
training=training,
model_outputs=model_outputs,
cascade_num=cascade_num,
regression_weights=regression_weights)
all_class_outputs.append(class_outputs)
# Generate ROIs for the next cascade head if there is any.
if cascade_num < len(self.roi_sampler) - 1:
next_rois = box_ops.decode_boxes(
tf.cast(box_outputs, tf.float32),
current_rois,
weights=regression_weights)
next_rois = box_ops.clip_boxes(next_rois,
tf.expand_dims(image_shape, axis=1))
if not training:
if self._config_dict['cascade_class_ensemble']:
class_outputs = tf.add_n(all_class_outputs) / len(all_class_outputs)
detections = self.detection_generator(
box_outputs,
class_outputs,
current_rois,
image_shape,
regression_weights,
bbox_per_class=(not self._config_dict['class_agnostic_bbox_pred']))
model_outputs.update({
'cls_outputs': class_outputs,
'box_outputs': box_outputs,
})
if self.detection_generator.get_config()['apply_nms']:
model_outputs.update({
'detection_boxes': detections['detection_boxes'],
'detection_scores': detections['detection_scores'],
'detection_classes': detections['detection_classes'],
'num_detections': detections['num_detections']
})
if self.outer_boxes_scale > 1.0:
detection_outer_boxes = box_ops.compute_outer_boxes(
detections['detection_boxes'],
tf.expand_dims(image_shape, axis=1), self.outer_boxes_scale)
model_outputs['detection_outer_boxes'] = detection_outer_boxes
else:
model_outputs.update({
'decoded_boxes': detections['decoded_boxes'],
'decoded_box_scores': detections['decoded_box_scores']
})
intermediate_outputs = {
'matched_gt_boxes': matched_gt_boxes,
'matched_gt_indices': matched_gt_indices,
'matched_gt_classes': matched_gt_classes,
'current_rois': current_rois,
}
if self.outer_boxes_scale > 1.0:
intermediate_outputs['matched_gt_outer_boxes'] = matched_gt_outer_boxes
return (model_outputs, intermediate_outputs)
def _call_mask_outputs(
self,
model_box_outputs: Mapping[str, tf.Tensor],
features: tf.Tensor,
current_rois: tf.Tensor,
matched_gt_indices: tf.Tensor,
matched_gt_boxes: tf.Tensor,
matched_gt_classes: tf.Tensor,
gt_masks: tf.Tensor,
training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
"""Implementation of Mask-RCNN mask prediction logic."""
model_outputs = dict(model_box_outputs)
if training:
current_rois, roi_classes, roi_masks = self.mask_sampler(
current_rois, matched_gt_boxes, matched_gt_classes,
matched_gt_indices, gt_masks)
roi_masks = tf.stop_gradient(roi_masks)
model_outputs.update({
'mask_class_targets': roi_classes,
'mask_targets': roi_masks,
})
else:
if self.outer_boxes_scale == 1.0:
current_rois = model_outputs['detection_boxes']
else:
current_rois = model_outputs['detection_outer_boxes']
roi_classes = model_outputs['detection_classes']
mask_logits, mask_probs = self._features_to_mask_outputs(
features, current_rois, roi_classes)
if training:
model_outputs.update({
'mask_outputs': mask_logits,
})
else:
model_outputs.update({
'detection_masks': mask_probs,
})
return model_outputs
def _run_frcnn_head(self,
features,
rois,
gt_boxes,
gt_classes,
training,
model_outputs,
cascade_num,
regression_weights,
gt_outer_boxes=None):
"""Runs the frcnn head that does both class and box prediction.
Args:
features: `list` of features from the feature extractor.
rois: `list` of current rois that will be used to predict bbox refinement
and classes from.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4].
This tensor might have paddings with a negative value.
gt_classes: [batch_size, MAX_INSTANCES] representing the groundtruth box
classes. It is padded with -1s to indicate the invalid classes.
training: `bool`, if model is training or being evaluated.
model_outputs: `dict`, used for storing outputs used for eval and losses.
cascade_num: `int`, the current frcnn layer in the cascade.
regression_weights: `list`, weights used for l1 loss in bounding box
regression.
gt_outer_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES,
4]. This tensor might have paddings with a negative value. Default to
None.
Returns:
class_outputs: Class predictions for rois.
box_outputs: Box predictions for rois. These are formatted for the
regression loss and need to be converted before being used as rois
in the next stage.
model_outputs: Updated dict with predictions used for losses and eval.
      matched_gt_boxes: If `is_training` is true, then these give the gt box
        location of its positive match.
      matched_gt_classes: If `is_training` is true, then these give the gt class
        of the predicted box.
      matched_gt_outer_boxes: If `is_training` is true, then these give the
        outer box location of its positive match. Only exists if
        outer_boxes_scale is greater than 1.0.
matched_gt_indices: If `is_training` is true, then gives the index of
the positive box match. Used for mask prediction.
rois: The sampled rois used for this layer.
"""
# Only used during training.
matched_gt_boxes, matched_gt_classes, matched_gt_indices = None, None, None
if self.outer_boxes_scale > 1.0:
matched_gt_outer_boxes = None
if training and gt_boxes is not None:
rois = tf.stop_gradient(rois)
current_roi_sampler = self.roi_sampler[cascade_num]
if self.outer_boxes_scale == 1.0:
rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices = (
current_roi_sampler(rois, gt_boxes, gt_classes))
else:
(rois, matched_gt_boxes, matched_gt_outer_boxes, matched_gt_classes,
matched_gt_indices) = current_roi_sampler(rois, gt_boxes, gt_classes,
gt_outer_boxes)
# Create bounding box training targets.
box_targets = box_ops.encode_boxes(
matched_gt_boxes, rois, weights=regression_weights)
# If the target is background, the box target is set to all 0s.
box_targets = tf.where(
tf.tile(
tf.expand_dims(tf.equal(matched_gt_classes, 0), axis=-1),
[1, 1, 4]), tf.zeros_like(box_targets), box_targets)
model_outputs.update({
'class_targets_{}'.format(cascade_num)
if cascade_num else 'class_targets':
matched_gt_classes,
'box_targets_{}'.format(cascade_num)
if cascade_num else 'box_targets':
box_targets,
})
# Get roi features.
roi_features = self.roi_aligner(features, rois)
# Run frcnn head to get class and bbox predictions.
current_detection_head = self.detection_head[cascade_num]
class_outputs, box_outputs = current_detection_head(roi_features)
model_outputs.update({
'class_outputs_{}'.format(cascade_num)
if cascade_num else 'class_outputs':
class_outputs,
'box_outputs_{}'.format(cascade_num) if cascade_num else 'box_outputs':
box_outputs,
})
if self.outer_boxes_scale == 1.0:
return (class_outputs, box_outputs, model_outputs, matched_gt_boxes,
matched_gt_classes, matched_gt_indices, rois)
else:
return (class_outputs, box_outputs, model_outputs,
(matched_gt_boxes, matched_gt_outer_boxes), matched_gt_classes,
matched_gt_indices, rois)
def _features_to_mask_outputs(self, features, rois, roi_classes):
# Mask RoI align.
mask_roi_features = self.mask_roi_aligner(features, rois)
# Mask head.
raw_masks = self.mask_head([mask_roi_features, roi_classes])
return raw_masks, tf.nn.sigmoid(raw_masks)
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(
backbone=self.backbone,
rpn_head=self.rpn_head,
detection_head=self.detection_head)
if self.decoder is not None:
items.update(decoder=self.decoder)
if self._include_mask:
items.update(mask_head=self.mask_head)
return items
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
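# Example: running an already-built Mask R-CNN model for inference.
# Illustrative sketch; `model` is assumed to be a `MaskRCNNModel` constructed
# as in the unit tests, with anchors generated internally from the image size.
def _example_inference(model: 'MaskRCNNModel',
                       images: tf.Tensor,
                       image_shape: tf.Tensor) -> Mapping[str, tf.Tensor]:
  """Returns the raw output dict for a batch of preprocessed images."""
  outputs = model(images, image_shape, training=False)
  # With the default detection generator (`apply_nms=True`), the outputs
  # include `detection_boxes`, `detection_scores`, `detection_classes` and
  # `num_detections`; with masks enabled, also `detection_masks`.
  return outputs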
| 20,733 | 39.734774 | 104 | py |
models | models-master/official/vision/modeling/retinanet_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet."""
from typing import Any, Mapping, List, Optional, Union, Sequence
# Import libraries
import tensorflow as tf
from official.vision.ops import anchor
@tf.keras.utils.register_keras_serializable(package='Vision')
class RetinaNetModel(tf.keras.Model):
"""The RetinaNet model class."""
def __init__(self,
backbone: tf.keras.Model,
decoder: tf.keras.Model,
head: tf.keras.layers.Layer,
detection_generator: tf.keras.layers.Layer,
min_level: Optional[int] = None,
max_level: Optional[int] = None,
num_scales: Optional[int] = None,
aspect_ratios: Optional[List[float]] = None,
anchor_size: Optional[float] = None,
**kwargs):
"""Detection initialization function.
Args:
backbone: `tf.keras.Model` a backbone network.
decoder: `tf.keras.Model` a decoder network.
head: `RetinaNetHead`, the RetinaNet head.
detection_generator: the detection generator.
min_level: Minimum level in output feature maps.
max_level: Maximum level in output feature maps.
      num_scales: A number representing intermediate scales added
        on each level. For instance, num_scales=2 adds one additional
        intermediate anchor scale, resulting in scales [2^0, 2^0.5] on each
        level.
      aspect_ratios: A list representing the aspect ratio
        anchors added on each level. The number indicates the ratio of width to
        height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
        on each scale level.
      anchor_size: A number representing the scale of the base
        anchor size relative to the feature stride 2^level.
**kwargs: keyword arguments to be passed.
"""
super(RetinaNetModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'head': head,
'detection_generator': detection_generator,
'min_level': min_level,
'max_level': max_level,
'num_scales': num_scales,
'aspect_ratios': aspect_ratios,
'anchor_size': anchor_size,
}
self._backbone = backbone
self._decoder = decoder
self._head = head
self._detection_generator = detection_generator
def call(self,
images: Union[tf.Tensor, Sequence[tf.Tensor]],
image_shape: Optional[tf.Tensor] = None,
anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,
output_intermediate_features: bool = False,
           training: Optional[bool] = None) -> Mapping[str, tf.Tensor]:
"""Forward pass of the RetinaNet model.
Args:
images: `Tensor` or a sequence of `Tensor`, the input batched images to
the backbone network, whose shape(s) is [batch, height, width, 3]. If it
is a sequence of `Tensor`, we will assume the anchors are generated
based on the shape of the first image(s).
image_shape: `Tensor`, the actual shape of the input images, whose shape
is [batch, 2] where the last dimension is [height, width]. Note that
this is the actual image shape excluding paddings. For example, images
in the batch may be resized into different shapes before padding to the
fixed size.
anchor_boxes: a dict of tensors which includes multilevel anchors.
- key: `str`, the level of the multilevel predictions.
        - values: `Tensor`, the anchor coordinates of a particular feature
          level, whose shape is
          [height_l, width_l, 4 * num_anchors_per_location].
output_intermediate_features: `bool` indicating whether to return the
intermediate feature maps generated by backbone and decoder.
training: `bool`, indicating whether it is in training mode.
Returns:
scores: a dict of tensors which includes scores of the predictions.
- key: `str`, the level of the multilevel predictions.
- values: `Tensor`, the box scores predicted from a particular feature
level, whose shape is
[batch, height_l, width_l, num_classes * num_anchors_per_location].
boxes: a dict of tensors which includes coordinates of the predictions.
- key: `str`, the level of the multilevel predictions.
- values: `Tensor`, the box coordinates predicted from a particular
feature level, whose shape is
[batch, height_l, width_l, 4 * num_anchors_per_location].
attributes: a dict of (attribute_name, attribute_predictions). Each
attribute prediction is a dict that includes:
- key: `str`, the level of the multilevel predictions.
- values: `Tensor`, the attribute predictions from a particular
feature level, whose shape is
[batch, height_l, width_l, att_size * num_anchors_per_location].
"""
outputs = {}
# Feature extraction.
features = self.backbone(images)
if output_intermediate_features:
outputs.update(
{'backbone_{}'.format(k): v for k, v in features.items()})
if self.decoder:
features = self.decoder(features)
if output_intermediate_features:
outputs.update(
{'decoder_{}'.format(k): v for k, v in features.items()})
# Dense prediction. `raw_attributes` can be empty.
raw_scores, raw_boxes, raw_attributes = self.head(features)
if training:
outputs.update({
'cls_outputs': raw_scores,
'box_outputs': raw_boxes,
})
if raw_attributes:
outputs.update({'attribute_outputs': raw_attributes})
return outputs
else:
# Generate anchor boxes for this batch if not provided.
if anchor_boxes is None:
if isinstance(images, Sequence):
primary_images = images[0]
elif isinstance(images, tf.Tensor):
primary_images = images
else:
raise ValueError(
'Input should be a tf.Tensor or a sequence of tf.Tensor, not {}.'
.format(type(images)))
_, image_height, image_width, _ = primary_images.get_shape().as_list()
anchor_boxes = anchor.Anchor(
min_level=self._config_dict['min_level'],
max_level=self._config_dict['max_level'],
num_scales=self._config_dict['num_scales'],
aspect_ratios=self._config_dict['aspect_ratios'],
anchor_size=self._config_dict['anchor_size'],
image_size=(image_height, image_width)).multilevel_boxes
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0),
[tf.shape(primary_images)[0], 1, 1, 1])
# Post-processing.
final_results = self.detection_generator(raw_boxes, raw_scores,
anchor_boxes, image_shape,
raw_attributes)
outputs.update({
'cls_outputs': raw_scores,
'box_outputs': raw_boxes,
})
def _update_decoded_results():
outputs.update({
'decoded_boxes': final_results['decoded_boxes'],
'decoded_box_scores': final_results['decoded_box_scores'],
})
if final_results.get('decoded_box_attributes') is not None:
outputs['decoded_box_attributes'] = final_results[
'decoded_box_attributes'
]
if self.detection_generator.get_config()['apply_nms']:
outputs.update({
'detection_boxes': final_results['detection_boxes'],
'detection_scores': final_results['detection_scores'],
'detection_classes': final_results['detection_classes'],
'num_detections': final_results['num_detections'],
})
# Users can choose to include the decoded results (boxes before NMS) in
# the output tensor dict even if `apply_nms` is set to `True`.
if self.detection_generator.get_config()['return_decoded']:
_update_decoded_results()
else:
_update_decoded_results()
if raw_attributes:
outputs.update({
'attribute_outputs': raw_attributes,
'detection_attributes': final_results['detection_attributes'],
})
return outputs
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.decoder is not None:
items.update(decoder=self.decoder)
return items
@property
def backbone(self) -> tf.keras.Model:
return self._backbone
@property
def decoder(self) -> tf.keras.Model:
return self._decoder
@property
def head(self) -> tf.keras.layers.Layer:
return self._head
@property
def detection_generator(self) -> tf.keras.layers.Layer:
return self._detection_generator
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
| 9,695 | 39.4 | 80 | py |
models | models-master/official/vision/modeling/retinanet_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RetinaNet models."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.vision.modeling import retinanet_model
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.layers import detection_generator
from official.vision.ops import anchor
class RetinaNetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
{
'use_separable_conv': True,
'build_anchor_boxes': True,
'is_training': False,
'has_att_heads': False
},
{
'use_separable_conv': False,
'build_anchor_boxes': True,
'is_training': False,
'has_att_heads': False
},
{
'use_separable_conv': False,
'build_anchor_boxes': False,
'is_training': False,
'has_att_heads': False
},
{
'use_separable_conv': False,
'build_anchor_boxes': False,
'is_training': True,
'has_att_heads': False
},
{
'use_separable_conv': False,
'build_anchor_boxes': True,
'is_training': True,
'has_att_heads': True
},
{
'use_separable_conv': False,
'build_anchor_boxes': True,
'is_training': False,
'has_att_heads': True
},
)
def test_build_model(self, use_separable_conv, build_anchor_boxes,
is_training, has_att_heads):
num_classes = 3
min_level = 3
max_level = 7
num_scales = 3
aspect_ratios = [1.0]
anchor_size = 3
fpn_num_filters = 256
head_num_convs = 4
head_num_filters = 256
num_anchors_per_location = num_scales * len(aspect_ratios)
image_size = 384
images = np.random.rand(2, image_size, image_size, 3)
image_shape = np.array([[image_size, image_size], [image_size, image_size]])
if build_anchor_boxes:
anchor_boxes = anchor.Anchor(
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size,
image_size=(image_size, image_size)).multilevel_boxes
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1])
else:
anchor_boxes = None
if has_att_heads:
attribute_heads = [
dict(
name='depth', type='regression', size=1, prediction_tower_name='')
]
else:
attribute_heads = None
backbone = resnet.ResNet(model_id=50)
decoder = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level,
num_filters=fpn_num_filters,
use_separable_conv=use_separable_conv)
head = dense_prediction_heads.RetinaNetHead(
min_level=min_level,
max_level=max_level,
num_classes=num_classes,
attribute_heads=attribute_heads,
num_anchors_per_location=num_anchors_per_location,
use_separable_conv=use_separable_conv,
num_convs=head_num_convs,
num_filters=head_num_filters)
generator = detection_generator.MultilevelDetectionGenerator(
max_num_detections=10)
model = retinanet_model.RetinaNetModel(
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=generator,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size)
_ = model(images, image_shape, anchor_boxes, training=is_training)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
image_size=[
(128, 128),
],
training=[True, False],
has_att_heads=[True, False],
output_intermediate_features=[True, False],
soft_nms_sigma=[None, 0.0, 0.1],
))
def test_forward(self, strategy, image_size, training, has_att_heads,
output_intermediate_features, soft_nms_sigma):
"""Test for creation of a R50-FPN RetinaNet."""
tf.keras.backend.set_image_data_format('channels_last')
num_classes = 3
min_level = 3
max_level = 7
num_scales = 3
aspect_ratios = [1.0]
num_anchors_per_location = num_scales * len(aspect_ratios)
images = np.random.rand(2, image_size[0], image_size[1], 3)
image_shape = np.array(
[[image_size[0], image_size[1]], [image_size[0], image_size[1]]])
with strategy.scope():
anchor_gen = anchor.build_anchor_generator(
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=3)
anchor_boxes = anchor_gen(image_size)
for l in anchor_boxes:
anchor_boxes[l] = tf.tile(
tf.expand_dims(anchor_boxes[l], axis=0), [2, 1, 1, 1])
backbone = resnet.ResNet(model_id=50)
decoder = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level)
if has_att_heads:
attribute_heads = [
dict(
name='depth',
type='regression',
size=1,
prediction_tower_name='')
]
else:
attribute_heads = None
head = dense_prediction_heads.RetinaNetHead(
min_level=min_level,
max_level=max_level,
num_classes=num_classes,
attribute_heads=attribute_heads,
num_anchors_per_location=num_anchors_per_location)
generator = detection_generator.MultilevelDetectionGenerator(
max_num_detections=10,
nms_version='v1',
use_cpu_nms=soft_nms_sigma is not None,
soft_nms_sigma=soft_nms_sigma)
model = retinanet_model.RetinaNetModel(
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=generator)
model_outputs = model(
images,
image_shape,
anchor_boxes,
output_intermediate_features=output_intermediate_features,
training=training)
if training:
cls_outputs = model_outputs['cls_outputs']
box_outputs = model_outputs['box_outputs']
for level in range(min_level, max_level + 1):
self.assertIn(str(level), cls_outputs)
self.assertIn(str(level), box_outputs)
self.assertAllEqual([
2,
image_size[0] // 2**level,
image_size[1] // 2**level,
num_classes * num_anchors_per_location
], cls_outputs[str(level)].numpy().shape)
self.assertAllEqual([
2,
image_size[0] // 2**level,
image_size[1] // 2**level,
4 * num_anchors_per_location
], box_outputs[str(level)].numpy().shape)
if has_att_heads:
att_outputs = model_outputs['attribute_outputs']
for att in att_outputs.values():
self.assertAllEqual([
2, image_size[0] // 2**level, image_size[1] // 2**level,
1 * num_anchors_per_location
], att[str(level)].numpy().shape)
else:
self.assertIn('detection_boxes', model_outputs)
self.assertIn('detection_scores', model_outputs)
self.assertIn('detection_classes', model_outputs)
self.assertIn('num_detections', model_outputs)
self.assertAllEqual(
[2, 10, 4], model_outputs['detection_boxes'].numpy().shape)
self.assertAllEqual(
[2, 10], model_outputs['detection_scores'].numpy().shape)
self.assertAllEqual(
[2, 10], model_outputs['detection_classes'].numpy().shape)
self.assertAllEqual(
[2,], model_outputs['num_detections'].numpy().shape)
if has_att_heads:
self.assertIn('detection_attributes', model_outputs)
self.assertAllEqual(
[2, 10, 1],
model_outputs['detection_attributes']['depth'].numpy().shape)
if output_intermediate_features:
for l in range(2, 6):
self.assertIn('backbone_{}'.format(l), model_outputs)
self.assertAllEqual([
2, image_size[0] // 2**l, image_size[1] // 2**l,
backbone.output_specs[str(l)].as_list()[-1]
], model_outputs['backbone_{}'.format(l)].numpy().shape)
for l in range(min_level, max_level + 1):
self.assertIn('decoder_{}'.format(l), model_outputs)
self.assertAllEqual([
2, image_size[0] // 2**l, image_size[1] // 2**l,
decoder.output_specs[str(l)].as_list()[-1]
], model_outputs['decoder_{}'.format(l)].numpy().shape)
def test_serialize_deserialize(self):
"""Validate the network can be serialized and deserialized."""
num_classes = 3
min_level = 3
max_level = 7
num_scales = 3
aspect_ratios = [1.0]
num_anchors_per_location = num_scales * len(aspect_ratios)
backbone = resnet.ResNet(model_id=50)
decoder = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level)
head = dense_prediction_heads.RetinaNetHead(
min_level=min_level,
max_level=max_level,
num_classes=num_classes,
num_anchors_per_location=num_anchors_per_location)
generator = detection_generator.MultilevelDetectionGenerator(
max_num_detections=10)
model = retinanet_model.RetinaNetModel(
backbone=backbone,
decoder=decoder,
head=head,
detection_generator=generator,
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=3)
config = model.get_config()
new_model = retinanet_model.RetinaNetModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 11,148 | 33.517028 | 80 | py |
models | models-master/official/vision/modeling/segmentation_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation network."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.modeling import backbones
from official.vision.modeling import segmentation_model
from official.vision.modeling.decoders import fpn
from official.vision.modeling.heads import segmentation_heads
class SegmentationNetworkTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 2),
(128, 3),
(128, 4),
(256, 2),
(256, 3),
(256, 4),
)
def test_segmentation_network_creation(
self, input_size, level):
"""Test for creation of a segmentation network."""
num_classes = 10
inputs = np.random.rand(2, input_size, input_size, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.ResNet(model_id=50)
decoder = fpn.FPN(
input_specs=backbone.output_specs, min_level=2, max_level=7)
head = segmentation_heads.SegmentationHead(num_classes, level=level)
model = segmentation_model.SegmentationModel(
backbone=backbone,
decoder=decoder,
head=head,
mask_scoring_head=None,
)
outputs = model(inputs)
self.assertAllEqual(
[2, input_size // (2**level), input_size // (2**level), num_classes],
outputs['logits'].numpy().shape)
def test_serialize_deserialize(self):
"""Validate the network can be serialized and deserialized."""
num_classes = 3
backbone = backbones.ResNet(model_id=50)
decoder = fpn.FPN(
input_specs=backbone.output_specs, min_level=3, max_level=7)
head = segmentation_heads.SegmentationHead(num_classes, level=3)
model = segmentation_model.SegmentationModel(
backbone=backbone,
decoder=decoder,
head=head
)
config = model.get_config()
new_model = segmentation_model.SegmentationModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 2,807 | 31.651163 | 79 | py |
models | models-master/official/vision/modeling/classification_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classification network."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.vision.modeling import backbones
from official.vision.modeling import classification_model
class ClassificationNetworkTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(192 * 4, 3, 12, 192, 5524416),
(384 * 4, 6, 12, 384, 21665664),
)
def test_vision_transformer_creation(self, mlp_dim, num_heads, num_layers,
hidden_size, num_params):
"""Test for creation of a Vision Transformer classifier."""
inputs = np.random.rand(2, 224, 224, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.VisionTransformer(
mlp_dim=mlp_dim,
num_heads=num_heads,
num_layers=num_layers,
hidden_size=hidden_size,
input_specs=tf.keras.layers.InputSpec(shape=[None, 224, 224, 3]),
)
self.assertEqual(backbone.count_params(), num_params)
num_classes = 1000
model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=num_classes,
dropout_rate=0.2,
)
logits = model(inputs)
self.assertAllEqual([2, num_classes], logits.numpy().shape)
@parameterized.parameters(
(128, 50, 'relu'),
(128, 50, 'relu'),
(128, 50, 'swish'),
)
def test_resnet_network_creation(self, input_size, resnet_model_id,
activation):
"""Test for creation of a ResNet-50 classifier."""
inputs = np.random.rand(2, input_size, input_size, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.ResNet(model_id=resnet_model_id, activation=activation)
self.assertEqual(backbone.count_params(), 23561152)
num_classes = 1000
model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=num_classes,
dropout_rate=0.2,
)
self.assertEqual(model.count_params(), 25610152)
logits = model(inputs)
self.assertAllEqual([2, num_classes], logits.numpy().shape)
def test_revnet_network_creation(self):
"""Test for creation of a RevNet-56 classifier."""
revnet_model_id = 56
inputs = np.random.rand(2, 224, 224, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.RevNet(model_id=revnet_model_id)
self.assertEqual(backbone.count_params(), 19473792)
num_classes = 1000
model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=num_classes,
dropout_rate=0.2,
add_head_batch_norm=True,
)
self.assertEqual(model.count_params(), 22816104)
logits = model(inputs)
self.assertAllEqual([2, num_classes], logits.numpy().shape)
@combinations.generate(
combinations.combine(
mobilenet_model_id=[
'MobileNetV1',
'MobileNetV2',
'MobileNetV3Large',
'MobileNetV3Small',
'MobileNetV3EdgeTPU',
'MobileNetMultiAVG',
'MobileNetMultiMAX',
],
filter_size_scale=[1.0, 0.75],
))
def test_mobilenet_network_creation(self, mobilenet_model_id,
filter_size_scale):
"""Test for creation of a MobileNet classifier."""
inputs = np.random.rand(2, 224, 224, 3)
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.MobileNet(
model_id=mobilenet_model_id, filter_size_scale=filter_size_scale)
num_classes = 1001
model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=num_classes,
dropout_rate=0.2,
)
logits = model(inputs)
self.assertAllEqual([2, num_classes], logits.numpy().shape)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
use_sync_bn=[False, True],
))
def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(64, 128, 128, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
backbone = backbones.ResNet(model_id=50, use_sync_bn=use_sync_bn)
model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=1000,
dropout_rate=0.2,
)
_ = model(inputs)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.one_device_strategy_gpu,
],
data_format=['channels_last', 'channels_first'],
input_dim=[1, 3, 4]))
def test_data_format_gpu(self, strategy, data_format, input_dim):
"""Test for different data formats on GPU devices."""
if data_format == 'channels_last':
inputs = np.random.rand(2, 128, 128, input_dim)
else:
inputs = np.random.rand(2, input_dim, 128, 128)
input_specs = tf.keras.layers.InputSpec(shape=inputs.shape)
tf.keras.backend.set_image_data_format(data_format)
with strategy.scope():
backbone = backbones.ResNet(model_id=50, input_specs=input_specs)
model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=1000,
input_specs=input_specs,
)
_ = model(inputs)
def test_serialize_deserialize(self):
"""Validate the classification net can be serialized and deserialized."""
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.ResNet(model_id=50)
model = classification_model.ClassificationModel(
backbone=backbone, num_classes=1000)
config = model.get_config()
new_model = classification_model.ClassificationModel.from_config(config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 7,007 | 31.901408 | 80 | py |
models | models-master/official/vision/modeling/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
from typing import Optional
import tensorflow as tf
from official.vision.configs import image_classification as classification_cfg
from official.vision.configs import maskrcnn as maskrcnn_cfg
from official.vision.configs import retinanet as retinanet_cfg
from official.vision.configs import semantic_segmentation as segmentation_cfg
from official.vision.modeling import backbones
from official.vision.modeling import classification_model
from official.vision.modeling import decoders
from official.vision.modeling import maskrcnn_model
from official.vision.modeling import retinanet_model
from official.vision.modeling import segmentation_model
from official.vision.modeling.heads import dense_prediction_heads
from official.vision.modeling.heads import instance_heads
from official.vision.modeling.heads import segmentation_heads
from official.vision.modeling.layers import detection_generator
from official.vision.modeling.layers import mask_sampler
from official.vision.modeling.layers import roi_aligner
from official.vision.modeling.layers import roi_generator
from official.vision.modeling.layers import roi_sampler
def build_classification_model(
input_specs: tf.keras.layers.InputSpec,
model_config: classification_cfg.ImageClassificationModel,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
skip_logits_layer: bool = False,
backbone: Optional[tf.keras.Model] = None) -> tf.keras.Model:
"""Builds the classification model."""
norm_activation_config = model_config.norm_activation
if not backbone:
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
model = classification_model.ClassificationModel(
backbone=backbone,
num_classes=model_config.num_classes,
input_specs=input_specs,
dropout_rate=model_config.dropout_rate,
kernel_initializer=model_config.kernel_initializer,
kernel_regularizer=l2_regularizer,
add_head_batch_norm=model_config.add_head_batch_norm,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
skip_logits_layer=skip_logits_layer)
return model
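# Example usage (illustrative sketch; the input size and config values below
# are assumptions, not defaults required by this function):
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 224, 224, 3])
#   model_config = classification_cfg.ImageClassificationModel(num_classes=1000)
#   model = build_classification_model(input_specs, model_config)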
def build_maskrcnn(input_specs: tf.keras.layers.InputSpec,
model_config: maskrcnn_cfg.MaskRCNN,
l2_regularizer: Optional[
tf.keras.regularizers.Regularizer] = None,
backbone: Optional[tf.keras.Model] = None,
decoder: Optional[tf.keras.Model] = None) -> tf.keras.Model:
"""Builds Mask R-CNN model."""
norm_activation_config = model_config.norm_activation
if not backbone:
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
backbone_features = backbone(tf.keras.Input(input_specs.shape[1:]))
if not decoder:
decoder = decoders.factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
rpn_head_config = model_config.rpn_head
roi_generator_config = model_config.roi_generator
roi_sampler_config = model_config.roi_sampler
roi_aligner_config = model_config.roi_aligner
detection_head_config = model_config.detection_head
generator_config = model_config.detection_generator
num_anchors_per_location = (
len(model_config.anchor.aspect_ratios) * model_config.anchor.num_scales)
rpn_head = dense_prediction_heads.RPNHead(
min_level=model_config.min_level,
max_level=model_config.max_level,
num_anchors_per_location=num_anchors_per_location,
num_convs=rpn_head_config.num_convs,
num_filters=rpn_head_config.num_filters,
use_separable_conv=rpn_head_config.use_separable_conv,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
detection_head = instance_heads.DetectionHead(
num_classes=model_config.num_classes,
num_convs=detection_head_config.num_convs,
num_filters=detection_head_config.num_filters,
use_separable_conv=detection_head_config.use_separable_conv,
num_fcs=detection_head_config.num_fcs,
fc_dims=detection_head_config.fc_dims,
class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer,
name='detection_head')
if decoder:
decoder_features = decoder(backbone_features)
rpn_head(decoder_features)
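  # For cascade R-CNN variants, build one detection head per cascade stage and
  # keep them in a list, with the initial (non-cascade) head at index 0.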
if roi_sampler_config.cascade_iou_thresholds:
detection_head_cascade = [detection_head]
for cascade_num in range(len(roi_sampler_config.cascade_iou_thresholds)):
detection_head = instance_heads.DetectionHead(
num_classes=model_config.num_classes,
num_convs=detection_head_config.num_convs,
num_filters=detection_head_config.num_filters,
use_separable_conv=detection_head_config.use_separable_conv,
num_fcs=detection_head_config.num_fcs,
fc_dims=detection_head_config.fc_dims,
class_agnostic_bbox_pred=detection_head_config
.class_agnostic_bbox_pred,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer,
name='detection_head_{}'.format(cascade_num + 1))
detection_head_cascade.append(detection_head)
detection_head = detection_head_cascade
roi_generator_obj = roi_generator.MultilevelROIGenerator(
pre_nms_top_k=roi_generator_config.pre_nms_top_k,
pre_nms_score_threshold=roi_generator_config.pre_nms_score_threshold,
pre_nms_min_size_threshold=(
roi_generator_config.pre_nms_min_size_threshold),
nms_iou_threshold=roi_generator_config.nms_iou_threshold,
num_proposals=roi_generator_config.num_proposals,
test_pre_nms_top_k=roi_generator_config.test_pre_nms_top_k,
test_pre_nms_score_threshold=(
roi_generator_config.test_pre_nms_score_threshold),
test_pre_nms_min_size_threshold=(
roi_generator_config.test_pre_nms_min_size_threshold),
test_nms_iou_threshold=roi_generator_config.test_nms_iou_threshold,
test_num_proposals=roi_generator_config.test_num_proposals,
use_batched_nms=roi_generator_config.use_batched_nms)
roi_sampler_cascade = []
roi_sampler_obj = roi_sampler.ROISampler(
mix_gt_boxes=roi_sampler_config.mix_gt_boxes,
num_sampled_rois=roi_sampler_config.num_sampled_rois,
foreground_fraction=roi_sampler_config.foreground_fraction,
foreground_iou_threshold=roi_sampler_config.foreground_iou_threshold,
background_iou_high_threshold=(
roi_sampler_config.background_iou_high_threshold),
background_iou_low_threshold=(
roi_sampler_config.background_iou_low_threshold))
roi_sampler_cascade.append(roi_sampler_obj)
  # Initialize additional roi samplers for cascade heads.
if roi_sampler_config.cascade_iou_thresholds:
for iou in roi_sampler_config.cascade_iou_thresholds:
roi_sampler_obj = roi_sampler.ROISampler(
mix_gt_boxes=False,
num_sampled_rois=roi_sampler_config.num_sampled_rois,
foreground_iou_threshold=iou,
background_iou_high_threshold=iou,
background_iou_low_threshold=0.0,
skip_subsampling=True)
roi_sampler_cascade.append(roi_sampler_obj)
roi_aligner_obj = roi_aligner.MultilevelROIAligner(
crop_size=roi_aligner_config.crop_size,
sample_offset=roi_aligner_config.sample_offset)
detection_generator_obj = detection_generator.DetectionGenerator(
apply_nms=generator_config.apply_nms,
pre_nms_top_k=generator_config.pre_nms_top_k,
pre_nms_score_threshold=generator_config.pre_nms_score_threshold,
nms_iou_threshold=generator_config.nms_iou_threshold,
max_num_detections=generator_config.max_num_detections,
nms_version=generator_config.nms_version,
use_cpu_nms=generator_config.use_cpu_nms,
soft_nms_sigma=generator_config.soft_nms_sigma,
use_sigmoid_probability=generator_config.use_sigmoid_probability)
if model_config.include_mask:
mask_head = instance_heads.MaskHead(
num_classes=model_config.num_classes,
upsample_factor=model_config.mask_head.upsample_factor,
num_convs=model_config.mask_head.num_convs,
num_filters=model_config.mask_head.num_filters,
use_separable_conv=model_config.mask_head.use_separable_conv,
activation=model_config.norm_activation.activation,
norm_momentum=model_config.norm_activation.norm_momentum,
norm_epsilon=model_config.norm_activation.norm_epsilon,
kernel_regularizer=l2_regularizer,
class_agnostic=model_config.mask_head.class_agnostic)
mask_sampler_obj = mask_sampler.MaskSampler(
mask_target_size=(
model_config.mask_roi_aligner.crop_size *
model_config.mask_head.upsample_factor),
num_sampled_masks=model_config.mask_sampler.num_sampled_masks)
mask_roi_aligner_obj = roi_aligner.MultilevelROIAligner(
crop_size=model_config.mask_roi_aligner.crop_size,
sample_offset=model_config.mask_roi_aligner.sample_offset)
else:
mask_head = None
mask_sampler_obj = None
mask_roi_aligner_obj = None
model = maskrcnn_model.MaskRCNNModel(
backbone=backbone,
decoder=decoder,
rpn_head=rpn_head,
detection_head=detection_head,
roi_generator=roi_generator_obj,
roi_sampler=roi_sampler_cascade,
roi_aligner=roi_aligner_obj,
detection_generator=detection_generator_obj,
mask_head=mask_head,
mask_sampler=mask_sampler_obj,
mask_roi_aligner=mask_roi_aligner_obj,
class_agnostic_bbox_pred=detection_head_config.class_agnostic_bbox_pred,
cascade_class_ensemble=detection_head_config.cascade_class_ensemble,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_scales=model_config.anchor.num_scales,
aspect_ratios=model_config.anchor.aspect_ratios,
anchor_size=model_config.anchor.anchor_size,
outer_boxes_scale=model_config.outer_boxes_scale)
return model
def build_retinanet(
input_specs: tf.keras.layers.InputSpec,
model_config: retinanet_cfg.RetinaNet,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
backbone: Optional[tf.keras.Model] = None,
decoder: Optional[tf.keras.Model] = None
) -> tf.keras.Model:
"""Builds RetinaNet model."""
norm_activation_config = model_config.norm_activation
if not backbone:
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
backbone_features = backbone(tf.keras.Input(input_specs.shape[1:]))
if not decoder:
decoder = decoders.factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
head_config = model_config.head
generator_config = model_config.detection_generator
num_anchors_per_location = (
len(model_config.anchor.aspect_ratios) * model_config.anchor.num_scales)
head = dense_prediction_heads.RetinaNetHead(
min_level=model_config.min_level,
max_level=model_config.max_level,
num_classes=model_config.num_classes,
num_anchors_per_location=num_anchors_per_location,
num_convs=head_config.num_convs,
num_filters=head_config.num_filters,
attribute_heads=[
cfg.as_dict() for cfg in (head_config.attribute_heads or [])
],
share_classification_heads=head_config.share_classification_heads,
use_separable_conv=head_config.use_separable_conv,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer,
share_level_convs=head_config.share_level_convs,
)
# Builds decoder and head so that their trainable weights are initialized
if decoder:
decoder_features = decoder(backbone_features)
_ = head(decoder_features)
# Add `input_image_size` into `tflite_post_processing_config`.
  tflite_post_processing_config = (
      generator_config.tflite_post_processing.as_dict())
tflite_post_processing_config['input_image_size'] = (input_specs.shape[1],
input_specs.shape[2])
detection_generator_obj = detection_generator.MultilevelDetectionGenerator(
apply_nms=generator_config.apply_nms,
pre_nms_top_k=generator_config.pre_nms_top_k,
pre_nms_score_threshold=generator_config.pre_nms_score_threshold,
nms_iou_threshold=generator_config.nms_iou_threshold,
max_num_detections=generator_config.max_num_detections,
nms_version=generator_config.nms_version,
use_cpu_nms=generator_config.use_cpu_nms,
soft_nms_sigma=generator_config.soft_nms_sigma,
tflite_post_processing_config=tflite_post_processing_config,
return_decoded=generator_config.return_decoded,
use_class_agnostic_nms=generator_config.use_class_agnostic_nms,
)
model = retinanet_model.RetinaNetModel(
backbone,
decoder,
head,
detection_generator_obj,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_scales=model_config.anchor.num_scales,
aspect_ratios=model_config.anchor.aspect_ratios,
anchor_size=model_config.anchor.anchor_size)
return model
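# Example usage (illustrative sketch; the number of classes and input size are
# assumptions):
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 640, 640, 3])
#   model_config = retinanet_cfg.RetinaNet(num_classes=91)
#   model = build_retinanet(input_specs, model_config)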
def build_segmentation_model(
input_specs: tf.keras.layers.InputSpec,
model_config: segmentation_cfg.SemanticSegmentationModel,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
backbone: Optional[tf.keras.Model] = None,
decoder: Optional[tf.keras.Model] = None
) -> tf.keras.Model:
"""Builds Segmentation model."""
norm_activation_config = model_config.norm_activation
if not backbone:
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
if not decoder:
decoder = decoders.factory.build_decoder(
input_specs=backbone.output_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
head_config = model_config.head
head = segmentation_heads.SegmentationHead(
num_classes=model_config.num_classes,
level=head_config.level,
num_convs=head_config.num_convs,
prediction_kernel_size=head_config.prediction_kernel_size,
num_filters=head_config.num_filters,
use_depthwise_convolution=head_config.use_depthwise_convolution,
upsample_factor=head_config.upsample_factor,
feature_fusion=head_config.feature_fusion,
low_level=head_config.low_level,
low_level_num_filters=head_config.low_level_num_filters,
activation=norm_activation_config.activation,
logit_activation=head_config.logit_activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
mask_scoring_head = None
if model_config.mask_scoring_head:
mask_scoring_head = segmentation_heads.MaskScoring(
num_classes=model_config.num_classes,
**model_config.mask_scoring_head.as_dict(),
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
model = segmentation_model.SegmentationModel(
backbone, decoder, head, mask_scoring_head=mask_scoring_head)
return model
| 17,496 | 42.633416 | 82 | py |
models | models-master/official/vision/modeling/factory_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory methods to build models."""
# Import libraries
import tensorflow as tf
from official.core import registry
from official.vision.configs import video_classification as video_classification_cfg
from official.vision.modeling import video_classification_model
from official.vision.modeling import backbones
_REGISTERED_MODEL_CLS = {}
def register_model_builder(key: str):
"""Decorates a builder of model class.
The builder should be a Callable (a class or a function).
This decorator supports registration of backbone builder as follows:
```
class MyModel(tf.keras.Model):
pass
@register_backbone_builder('mybackbone')
def builder(input_specs, config, l2_reg):
return MyModel(...)
# Builds a MyModel object.
my_backbone = build_backbone_3d(input_specs, config, l2_reg)
```
Args:
key: the key to look up the builder.
Returns:
A callable for use as class decorator that registers the decorated class
for creation from an instance of model class.
"""
return registry.register(_REGISTERED_MODEL_CLS, key)
def build_model(
model_type: str,
input_specs: tf.keras.layers.InputSpec,
model_config: video_classification_cfg.hyperparams.Config,
num_classes: int,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds backbone from a config.
Args:
model_type: string name of model type. It should be consistent with
ModelConfig.model_type.
input_specs: tf.keras.layers.InputSpec.
model_config: a OneOfConfig. Model config.
num_classes: number of classes.
l2_regularizer: tf.keras.regularizers.Regularizer instance. Default to None.
Returns:
    A tf.keras.Model instance of the requested model type.
"""
model_builder = registry.lookup(_REGISTERED_MODEL_CLS, model_type)
return model_builder(input_specs, model_config, num_classes, l2_regularizer)
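# Example usage (illustrative sketch; the input spec and default config are
# assumptions):
#
#   input_specs = tf.keras.layers.InputSpec(shape=[None, 32, 224, 224, 3])
#   model_config = video_classification_cfg.VideoClassificationModel()
#   model = build_model('video_classification', input_specs, model_config,
#                       num_classes=400)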
@register_model_builder('video_classification')
def build_video_classification_model(
input_specs: tf.keras.layers.InputSpec,
model_config: video_classification_cfg.VideoClassificationModel,
num_classes: int,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds the video classification model."""
input_specs_dict = {'image': input_specs}
norm_activation_config = model_config.norm_activation
backbone = backbones.factory.build_backbone(
input_specs=input_specs,
backbone_config=model_config.backbone,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer)
model = video_classification_model.VideoClassificationModel(
backbone=backbone,
num_classes=num_classes,
input_specs=input_specs_dict,
dropout_rate=model_config.dropout_rate,
aggregate_endpoints=model_config.aggregate_endpoints,
kernel_regularizer=l2_regularizer,
require_endpoints=model_config.require_endpoints)
return model
| 3,530 | 32.951923 | 84 | py |
models | models-master/official/vision/modeling/video_classification_model_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for video classification network."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.modeling import backbones
from official.vision.modeling import video_classification_model
class VideoClassificationNetworkTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(50, 8, 112, 'relu', False),
(50, 8, 112, 'swish', True),
)
def test_resnet3d_network_creation(self, model_id, temporal_size,
spatial_size, activation,
aggregate_endpoints):
"""Test for creation of a ResNet3D-50 classifier."""
input_specs = tf.keras.layers.InputSpec(
shape=[None, temporal_size, spatial_size, spatial_size, 3])
temporal_strides = [1, 1, 1, 1]
temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
(1, 3, 1)]
tf.keras.backend.set_image_data_format('channels_last')
backbone = backbones.ResNet3D(
model_id=model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
input_specs=input_specs,
activation=activation)
num_classes = 1000
model = video_classification_model.VideoClassificationModel(
backbone=backbone,
num_classes=num_classes,
input_specs={'image': input_specs},
dropout_rate=0.2,
aggregate_endpoints=aggregate_endpoints,
)
inputs = np.random.rand(2, temporal_size, spatial_size, spatial_size, 3)
logits = model(inputs)
self.assertAllEqual([2, num_classes], logits.numpy().shape)
def test_serialize_deserialize(self):
"""Validate the classification network can be serialized and deserialized."""
model_id = 50
temporal_strides = [1, 1, 1, 1]
temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
(1, 3, 1)]
backbone = backbones.ResNet3D(
model_id=model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes)
model = video_classification_model.VideoClassificationModel(
backbone=backbone, num_classes=1000)
config = model.get_config()
new_model = video_classification_model.VideoClassificationModel.from_config(
config)
# Validate that the config can be forced to JSON.
_ = new_model.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(model.get_config(), new_model.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,261 | 34.456522 | 81 | py |
models | models-master/official/vision/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modeling package definition."""
from official.vision.modeling import backbones
from official.vision.modeling import decoders
from official.vision.modeling import heads
from official.vision.modeling import layers
from official.vision.modeling import models
| 869 | 38.545455 | 74 | py |
models | models-master/official/vision/modeling/segmentation_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build segmentation models."""
from typing import Any, Mapping, Union, Optional, Dict
# Import libraries
import tensorflow as tf
layers = tf.keras.layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class SegmentationModel(tf.keras.Model):
"""A Segmentation class model.
Input images are passed through backbone first. Decoder network is then
applied, and finally, segmentation head is applied on the output of the
decoder network. Layers such as ASPP should be part of decoder. Any feature
fusion is done as part of the segmentation head (i.e. deeplabv3+ feature
fusion is not part of the decoder, instead it is part of the segmentation
head). This way, different feature fusion techniques can be combined with
different backbones, and decoders.
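  Example (an illustrative sketch; the backbone/decoder/head constructors come
  from other modules of this package and are not imported here):
    backbone = backbones.ResNet(model_id=50)
    decoder = fpn.FPN(input_specs=backbone.output_specs, min_level=3,
                      max_level=7)
    head = segmentation_heads.SegmentationHead(num_classes=21, level=3)
    model = SegmentationModel(backbone=backbone, decoder=decoder, head=head)
    outputs = model(images)  # outputs['logits'] holds the per-pixel logits.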
"""
def __init__(self, backbone: tf.keras.Model, decoder: tf.keras.Model,
head: tf.keras.layers.Layer,
mask_scoring_head: Optional[tf.keras.layers.Layer] = None,
**kwargs):
"""Segmentation initialization function.
Args:
backbone: a backbone network.
decoder: a decoder network. E.g. FPN.
head: segmentation head.
mask_scoring_head: mask scoring head.
**kwargs: keyword arguments to be passed.
"""
super(SegmentationModel, self).__init__(**kwargs)
self._config_dict = {
'backbone': backbone,
'decoder': decoder,
'head': head,
'mask_scoring_head': mask_scoring_head,
}
self.backbone = backbone
self.decoder = decoder
self.head = head
self.mask_scoring_head = mask_scoring_head
def call(self, inputs: tf.Tensor, training: bool = None # pytype: disable=signature-mismatch # overriding-parameter-count-checks
) -> Dict[str, tf.Tensor]:
backbone_features = self.backbone(inputs)
if self.decoder:
decoder_features = self.decoder(backbone_features)
else:
decoder_features = backbone_features
logits = self.head((backbone_features, decoder_features))
outputs = {'logits': logits}
if self.mask_scoring_head:
mask_scores = self.mask_scoring_head(logits)
outputs.update({'mask_scores': mask_scores})
return outputs
@property
def checkpoint_items(
self) -> Mapping[str, Union[tf.keras.Model, tf.keras.layers.Layer]]:
"""Returns a dictionary of items to be additionally checkpointed."""
items = dict(backbone=self.backbone, head=self.head)
if self.decoder is not None:
items.update(decoder=self.decoder)
if self.mask_scoring_head is not None:
items.update(mask_scoring_head=self.mask_scoring_head)
return items
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| 3,428 | 35.094737 | 132 | py |
models | models-master/official/vision/modeling/models/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models under Vision package."""
from official.vision.modeling.classification_model import ClassificationModel
from official.vision.modeling.maskrcnn_model import MaskRCNNModel
from official.vision.modeling.retinanet_model import RetinaNetModel
from official.vision.modeling.segmentation_model import SegmentationModel
from official.vision.modeling.video_classification_model import VideoClassificationModel
| 1,020 | 45.409091 | 88 | py |
models | models-master/official/vision/modeling/decoders/aspp.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of Atrous Spatial Pyramid Pooling (ASPP) decoder."""
from typing import Any, List, Mapping, Optional, Union
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.vision.modeling.decoders import factory
from official.vision.modeling.layers import deeplab
from official.vision.modeling.layers import nn_layers
TensorMapUnion = Union[tf.Tensor, Mapping[str, tf.Tensor]]
@tf.keras.utils.register_keras_serializable(package='Vision')
class ASPP(tf.keras.layers.Layer):
"""Creates an Atrous Spatial Pyramid Pooling (ASPP) layer."""
def __init__(
self,
level: int,
dilation_rates: List[int],
num_filters: int = 256,
pool_kernel_size: Optional[int] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
activation: str = 'relu',
dropout_rate: float = 0.0,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
use_depthwise_convolution: bool = False,
spp_layer_version: str = 'v1',
output_tensor: bool = False,
**kwargs):
"""Initializes an Atrous Spatial Pyramid Pooling (ASPP) layer.
Args:
level: An `int` level to apply ASPP.
dilation_rates: A `list` of dilation rates.
num_filters: An `int` number of output filters in ASPP.
pool_kernel_size: A `list` of [height, width] of pooling kernel size or
        None. Pooling size is with respect to the original image size and will
        be scaled down by 2**level. If None, global average pooling is used.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
activation: A `str` activation to be used in ASPP.
dropout_rate: A `float` rate for dropout regularization.
kernel_initializer: A `str` name of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
interpolation: A `str` of interpolation method. It should be one of
`bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`,
`gaussian`, or `mitchellcubic`.
use_depthwise_convolution: If True depthwise separable convolutions will
be added to the Atrous spatial pyramid pooling.
spp_layer_version: A `str` of spatial pyramid pooling layer version.
      output_tensor: Whether to output a single tensor or a dictionary of
        tensors. Defaults to False.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._config_dict = {
'level': level,
'dilation_rates': dilation_rates,
'num_filters': num_filters,
'pool_kernel_size': pool_kernel_size,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'activation': activation,
'dropout_rate': dropout_rate,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'interpolation': interpolation,
'use_depthwise_convolution': use_depthwise_convolution,
'spp_layer_version': spp_layer_version,
'output_tensor': output_tensor
}
self._aspp_layer = deeplab.SpatialPyramidPooling if self._config_dict[
'spp_layer_version'] == 'v1' else nn_layers.SpatialPyramidPooling
def build(self, input_shape):
pool_kernel_size = None
if self._config_dict['pool_kernel_size']:
pool_kernel_size = [
int(p_size // 2**self._config_dict['level'])
for p_size in self._config_dict['pool_kernel_size'] # pytype: disable=attribute-error # trace-all-classes
]
self.aspp = self._aspp_layer(
output_channels=self._config_dict['num_filters'],
dilation_rates=self._config_dict['dilation_rates'],
pool_kernel_size=pool_kernel_size,
use_sync_bn=self._config_dict['use_sync_bn'],
batchnorm_momentum=self._config_dict['norm_momentum'],
batchnorm_epsilon=self._config_dict['norm_epsilon'],
activation=self._config_dict['activation'],
dropout=self._config_dict['dropout_rate'],
kernel_initializer=self._config_dict['kernel_initializer'],
kernel_regularizer=self._config_dict['kernel_regularizer'],
interpolation=self._config_dict['interpolation'],
use_depthwise_convolution=self._config_dict['use_depthwise_convolution']
)
def call(self, inputs: TensorMapUnion) -> TensorMapUnion:
"""Calls the Atrous Spatial Pyramid Pooling (ASPP) layer on an input.
    If `output_tensor` is False, the output of ASPP is a dict of
    {`level`: `tf.Tensor`} even if only one level is present, which keeps it
    compatible with the rest of the segmentation model interfaces.
    If `output_tensor` is True, a single tensor is output.
Args:
inputs: A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or
a `dict` of `tf.Tensor` where
- key: A `str` of the level of the multilevel feature maps.
- values: A `tf.Tensor` of shape [batch, height_l, width_l,
filter_size].
Returns:
A `tf.Tensor` of shape [batch, height_l, width_l, filter_size] or a `dict`
of `tf.Tensor` where
- key: A `str` of the level of the multilevel feature maps.
- values: A `tf.Tensor` of output of ASPP module.
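    Example (an illustrative call; `backbone` and `images` are assumptions):
      aspp_layer = ASPP(level=3, dilation_rates=[6, 12, 18], num_filters=256)
      endpoints = backbone(images)  # multilevel dict, e.g. {'2': ..., '3': ...}
      outputs = aspp_layer(endpoints)  # {'3': [batch, h / 8, w / 8, 256]}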
"""
outputs = {}
level = str(self._config_dict['level'])
backbone_output = inputs[level] if isinstance(inputs, dict) else inputs
outputs = self.aspp(backbone_output)
return outputs if self._config_dict['output_tensor'] else {level: outputs}
def get_config(self) -> Mapping[str, Any]:
base_config = super().get_config()
return dict(list(base_config.items()) + list(self._config_dict.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@factory.register_decoder_builder('aspp')
def build_aspp_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds ASPP decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone. Note this is for consistent
interface, and is not used by ASPP decoder.
model_config: A OneOfConfig. Model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
Returns:
A `tf.keras.Model` instance of the ASPP decoder.
Raises:
ValueError: If the model_config.decoder.type is not `aspp`.
"""
del input_specs # input_specs is not used by ASPP decoder.
decoder_type = model_config.decoder.type
decoder_cfg = model_config.decoder.get()
if decoder_type != 'aspp':
raise ValueError(f'Inconsistent decoder type {decoder_type}. '
'Need to be `aspp`.')
norm_activation_config = model_config.norm_activation
return ASPP(
level=decoder_cfg.level,
dilation_rates=decoder_cfg.dilation_rates,
num_filters=decoder_cfg.num_filters,
use_depthwise_convolution=decoder_cfg.use_depthwise_convolution,
pool_kernel_size=decoder_cfg.pool_kernel_size,
dropout_rate=decoder_cfg.dropout_rate,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
activation=norm_activation_config.activation,
kernel_regularizer=l2_regularizer,
spp_layer_version=decoder_cfg.spp_layer_version,
output_tensor=decoder_cfg.output_tensor)
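# Example usage (illustrative sketch; assumes `model_config.decoder.type` is
# 'aspp', as in the semantic segmentation configs, and that `backbone` has
# already been built):
#
#   decoder = build_aspp_decoder(
#       input_specs=backbone.output_specs, model_config=model_config)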
| 8,600 | 41.161765 | 117 | py |
models | models-master/official/vision/modeling/decoders/aspp_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aspp."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import aspp
class ASPPTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(3, [6, 12, 18, 24], 128, 'v1'),
(3, [6, 12, 18], 128, 'v1'),
(3, [6, 12], 256, 'v1'),
(4, [6, 12, 18, 24], 128, 'v2'),
(4, [6, 12, 18], 128, 'v2'),
(4, [6, 12], 256, 'v2'),
)
def test_network_creation(self, level, dilation_rates, num_filters,
spp_layer_version):
"""Test creation of ASPP."""
input_size = 256
tf.keras.backend.set_image_data_format('channels_last')
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
backbone = resnet.ResNet(model_id=50)
network = aspp.ASPP(
level=level,
dilation_rates=dilation_rates,
num_filters=num_filters,
spp_layer_version=spp_layer_version)
endpoints = backbone(inputs)
feats = network(endpoints)
self.assertIn(str(level), feats)
self.assertAllEqual(
[1, input_size // 2**level, input_size // 2**level, num_filters],
feats[str(level)].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
level=3,
dilation_rates=[6, 12],
num_filters=256,
pool_kernel_size=None,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
activation='relu',
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
interpolation='bilinear',
dropout_rate=0.2,
use_depthwise_convolution='false',
spp_layer_version='v1',
output_tensor=False,
dtype='float32',
name='aspp',
trainable=True)
network = aspp.ASPP(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = aspp.ASPP.from_config(network.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/vision/modeling/decoders/factory_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoder factory functions."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.vision import configs
from official.vision.configs import decoders as decoders_cfg
from official.vision.modeling import decoders
from official.vision.modeling.decoders import factory
class FactoryTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
num_filters=[128, 256], use_separable_conv=[True, False]))
def test_fpn_decoder_creation(self, num_filters, use_separable_conv):
"""Test creation of FPN decoder."""
min_level = 3
max_level = 7
input_specs = {}
for level in range(min_level, max_level):
input_specs[str(level)] = tf.TensorShape(
[1, 128 // (2**level), 128 // (2**level), 3])
network = decoders.FPN(
input_specs=input_specs,
num_filters=num_filters,
use_separable_conv=use_separable_conv,
use_sync_bn=True)
model_config = configs.retinanet.RetinaNet()
model_config.min_level = min_level
model_config.max_level = max_level
model_config.num_classes = 10
model_config.input_size = [None, None, 3]
model_config.decoder = decoders_cfg.Decoder(
type='fpn',
fpn=decoders_cfg.FPN(
num_filters=num_filters, use_separable_conv=use_separable_conv))
factory_network = factory.build_decoder(
input_specs=input_specs, model_config=model_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
@combinations.generate(
combinations.combine(
num_filters=[128, 256],
num_repeats=[3, 5],
use_separable_conv=[True, False]))
def test_nasfpn_decoder_creation(self, num_filters, num_repeats,
use_separable_conv):
"""Test creation of NASFPN decoder."""
min_level = 3
max_level = 7
input_specs = {}
for level in range(min_level, max_level):
input_specs[str(level)] = tf.TensorShape(
[1, 128 // (2**level), 128 // (2**level), 3])
network = decoders.NASFPN(
input_specs=input_specs,
num_filters=num_filters,
num_repeats=num_repeats,
use_separable_conv=use_separable_conv,
use_sync_bn=True)
model_config = configs.retinanet.RetinaNet()
model_config.min_level = min_level
model_config.max_level = max_level
model_config.num_classes = 10
model_config.input_size = [None, None, 3]
model_config.decoder = decoders_cfg.Decoder(
type='nasfpn',
nasfpn=decoders_cfg.NASFPN(
num_filters=num_filters,
num_repeats=num_repeats,
use_separable_conv=use_separable_conv))
factory_network = factory.build_decoder(
input_specs=input_specs, model_config=model_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
@combinations.generate(
combinations.combine(
level=[3, 4],
dilation_rates=[[6, 12, 18], [6, 12]],
num_filters=[128, 256]))
def test_aspp_decoder_creation(self, level, dilation_rates, num_filters):
"""Test creation of ASPP decoder."""
input_specs = {'1': tf.TensorShape([1, 128, 128, 3])}
network = decoders.ASPP(
level=level,
dilation_rates=dilation_rates,
num_filters=num_filters,
use_sync_bn=True)
model_config = configs.semantic_segmentation.SemanticSegmentationModel()
model_config.num_classes = 10
model_config.input_size = [None, None, 3]
model_config.decoder = decoders_cfg.Decoder(
type='aspp',
aspp=decoders_cfg.ASPP(
level=level, dilation_rates=dilation_rates,
num_filters=num_filters))
factory_network = factory.build_decoder(
input_specs=input_specs, model_config=model_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
    # Due to calling `super().get_config()` in the aspp layer, everything but
    # the names of the two layer instances is the same, so we force equal names
    # to avoid a false alarm.
factory_network_config['name'] = network_config['name']
self.assertEqual(network_config, factory_network_config)
def test_identity_decoder_creation(self):
"""Test creation of identity decoder."""
model_config = configs.retinanet.RetinaNet()
model_config.num_classes = 2
model_config.input_size = [None, None, 3]
model_config.decoder = decoders_cfg.Decoder(
type='identity', identity=decoders_cfg.Identity())
factory_network = factory.build_decoder(
input_specs=None, model_config=model_config)
self.assertIsNone(factory_network)
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/vision/modeling/decoders/fpn_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FPN."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import mobilenet
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import fpn
class FPNTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(256, 3, 7, False, False, 'sum'),
(256, 3, 7, False, True, 'sum'),
(256, 3, 7, True, False, 'concat'),
(256, 3, 7, True, True, 'concat'),
)
def test_network_creation(self, input_size, min_level, max_level,
use_separable_conv, use_keras_layer, fusion_type):
"""Test creation of FPN."""
tf.keras.backend.set_image_data_format('channels_last')
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
backbone = resnet.ResNet(model_id=50)
network = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level,
fusion_type=fusion_type,
use_separable_conv=use_separable_conv,
use_keras_layer=use_keras_layer)
endpoints = backbone(inputs)
feats = network(endpoints)
for level in range(min_level, max_level + 1):
self.assertIn(str(level), feats)
self.assertAllEqual(
[1, input_size // 2**level, input_size // 2**level, 256],
feats[str(level)].shape.as_list())
@parameterized.parameters(
(256, 3, 7, False, False),
(256, 3, 7, False, True),
(256, 3, 7, True, False),
(256, 3, 7, True, True),
)
def test_network_creation_with_mobilenet(self, input_size, min_level,
max_level, use_separable_conv,
use_keras_layer):
"""Test creation of FPN with mobilenet backbone."""
tf.keras.backend.set_image_data_format('channels_last')
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
backbone = mobilenet.MobileNet(model_id='MobileNetV2')
network = fpn.FPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level,
use_separable_conv=use_separable_conv,
use_keras_layer=use_keras_layer)
endpoints = backbone(inputs)
feats = network(endpoints)
for level in range(min_level, max_level + 1):
self.assertIn(str(level), feats)
self.assertAllEqual(
[1, input_size // 2**level, input_size // 2**level, 256],
feats[str(level)].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
input_specs=resnet.ResNet(model_id=50).output_specs,
min_level=3,
max_level=7,
num_filters=256,
fusion_type='sum',
use_separable_conv=False,
use_keras_layer=False,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = fpn.FPN(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = fpn.FPN.from_config(network.get_config())
    # Validate that the config can be serialized to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/vision/modeling/decoders/nasfpn_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for NAS-FPN."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import resnet
from official.vision.modeling.decoders import nasfpn
class NASFPNTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(256, 3, 7, False),
(256, 3, 7, True),
)
def test_network_creation(self, input_size, min_level, max_level,
use_separable_conv):
"""Test creation of NAS-FPN."""
tf.keras.backend.set_image_data_format('channels_last')
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
num_filters = 256
backbone = resnet.ResNet(model_id=50)
network = nasfpn.NASFPN(
input_specs=backbone.output_specs,
min_level=min_level,
max_level=max_level,
num_filters=num_filters,
use_separable_conv=use_separable_conv)
endpoints = backbone(inputs)
feats = network(endpoints)
for level in range(min_level, max_level + 1):
self.assertIn(str(level), feats)
self.assertAllEqual(
[1, input_size // 2**level, input_size // 2**level, num_filters],
feats[str(level)].shape.as_list())
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/vision/modeling/decoders/nasfpn.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of NAS-FPN."""
from typing import Any, List, Mapping, Optional, Tuple
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.decoders import factory
from official.vision.ops import spatial_transform_ops
# The fixed NAS-FPN architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, combine_fn, (input_offset0, input_offset1), is_output).
NASFPN_BLOCK_SPECS = [
(4, 'attention', (1, 3), False),
(4, 'sum', (1, 5), False),
(3, 'sum', (0, 6), True),
(4, 'sum', (6, 7), True),
(5, 'attention', (7, 8), True),
(7, 'attention', (6, 9), True),
(6, 'attention', (9, 10), True),
]
class BlockSpec():
"""A container class that specifies the block configuration for NAS-FPN."""
def __init__(self, level: int, combine_fn: str,
input_offsets: Tuple[int, int], is_output: bool):
self.level = level
self.combine_fn = combine_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(
block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]:
"""Builds the list of BlockSpec objects for NAS-FPN."""
if not block_specs:
block_specs = NASFPN_BLOCK_SPECS
logging.info('Building NAS-FPN block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
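

# Example (illustrative sketch, not part of the library): a custom topology can
# be passed to `NASFPN` through its `block_specs` argument. Each tuple follows
# the format documented above: (block_level, combine_fn,
# (input_offset0, input_offset1), is_output). The levels and offsets below are
# made up for illustration and are not a tuned architecture.
def _example_custom_block_specs():
  """Builds BlockSpec objects for a small, made-up NAS-FPN topology."""
  return build_block_specs([
      (4, 'sum', (0, 1), False),
      (3, 'attention', (1, 2), True),
      (5, 'sum', (2, 3), True),
  ])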
@tf.keras.utils.register_keras_serializable(package='Vision')
class NASFPN(tf.keras.Model):
"""Creates a NAS-FPN model.
This implements the paper:
Golnaz Ghiasi, Tsung-Yi Lin, Ruoming Pang, Quoc V. Le.
NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection.
(https://arxiv.org/abs/1904.07392)
"""
def __init__(
self,
input_specs: Mapping[str, tf.TensorShape],
min_level: int = 3,
max_level: int = 7,
block_specs: Optional[List[BlockSpec]] = None,
num_filters: int = 256,
num_repeats: int = 5,
use_separable_conv: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a NAS-FPN model.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
min_level: An `int` of minimum level in FPN output feature maps.
max_level: An `int` of maximum level in FPN output feature maps.
block_specs: a list of BlockSpec objects that specifies the NAS-FPN
network topology. By default, the previously discovered architecture is
used.
num_filters: An `int` number of filters in FPN layers.
num_repeats: number of repeats for feature pyramid network.
use_separable_conv: A `bool`. If True use separable convolution for
convolution in FPN layers.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A `str` name of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
self._config_dict = {
'input_specs': input_specs,
'min_level': min_level,
'max_level': max_level,
'num_filters': num_filters,
'num_repeats': num_repeats,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._min_level = min_level
self._max_level = max_level
self._block_specs = (
build_block_specs() if block_specs is None else block_specs
)
self._num_repeats = num_repeats
self._conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
self._norm_op = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._norm_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
'synchronized': self._config_dict['use_sync_bn'],
}
self._activation = tf_utils.get_activation(activation)
# Gets input feature pyramid from backbone.
inputs = self._build_input_pyramid(input_specs, min_level)
# Projects the input features.
feats = []
for level in range(self._min_level, self._max_level + 1):
if str(level) in inputs.keys():
feats.append(self._resample_feature_map(
inputs[str(level)], level, level, self._config_dict['num_filters']))
else:
feats.append(self._resample_feature_map(
feats[-1], level - 1, level, self._config_dict['num_filters']))
    # Repeatedly builds the NAS-FPN modules.
for _ in range(self._num_repeats):
output_feats = self._build_feature_pyramid(feats)
feats = [output_feats[level]
for level in range(self._min_level, self._max_level + 1)]
self._output_specs = {
str(level): output_feats[level].get_shape()
for level in range(min_level, max_level + 1)
}
output_feats = {str(level): output_feats[level]
for level in output_feats.keys()}
super(NASFPN, self).__init__(inputs=inputs, outputs=output_feats, **kwargs)
def _build_input_pyramid(self, input_specs: Mapping[str, tf.TensorShape],
min_level: int):
assert isinstance(input_specs, dict)
if min(input_specs.keys()) > str(min_level):
raise ValueError(
'Backbone min level should be less or equal to FPN min level')
inputs = {}
for level, spec in input_specs.items():
inputs[level] = tf.keras.Input(shape=spec[1:])
return inputs
def _resample_feature_map(self,
inputs,
input_level,
target_level,
target_num_filters=256):
x = inputs
_, _, _, input_num_filters = x.get_shape().as_list()
if input_num_filters != target_num_filters:
x = self._conv_op(
filters=target_num_filters,
kernel_size=1,
padding='same',
**self._conv_kwargs)(x)
x = self._norm_op(**self._norm_kwargs)(x)
if input_level < target_level:
stride = int(2 ** (target_level - input_level))
return tf.keras.layers.MaxPool2D(
pool_size=stride, strides=stride, padding='same')(x)
if input_level > target_level:
scale = int(2 ** (input_level - target_level))
return spatial_transform_ops.nearest_upsampling(x, scale=scale)
# Force output x to be the same dtype as mixed precision policy. This avoids
# dtype mismatch when one input (by default float32 dtype) does not meet all
# the above conditions and is output unchanged, while other inputs are
# processed to have different dtype, e.g., using bfloat16 on TPU.
compute_dtype = tf.keras.layers.Layer().dtype_policy.compute_dtype
if (compute_dtype is not None) and (x.dtype != compute_dtype):
return tf.cast(x, dtype=compute_dtype)
else:
return x
@property
def _conv_kwargs(self):
if self._config_dict['use_separable_conv']:
return {
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
else:
return {
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
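  # Note on the 'attention' combine_fn implemented below (added explanation):
  # the gate `m` is a per-channel sigmoid of the global max over the spatial
  # dimensions of `feat0`, so `feat1` is rescaled by that gate before being
  # added to `feat0`.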
def _global_attention(self, feat0, feat1):
m = tf.math.reduce_max(feat0, axis=[1, 2], keepdims=True)
m = tf.math.sigmoid(m)
return feat0 + feat1 * m
def _build_feature_pyramid(self, feats):
num_output_connections = [0] * len(feats)
num_output_levels = self._max_level - self._min_level + 1
feat_levels = list(range(self._min_level, self._max_level + 1))
for i, block_spec in enumerate(self._block_specs):
new_level = block_spec.level
# Checks the range of input_offsets.
for input_offset in block_spec.input_offsets:
if input_offset >= len(feats):
raise ValueError(
'input_offset ({}) is larger than num feats({})'.format(
input_offset, len(feats)))
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
# Update graph with inputs.
node0 = feats[input0]
node0_level = feat_levels[input0]
num_output_connections[input0] += 1
node0 = self._resample_feature_map(node0, node0_level, new_level)
node1 = feats[input1]
node1_level = feat_levels[input1]
num_output_connections[input1] += 1
node1 = self._resample_feature_map(node1, node1_level, new_level)
# Combine node0 and node1 to create new feat.
if block_spec.combine_fn == 'sum':
new_node = node0 + node1
elif block_spec.combine_fn == 'attention':
if node0_level >= node1_level:
new_node = self._global_attention(node0, node1)
else:
new_node = self._global_attention(node1, node0)
else:
raise ValueError('unknown combine_fn `{}`.'
.format(block_spec.combine_fn))
# Add intermediate nodes that do not have any connections to output.
if block_spec.is_output:
for j, (feat, feat_level, num_output) in enumerate(
zip(feats, feat_levels, num_output_connections)):
if num_output == 0 and feat_level == new_level:
num_output_connections[j] += 1
feat_ = self._resample_feature_map(feat, feat_level, new_level)
new_node += feat_
new_node = self._activation(new_node)
new_node = self._conv_op(
filters=self._config_dict['num_filters'],
kernel_size=(3, 3),
padding='same',
**self._conv_kwargs)(new_node)
new_node = self._norm_op(**self._norm_kwargs)(new_node)
feats.append(new_node)
feat_levels.append(new_level)
num_output_connections.append(0)
output_feats = {}
for i in range(len(feats) - num_output_levels, len(feats)):
level = feat_levels[i]
output_feats[level] = feats[i]
logging.info('Output feature pyramid: %s', output_feats)
return output_feats
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_decoder_builder('nasfpn')
def build_nasfpn_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds NASFPN decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A OneOfConfig. Model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
Returns:
A `tf.keras.Model` instance of the NASFPN decoder.
Raises:
ValueError: If the model_config.decoder.type is not `nasfpn`.
"""
decoder_type = model_config.decoder.type
decoder_cfg = model_config.decoder.get()
if decoder_type != 'nasfpn':
raise ValueError(f'Inconsistent decoder type {decoder_type}. '
'Need to be `nasfpn`.')
norm_activation_config = model_config.norm_activation
return NASFPN(
input_specs=input_specs,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_filters=decoder_cfg.num_filters,
num_repeats=decoder_cfg.num_repeats,
use_separable_conv=decoder_cfg.use_separable_conv,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)

# File: models-master/official/vision/modeling/decoders/factory.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder registers and factory method.
One can register a new decoder model with the following two steps:
1 Import the factory and register the decoder builder in the decoder file.
2 Import the decoder class in decoders/__init__.py.
```
# my_decoder.py
from modeling.decoders import factory
class MyDecoder():
...
@factory.register_decoder_builder('my_decoder')
def build_my_decoder():
return MyDecoder()
# decoders/__init__.py adds import
from modeling.decoders.my_decoder import MyDecoder
```
If one wants the MyDecoder class to be used only by a specific binary, then
don't import the decoder module in decoders/__init__.py; instead, import it
in the place that uses it.
"""
from typing import Any, Callable, Mapping, Optional, Union
# Import libraries
import tensorflow as tf
from official.core import registry
from official.modeling import hyperparams
_REGISTERED_DECODER_CLS = {}
def register_decoder_builder(key: str) -> Callable[..., Any]:
"""Decorates a builder of decoder class.
The builder should be a Callable (a class or a function).
This decorator supports registration of decoder builder as follows:
```
class MyDecoder(tf.keras.Model):
pass
@register_decoder_builder('mydecoder')
def builder(input_specs, config, l2_reg):
return MyDecoder(...)
# Builds a MyDecoder object.
  my_decoder = build_decoder(input_specs, config, l2_reg)
```
Args:
key: A `str` of key to look up the builder.
Returns:
A callable for using as class decorator that registers the decorated class
for creation from an instance of task_config_cls.
"""
return registry.register(_REGISTERED_DECODER_CLS, key)
@register_decoder_builder('identity')
def build_identity(
input_specs: Optional[Mapping[str, tf.TensorShape]] = None,
model_config: Optional[hyperparams.Config] = None,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None) -> None:
"""Builds identity decoder from a config.
All the input arguments are not used by identity decoder but kept here to
ensure the interface is consistent.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A `OneOfConfig` of model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to
None.
Returns:
An instance of the identity decoder.
"""
del input_specs, model_config, l2_regularizer # Unused by identity decoder.
def build_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
**kwargs) -> Union[None, tf.keras.Model, tf.keras.layers.Layer]: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds decoder from a config.
A decoder can be a keras.Model, a keras.layers.Layer, or None. If it is not
None, the decoder will take features from the backbone as input and generate
decoded feature maps. If it is None, such as an identity decoder, the decoder
is skipped and features from the backbone are regarded as model output.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A `OneOfConfig` of model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to
None.
**kwargs: Additional keyword args to be passed to decoder builder.
Returns:
An instance of the decoder.
"""
decoder_builder = registry.lookup(_REGISTERED_DECODER_CLS,
model_config.decoder.type)
return decoder_builder(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer,
**kwargs)
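

# Example usage (illustrative sketch, not part of the library API): resolving a
# decoder from a config through the registry. The config classes are the same
# ones used by the unit tests; the imports are local to the function to keep
# the sketch self-contained and avoid import cycles at module load time.
def _example_build_identity_decoder():
  """Builds the decoder selected by a RetinaNet config with `identity` type."""
  from official.vision import configs
  from official.vision.configs import decoders as decoders_cfg
  model_config = configs.retinanet.RetinaNet()
  model_config.decoder = decoders_cfg.Decoder(
      type='identity', identity=decoders_cfg.Identity())
  # The `identity` decoder resolves to None: backbone features are used
  # directly as the model output.
  return build_decoder(input_specs=None, model_config=model_config)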

# File: models-master/official/vision/modeling/decoders/fpn.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the definitions of Feature Pyramid Networks (FPN)."""
from typing import Any, Mapping, Optional
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.decoders import factory
from official.vision.ops import spatial_transform_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class FPN(tf.keras.Model):
"""Creates a Feature Pyramid Network (FPN).
This implements the paper:
Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and
Serge Belongie.
Feature Pyramid Networks for Object Detection.
(https://arxiv.org/pdf/1612.03144)
"""
def __init__(
self,
input_specs: Mapping[str, tf.TensorShape],
min_level: int = 3,
max_level: int = 7,
num_filters: int = 256,
fusion_type: str = 'sum',
use_separable_conv: bool = False,
use_keras_layer: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a Feature Pyramid Network (FPN).
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
min_level: An `int` of minimum level in FPN output feature maps.
max_level: An `int` of maximum level in FPN output feature maps.
num_filters: An `int` number of filters in FPN layers.
fusion_type: A `str` of `sum` or `concat`. Whether performing sum or
concat for feature fusion.
use_separable_conv: A `bool`. If True use separable convolution for
convolution in FPN layers.
      use_keras_layer: A `bool`. If True, use Keras layers as much as possible.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A `str` name of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
self._config_dict = {
'input_specs': input_specs,
'min_level': min_level,
'max_level': max_level,
'num_filters': num_filters,
'fusion_type': fusion_type,
'use_separable_conv': use_separable_conv,
'use_keras_layer': use_keras_layer,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
if use_separable_conv:
conv2d = tf.keras.layers.SeparableConv2D
else:
conv2d = tf.keras.layers.Conv2D
norm = tf.keras.layers.BatchNormalization
activation_fn = tf_utils.get_activation(activation, use_keras_layer=True)
# Build input feature pyramid.
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Get input feature pyramid from backbone.
logging.info('FPN input_specs: %s', input_specs)
inputs = self._build_input_pyramid(input_specs, min_level)
backbone_max_level = min(int(max(inputs.keys())), max_level)
# Build lateral connections.
feats_lateral = {}
for level in range(min_level, backbone_max_level + 1):
feats_lateral[str(level)] = conv2d(
filters=num_filters,
kernel_size=1,
padding='same',
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=f'lateral_{level}')(
inputs[str(level)])
# Build top-down path.
feats = {str(backbone_max_level): feats_lateral[str(backbone_max_level)]}
for level in range(backbone_max_level - 1, min_level - 1, -1):
feat_a = spatial_transform_ops.nearest_upsampling(
feats[str(level + 1)], 2, use_keras_layer=use_keras_layer)
feat_b = feats_lateral[str(level)]
if fusion_type == 'sum':
if use_keras_layer:
feats[str(level)] = tf.keras.layers.Add()([feat_a, feat_b])
else:
feats[str(level)] = feat_a + feat_b
elif fusion_type == 'concat':
if use_keras_layer:
feats[str(level)] = tf.keras.layers.Concatenate(axis=-1)(
[feat_a, feat_b])
else:
feats[str(level)] = tf.concat([feat_a, feat_b], axis=-1)
else:
raise ValueError('Fusion type {} not supported.'.format(fusion_type))
# TODO(fyangf): experiment with removing bias in conv2d.
# Build post-hoc 3x3 convolution kernel.
for level in range(min_level, backbone_max_level + 1):
feats[str(level)] = conv2d(
filters=num_filters,
strides=1,
kernel_size=3,
padding='same',
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=f'post_hoc_{level}')(
feats[str(level)])
# TODO(fyangf): experiment with removing bias in conv2d.
# Build coarser FPN levels introduced for RetinaNet.
for level in range(backbone_max_level + 1, max_level + 1):
feats_in = feats[str(level - 1)]
if level > backbone_max_level + 1:
feats_in = activation_fn(feats_in)
feats[str(level)] = conv2d(
filters=num_filters,
strides=2,
kernel_size=3,
padding='same',
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=f'coarser_{level}')(
feats_in)
# Apply batch norm layers.
for level in range(min_level, max_level + 1):
feats[str(level)] = norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn,
name=f'norm_{level}')(
feats[str(level)])
self._output_specs = {
str(level): feats[str(level)].get_shape()
for level in range(min_level, max_level + 1)
}
super(FPN, self).__init__(inputs=inputs, outputs=feats, **kwargs)
def _build_input_pyramid(self, input_specs: Mapping[str, tf.TensorShape],
min_level: int):
assert isinstance(input_specs, dict)
if min(input_specs.keys()) > str(min_level):
raise ValueError(
'Backbone min level should be less or equal to FPN min level')
inputs = {}
for level, spec in input_specs.items():
inputs[level] = tf.keras.Input(shape=spec[1:])
return inputs
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_decoder_builder('fpn')
def build_fpn_decoder(
input_specs: Mapping[str, tf.TensorShape],
model_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds FPN decoder from a config.
Args:
input_specs: A `dict` of input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
model_config: A OneOfConfig. Model config.
l2_regularizer: A `tf.keras.regularizers.Regularizer` instance. Default to
None.
Returns:
A `tf.keras.Model` instance of the FPN decoder.
Raises:
ValueError: If the model_config.decoder.type is not `fpn`.
"""
decoder_type = model_config.decoder.type
decoder_cfg = model_config.decoder.get()
if decoder_type != 'fpn':
raise ValueError(f'Inconsistent decoder type {decoder_type}. '
'Need to be `fpn`.')
norm_activation_config = model_config.norm_activation
return FPN(
input_specs=input_specs,
min_level=model_config.min_level,
max_level=model_config.max_level,
num_filters=decoder_cfg.num_filters,
fusion_type=decoder_cfg.fusion_type,
use_separable_conv=decoder_cfg.use_separable_conv,
use_keras_layer=decoder_cfg.use_keras_layer,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
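

# Example usage (illustrative sketch, not part of the library API): building an
# FPN on top of a ResNet-50 backbone, mirroring the unit tests. The input size
# and level range below are assumptions chosen for illustration.
def _example_fpn_usage():
  """Runs FPN on ResNet-50 endpoints for a dummy 256x256 image."""
  from official.vision.modeling.backbones import resnet  # Local import for the sketch only.
  backbone = resnet.ResNet(model_id=50)
  decoder = FPN(input_specs=backbone.output_specs, min_level=3, max_level=7)
  endpoints = backbone(tf.ones([1, 256, 256, 3]))
  feats = decoder(endpoints)
  # feats has keys '3'..'7'; e.g. feats['3'] has shape [1, 32, 32, 256].
  return feats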

# File: models-master/official/vision/modeling/decoders/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoders package definition."""
from official.vision.modeling.decoders.aspp import ASPP
from official.vision.modeling.decoders.fpn import FPN
from official.vision.modeling.decoders.nasfpn import NASFPN

# File: models-master/official/vision/modeling/layers/deeplab_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ASPP."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.layers import deeplab
class DeeplabTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(None,),
([32, 32],),
)
def test_aspp(self, pool_kernel_size):
    inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32)
    layer = deeplab.SpatialPyramidPooling(output_channels=256,
                                          dilation_rates=[6, 12, 18],
                                          pool_kernel_size=pool_kernel_size)
output = layer(inputs)
self.assertAllEqual([None, 64, 64, 256], output.shape)
def test_aspp_invalid_shape(self):
inputs = tf.keras.Input(shape=(64, 64), dtype=tf.float32)
layer = deeplab.SpatialPyramidPooling(output_channels=256,
dilation_rates=[6, 12, 18])
with self.assertRaises(ValueError):
_ = layer(inputs)
def test_config_with_custom_name(self):
layer = deeplab.SpatialPyramidPooling(256, [5], name='aspp')
config = layer.get_config()
layer_1 = deeplab.SpatialPyramidPooling.from_config(config)
self.assertEqual(layer_1.name, layer.name)
if __name__ == '__main__':
tf.test.main()

# File: models-master/official/vision/modeling/layers/roi_sampler.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of ROI sampler."""
from typing import Optional, Tuple, Union
# Import libraries
import tensorflow as tf
from official.vision.modeling.layers import box_sampler
from official.vision.ops import box_matcher
from official.vision.ops import iou_similarity
from official.vision.ops import target_gather
# The return type can be a tuple of 4 or 5 tf.Tensor.
ROISamplerReturnType = Union[
Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]]
@tf.keras.utils.register_keras_serializable(package='Vision')
class ROISampler(tf.keras.layers.Layer):
"""Samples ROIs and assigns targets to the sampled ROIs."""
def __init__(self,
mix_gt_boxes: bool = True,
num_sampled_rois: int = 512,
foreground_fraction: float = 0.25,
foreground_iou_threshold: float = 0.5,
background_iou_high_threshold: float = 0.5,
background_iou_low_threshold: float = 0,
skip_subsampling: bool = False,
**kwargs):
"""Initializes a ROI sampler.
Args:
mix_gt_boxes: A `bool` of whether to mix the groundtruth boxes with
proposed ROIs.
num_sampled_rois: An `int` of the number of sampled ROIs per image.
foreground_fraction: A `float` in [0, 1], what percentage of proposed ROIs
should be sampled from the foreground boxes.
foreground_iou_threshold: A `float` that represents the IoU threshold for
a box to be considered as positive (if >= `foreground_iou_threshold`).
background_iou_high_threshold: A `float` that represents the IoU threshold
for a box to be considered as negative (if overlap in
[`background_iou_low_threshold`, `background_iou_high_threshold`]).
background_iou_low_threshold: A `float` that represents the IoU threshold
for a box to be considered as negative (if overlap in
[`background_iou_low_threshold`, `background_iou_high_threshold`])
      skip_subsampling: a bool that determines if we want to skip the sampling
        procedure that balances the fg/bg classes. Used for upper FRCNN layers
        in cascade RCNN.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'mix_gt_boxes': mix_gt_boxes,
'num_sampled_rois': num_sampled_rois,
'foreground_fraction': foreground_fraction,
'foreground_iou_threshold': foreground_iou_threshold,
'background_iou_high_threshold': background_iou_high_threshold,
'background_iou_low_threshold': background_iou_low_threshold,
'skip_subsampling': skip_subsampling,
}
self._sim_calc = iou_similarity.IouSimilarity()
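    # With thresholds ordered as [background_low, background_high, foreground],
    # the matcher assigns indicator -3 to proposals whose best IoU is below
    # background_low (invalid), -1 within [background_low, background_high)
    # (negatives), -2 within [background_high, foreground) (ignored), and 1 at
    # or above the foreground threshold (positives). These indicator values are
    # consumed in `call` below.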
self._box_matcher = box_matcher.BoxMatcher(
thresholds=[
background_iou_low_threshold, background_iou_high_threshold,
foreground_iou_threshold
],
indicators=[-3, -1, -2, 1])
self._target_gather = target_gather.TargetGather()
self._sampler = box_sampler.BoxSampler(
num_sampled_rois, foreground_fraction)
super().__init__(**kwargs)
def call(
self,
boxes: tf.Tensor,
gt_boxes: tf.Tensor,
gt_classes: tf.Tensor,
gt_outer_boxes: Optional[tf.Tensor] = None) -> ROISamplerReturnType:
"""Assigns the proposals with groundtruth classes and performs subsmpling.
Given `proposed_boxes`, `gt_boxes`, and `gt_classes`, the function uses the
following algorithm to generate the final `num_samples_per_image` RoIs.
1. Calculates the IoU between each proposal box and each gt_boxes.
2. Assigns each proposed box with a groundtruth class and box by choosing
the largest IoU overlap.
3. Samples `num_samples_per_image` boxes from all proposed boxes, and
returns box_targets, class_targets, and RoIs.
Args:
boxes: A `tf.Tensor` of shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment. The last dimension is the
box coordinates w.r.t. the scaled images in [ymin, xmin, ymax, xmax]
format.
gt_boxes: A `tf.Tensor` of shape of [batch_size, MAX_NUM_INSTANCES, 4].
The coordinates of gt_boxes are in the pixel coordinates of the scaled
image. This tensor might have padding of values -1 indicating the
invalid box coordinates.
gt_classes: A `tf.Tensor` with a shape of [batch_size, MAX_NUM_INSTANCES].
This tensor might have paddings with values of -1 indicating the invalid
classes.
gt_outer_boxes: A `tf.Tensor` of shape of [batch_size, MAX_NUM_INSTANCES,
        4]. The coordinates of gt_outer_boxes are in the pixel coordinates of
the scaled image. This tensor might have padding of values -1 indicating
the invalid box coordinates. Ignored if not provided.
Returns:
sampled_rois: A `tf.Tensor` of shape of [batch_size, K, 4], representing
the coordinates of the sampled RoIs, where K is the number of the
sampled RoIs, i.e. K = num_samples_per_image.
sampled_gt_boxes: A `tf.Tensor` of shape of [batch_size, K, 4], storing
the box coordinates of the matched groundtruth boxes of the samples
RoIs.
sampled_gt_outer_boxes: A `tf.Tensor` of shape of [batch_size, K, 4],
storing the box coordinates of the matched groundtruth outer boxes of
the samples RoIs. This field is missing if gt_outer_boxes is None.
sampled_gt_classes: A `tf.Tensor` of shape of [batch_size, K], storing the
classes of the matched groundtruth boxes of the sampled RoIs.
sampled_gt_indices: A `tf.Tensor` of shape of [batch_size, K], storing the
        indices of the sampled groundtruth boxes in the original `gt_boxes`
tensor, i.e.,
gt_boxes[sampled_gt_indices[:, i]] = sampled_gt_boxes[:, i].
"""
gt_boxes = tf.cast(gt_boxes, dtype=boxes.dtype)
if self._config_dict['mix_gt_boxes']:
boxes = tf.concat([boxes, gt_boxes], axis=1)
boxes_invalid_mask = tf.less(
tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)
gt_invalid_mask = tf.less(
tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0)
similarity_matrix = self._sim_calc(boxes, gt_boxes, boxes_invalid_mask,
gt_invalid_mask)
matched_gt_indices, match_indicators = self._box_matcher(similarity_matrix)
positive_matches = tf.greater_equal(match_indicators, 0)
negative_matches = tf.equal(match_indicators, -1)
ignored_matches = tf.equal(match_indicators, -2)
invalid_matches = tf.equal(match_indicators, -3)
background_mask = tf.expand_dims(
tf.logical_or(negative_matches, invalid_matches), -1)
gt_classes = tf.expand_dims(gt_classes, axis=-1)
matched_gt_classes = self._target_gather(gt_classes, matched_gt_indices,
background_mask)
matched_gt_classes = tf.where(background_mask,
tf.zeros_like(matched_gt_classes),
matched_gt_classes)
matched_gt_boxes = self._target_gather(gt_boxes, matched_gt_indices,
tf.tile(background_mask, [1, 1, 4]))
matched_gt_boxes = tf.where(background_mask,
tf.zeros_like(matched_gt_boxes),
matched_gt_boxes)
if gt_outer_boxes is not None:
matched_gt_outer_boxes = self._target_gather(
gt_outer_boxes, matched_gt_indices, tf.tile(background_mask,
[1, 1, 4]))
matched_gt_outer_boxes = tf.where(background_mask,
tf.zeros_like(matched_gt_outer_boxes),
matched_gt_outer_boxes)
matched_gt_indices = tf.where(
tf.squeeze(background_mask, -1), -tf.ones_like(matched_gt_indices),
matched_gt_indices)
if self._config_dict['skip_subsampling']:
matched_gt_classes = tf.squeeze(matched_gt_classes, axis=-1)
if gt_outer_boxes is None:
return (boxes, matched_gt_boxes, matched_gt_classes, matched_gt_indices)
return (boxes, matched_gt_boxes, matched_gt_outer_boxes,
matched_gt_classes, matched_gt_indices)
sampled_indices = self._sampler(
positive_matches, negative_matches, ignored_matches)
sampled_rois = self._target_gather(boxes, sampled_indices)
sampled_gt_boxes = self._target_gather(matched_gt_boxes, sampled_indices)
sampled_gt_classes = tf.squeeze(self._target_gather(
matched_gt_classes, sampled_indices), axis=-1)
sampled_gt_indices = tf.squeeze(self._target_gather(
tf.expand_dims(matched_gt_indices, -1), sampled_indices), axis=-1)
if gt_outer_boxes is None:
return (sampled_rois, sampled_gt_boxes, sampled_gt_classes,
sampled_gt_indices)
sampled_gt_outer_boxes = self._target_gather(matched_gt_outer_boxes,
sampled_indices)
return (sampled_rois, sampled_gt_boxes, sampled_gt_outer_boxes,
sampled_gt_classes, sampled_gt_indices)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)

# File: models-master/official/vision/modeling/layers/detection_generator.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of generators to generate the final detections."""
import contextlib
from typing import Any, Dict, List, Optional, Mapping, Sequence, Tuple
# Import libraries
import numpy as np
import tensorflow as tf
from official.vision.modeling.layers import edgetpu
from official.vision.ops import box_ops
from official.vision.ops import nms
from official.vision.ops import preprocess_ops
def _generate_detections_v1(
boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str, tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None,
):
"""Generates the final detections given the model outputs.
  The implementation unrolls the batch dimension and processes images one by
  one. It requires the batch dimension to be statically known and it is TPU
  compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]` for box predictions on all feature levels. The N
is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
attributes: None or a dict of (attribute_name, attributes) pairs. Each
attributes is a `tf.Tensor` with shape `[batch_size, N, num_classes,
attribute_size]` or `[batch_size, N, 1, attribute_size]` for attribute
predictions on all feature levels. The N is the number of total anchors on
all levels. Can be None if no attribute learning is required.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A scalar representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
      When soft_nms_sigma=0.0, we fall back to standard NMS. If set to None
      (the default), `tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, 4]` representing top detected boxes in
`[y1, x1, y2, x2]`.
nms_scores: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing sorted confidence scores
for detected boxes. The values are between `[0, 1]`.
nms_classes: An `int` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing classes for detected
boxes.
    valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]`; only the
top `valid_detections` boxes are valid detections.
nms_attributes: None or a dict of (attribute_name, attributes). Each
attribute is a `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, attribute_size]` representing attribute
predictions for detected boxes. Can be an empty dict if no attribute
learning is required.
"""
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(
nmsed_boxes_i,
nmsed_scores_i,
nmsed_classes_i,
valid_detections_i,
nmsed_att_i,
) = _generate_detections_per_image(
boxes[i],
scores[i],
attributes={att_name: att[i] for att_name, att in attributes.items()}
if attributes
else {},
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
soft_nms_sigma=soft_nms_sigma,
)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return (
nmsed_boxes,
nmsed_scores,
nmsed_classes,
valid_detections,
nmsed_attributes,
)
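

# Example usage (illustrative sketch, not part of the library API): running the
# batch-unrolled NMS on random inputs purely to show the expected output
# shapes. The box/score shapes and `max_num_detections` below are assumptions
# chosen for illustration.
def _example_generate_detections_v1():
  """Applies `_generate_detections_v1` to dummy class-agnostic boxes."""
  boxes = tf.random.uniform([2, 10, 1, 4])  # [batch, anchors, 1, 4]
  scores = tf.random.uniform([2, 10, 3])  # [batch, anchors, num_classes]
  nmsed_boxes, nmsed_scores, nmsed_classes, valid, _ = _generate_detections_v1(
      boxes, scores, max_num_detections=5)
  # nmsed_boxes: [2, 5, 4]; nmsed_scores and nmsed_classes: [2, 5]; valid: [2].
  return nmsed_boxes, nmsed_scores, nmsed_classes, valid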
def _generate_detections_per_image(
boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str, tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None,
):
"""Generates the final detections per image given the model outputs.
Args:
boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
      stacks box predictions on all feature levels. The N is the number of
      total anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
probability on all feature levels. The N is the number of total anchors on
all levels. The num_classes is the number of classes predicted by the
model. Note that the class_outputs here is the raw score.
attributes: If not None, a dict of `tf.Tensor`. Each value is in shape `[N,
num_classes, attribute_size]` or `[N, 1, attribute_size]` of attribute
predictions on all feature levels. The N is the number of total anchors on
all levels.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS. If set to None,
`tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
representing top detected boxes in `[y1, x1, y2, x2]`.
nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
sorted confidence scores for detected boxes. The values are between [0,
1].
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [1]; only the top
`valid_detections` boxes are valid detections.
nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
`[max_num_detections, attribute_size]` representing attribute predictions
for detected boxes. Can be an empty dict if `attributes` is None.
"""
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min(num_classes_for_box - 1, i)]
scores_i = scores[:, i]
# Obtains pre_nms_top_k before running NMS.
scores_i, indices = tf.nn.top_k(
scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k)
)
boxes_i = tf.gather(boxes_i, indices)
if soft_nms_sigma is not None:
(nmsed_indices_i, nmsed_scores_i) = (
tf.image.non_max_suppression_with_scores(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
soft_nms_sigma=soft_nms_sigma,
name='nms_detections_' + str(i),
)
)
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_boxes_i, max_num_detections, 0.0
)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_scores_i, max_num_detections, -1.0
)
else:
(nmsed_indices_i, nmsed_num_valid_i) = (
tf.image.non_max_suppression_padded(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_to_max_output_size=True,
name='nms_detections_' + str(i),
)
)
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
# Sets scores of invalid boxes to -1.
nmsed_scores_i = tf.where(
tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]),
nmsed_scores_i,
-tf.ones_like(nmsed_scores_i),
)
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for att_name, att in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min(num_classes_for_attr - 1, i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_att_i, max_num_detections, 0.0
)
nmsed_attributes[att_name].append(nmsed_att_i)
  # Concatenates results from all classes and sorts them.
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True
)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(
tf.cast(tf.greater(nmsed_scores, -1), tf.int32)
)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(
nmsed_attributes[att_name], indices
)
return (
nmsed_boxes,
nmsed_scores,
nmsed_classes,
valid_detections,
nmsed_attributes,
)
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
"""Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: A `tf.Tensor` with shape
`[batch_size, pre_nms_num_detections, num_classes]`.
"""
batch_size, num_anchors, num_class = scores_in.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores_trans, k=pre_nms_num_detections, sorted=True
)
top_k_scores = tf.reshape(
top_k_scores, [batch_size, num_class, pre_nms_num_detections]
)
top_k_indices = tf.reshape(
top_k_indices, [batch_size, num_class, pre_nms_num_detections]
)
return tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose(
top_k_indices, [0, 2, 1]
)
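# Illustrative usage sketch added by the editor (not part of the original
# module); the shapes below are hypothetical. Top-k is taken independently per
# class along the anchor axis, and the indices refer back to the original N
# axis.
def _example_select_top_k_scores():
  scores = tf.random.uniform([2, 1000, 4])  # [batch_size, N, num_classes]
  top_scores, top_indices = _select_top_k_scores(scores, 100)
  # Both outputs have shape [2, 100, 4].
  return top_scores, top_indices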
def _generate_detections_v2_class_agnostic(
boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100
):
"""Generates the final detections by applying class-agnostic NMS.
Args:
    boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
`valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections_class_agnostic'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, total_anchors, _ = scores.get_shape().as_list()
# Keeps only the class with highest score for each predicted box.
scores_condensed, classes_ids = tf.nn.top_k(
scores, k=1, sorted=True
)
scores_condensed = tf.squeeze(scores_condensed, axis=[2])
if num_classes_for_box > 1:
boxes = tf.gather(boxes, classes_ids, axis=2, batch_dims=2)
boxes_condensed = tf.squeeze(boxes, axis=[2])
classes_condensed = tf.squeeze(classes_ids, axis=[2])
# Selects top pre_nms_num scores and indices before NMS.
num_anchors_filtered = min(total_anchors, pre_nms_top_k)
scores_filtered, indices_filtered = tf.nn.top_k(
scores_condensed, k=num_anchors_filtered, sorted=True
)
classes_filtered = tf.gather(
classes_condensed, indices_filtered, axis=1, batch_dims=1
)
boxes_filtered = tf.gather(
boxes_condensed, indices_filtered, axis=1, batch_dims=1
)
tf.ensure_shape(boxes_filtered, [None, num_anchors_filtered, 4])
tf.ensure_shape(classes_filtered, [None, num_anchors_filtered])
tf.ensure_shape(scores_filtered, [None, num_anchors_filtered])
boxes_filtered = tf.cast(
boxes_filtered, tf.float32
)
scores_filtered = tf.cast(
scores_filtered, tf.float32
)
# Apply class-agnostic NMS on boxes.
(nmsed_indices_padded, valid_detections) = (
tf.image.non_max_suppression_padded(
boxes=boxes_filtered,
scores=scores_filtered,
max_output_size=max_num_detections,
iou_threshold=nms_iou_threshold,
pad_to_max_output_size=True,
score_threshold=pre_nms_score_threshold,
sorted_input=True,
name='nms_detections'
)
)
nmsed_boxes = tf.gather(
boxes_filtered, nmsed_indices_padded, batch_dims=1, axis=1
)
nmsed_scores = tf.gather(
scores_filtered, nmsed_indices_padded, batch_dims=1, axis=1
)
nmsed_classes = tf.gather(
classes_filtered, nmsed_indices_padded, batch_dims=1, axis=1
)
# Sets the padded boxes, scores, and classes to 0.
padding_mask = tf.reshape(
tf.range(max_num_detections), [1, -1]
) < tf.reshape(valid_detections, [-1, 1])
nmsed_boxes = nmsed_boxes * tf.cast(
tf.expand_dims(padding_mask, axis=2), nmsed_boxes.dtype
)
nmsed_scores = nmsed_scores * tf.cast(padding_mask, nmsed_scores.dtype)
nmsed_classes = nmsed_classes * tf.cast(padding_mask, nmsed_classes.dtype)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
def _generate_detections_v2_class_aware(
boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
):
"""Generates the final detections by using class-aware NMS.
Args:
    boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
`valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, total_anchors, num_classes = scores.get_shape().as_list()
# Selects top pre_nms_num scores and indices before NMS.
scores, indices = _select_top_k_scores(
scores, min(total_anchors, pre_nms_top_k)
)
for i in range(num_classes):
boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :]
scores_i = scores[:, :, i]
# Obtains pre_nms_top_k before running NMS.
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
# Filter out scores.
boxes_i, scores_i = box_ops.filter_boxes_by_scores(
boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold
)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(
tf.cast(scores_i, tf.float32),
tf.cast(boxes_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True
)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(
input_tensor=tf.cast(tf.greater(nmsed_scores, 0.0), tf.int32), axis=1
)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
def _generate_detections_v2(
boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
use_class_agnostic_nms: Optional[bool] = None,
):
"""Generates the final detections given the model outputs.
This implementation unrolls classes dimension while using the tf.while_loop
to implement the batched NMS, so that it can be parallelized at the batch
dimension. It should give better performance comparing to v1 implementation.
It is TPU compatible.
Args:
    boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
use_class_agnostic_nms: A `bool` of whether non max suppression is operated
on all the boxes using max scores across all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
`valid_detections` boxes are valid detections.
"""
if use_class_agnostic_nms:
return _generate_detections_v2_class_agnostic(
boxes=boxes,
scores=scores,
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
)
return _generate_detections_v2_class_aware(
boxes=boxes,
scores=scores,
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
)
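# Illustrative usage sketch added by the editor (not part of the original
# module); the shapes below are hypothetical. With `use_class_agnostic_nms`
# enabled the dispatcher runs a single NMS over the per-anchor max scores;
# otherwise it runs the class-aware path, one NMS pass per class.
def _example_generate_detections_v2():
  boxes = tf.random.uniform([2, 1000, 1, 4])  # [batch_size, N, 1, 4]
  scores = tf.random.uniform([2, 1000, 5])  # [batch_size, N, num_classes]
  return _generate_detections_v2(
      boxes, scores, max_num_detections=10, use_class_agnostic_nms=True)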
def _generate_detections_v3(
boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
refinements: int = 2,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Generates the detections given the model outputs using NMS for EdgeTPU.
Args:
    boxes: A `tf.Tensor` with shape `[batch_size, num_classes, N, 4]` or
      `[batch_size, 1, N, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, num_classes, N]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
refinements: Quality parameter for NMS algorithm.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
`valid_detections` boxes are valid detections.
Raises:
ValueError if inputs shapes are not valid.
"""
one = tf.constant(1, dtype=scores.dtype)
with tf.name_scope('generate_detections'):
batch_size, num_box_classes, box_locations, sides = (
boxes.get_shape().as_list()
)
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, num_classes, locations = scores.get_shape().as_list()
if num_box_classes != 1 and num_box_classes != num_classes:
raise ValueError('Boxes should have either 1 class or same as scores.')
if locations != box_locations:
raise ValueError('Number of locations is different.')
if sides != 4:
raise ValueError('Number of sides is incorrect.')
# Selects pre_nms_score_threshold scores before NMS.
boxes, scores = box_ops.filter_boxes_by_scores(
boxes, scores, min_score_threshold=pre_nms_score_threshold
)
# EdgeTPU-friendly class-wise NMS, -1 for invalid.
indices = edgetpu.non_max_suppression_padded(
boxes,
scores,
max_num_detections,
iou_threshold=nms_iou_threshold,
refinements=refinements,
)
# Gather NMS-ed boxes and scores.
safe_indices = tf.nn.relu(indices) # 0 for invalid
invalid_detections = safe_indices - indices # 1 for invalid, 0 for valid
valid_detections = one - invalid_detections # 0 for invalid, 1 for valid
safe_indices = tf.cast(safe_indices, tf.int32)
boxes = tf.gather(boxes, safe_indices, axis=2, batch_dims=2)
boxes = tf.cast(tf.expand_dims(valid_detections, -1), boxes.dtype) * boxes
scores = valid_detections * tf.gather(
scores, safe_indices, axis=2, batch_dims=2
)
    # Complement with class numbers.
classes = tf.constant(np.arange(num_classes), dtype=scores.dtype)
classes = tf.reshape(classes, [1, num_classes, 1])
classes = tf.tile(classes, [batch_size, 1, max_num_detections])
# Flatten classes, locations. Class = -1 for invalid detection
scores = tf.reshape(scores, [batch_size, num_classes * max_num_detections])
boxes = tf.reshape(boxes, [batch_size, num_classes * max_num_detections, 4])
classes = tf.reshape(
valid_detections * classes - invalid_detections,
[batch_size, num_classes * max_num_detections],
)
# Filter top-k across boxes of all classes
scores, indices = tf.nn.top_k(scores, k=max_num_detections, sorted=True)
boxes = tf.gather(boxes, indices, batch_dims=1, axis=1)
classes = tf.gather(classes, indices, batch_dims=1, axis=1)
invalid_detections = tf.nn.relu(classes) - classes
valid_detections = tf.reduce_sum(one - invalid_detections, axis=1)
return boxes, scores, classes, valid_detections
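# Editor-added sketch with hypothetical tensors: unlike the v1/v2 variants,
# the EdgeTPU-friendly v3 NMS expects the class axis ahead of the anchor axis,
# so v2-style inputs need a transpose before calling it.
def _example_v3_input_layout(boxes_v2: tf.Tensor, scores_v2: tf.Tensor):
  # boxes_v2: [batch_size, N, num_classes, 4].
  # scores_v2: [batch_size, N, num_classes].
  boxes_v3 = tf.transpose(boxes_v2, [0, 2, 1, 3])
  scores_v3 = tf.transpose(scores_v2, [0, 2, 1])
  return _generate_detections_v3(boxes_v3, scores_v3, max_num_detections=10)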
def _generate_detections_batched(
boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_score_threshold: float,
nms_iou_threshold: float,
max_num_detections: int,
):
"""Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
  generates detected boxes. Note that this uses batched NMS, which is not
supported on TPU currently.
Args:
    boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
`valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=max_num_detections,
max_total_size=max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_per_class=False,
clip_boxes=False,
)
)
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
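# Editor-added usage sketch (hypothetical shapes): this variant simply wraps
# `tf.image.combined_non_max_suppression`, and the box tensor may carry either
# one box per class or, as below, a single shared box per anchor.
def _example_generate_detections_batched():
  boxes = tf.random.uniform([2, 100, 1, 4])  # [batch_size, N, 1, 4]
  scores = tf.random.uniform([2, 100, 5])  # [batch_size, N, num_classes]
  return _generate_detections_batched(
      boxes,
      scores,
      pre_nms_score_threshold=0.05,
      nms_iou_threshold=0.5,
      max_num_detections=10)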
def _generate_detections_tflite_implements_signature(
config: Dict[str, Any]
) -> str:
"""Returns `experimental_implements` signature for TFLite's custom NMS op.
This signature encodes the arguments to correctly initialize TFLite's custom
post-processing op in the MLIR converter.
For details on `experimental_implements` see here:
https://www.tensorflow.org/api_docs/python/tf/function
Args:
config: A dictionary of configs defining parameters for TFLite NMS op.
Returns:
An `experimental_implements` signature string.
"""
scale_value = 1.0
implements_signature = [
'name: "%s"' % 'TFLite_Detection_PostProcess',
'attr { key: "max_detections" value { i: %d } }'
% config['max_detections'],
'attr { key: "max_classes_per_detection" value { i: %d } }'
% config['max_classes_per_detection'],
'attr { key: "use_regular_nms" value { b: %s } }'
% str(config['use_regular_nms']).lower(),
'attr { key: "nms_score_threshold" value { f: %f } }'
% config['nms_score_threshold'],
'attr { key: "nms_iou_threshold" value { f: %f } }'
% config['nms_iou_threshold'],
'attr { key: "y_scale" value { f: %f } }' % scale_value,
'attr { key: "x_scale" value { f: %f } }' % scale_value,
'attr { key: "h_scale" value { f: %f } }' % scale_value,
'attr { key: "w_scale" value { f: %f } }' % scale_value,
'attr { key: "num_classes" value { i: %d } }' % config['num_classes'],
]
implements_signature = ' '.join(implements_signature)
return implements_signature
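# Editor-added sketch: a minimal, hypothetical config showing the keys read by
# `_generate_detections_tflite_implements_signature`. In practice
# `_generate_detections_tflite` fills in `num_classes` before building the
# signature.
def _example_tflite_implements_signature():
  config = {
      'max_detections': 100,
      'max_classes_per_detection': 1,
      'use_regular_nms': False,
      'nms_score_threshold': 0.05,
      'nms_iou_threshold': 0.5,
      'num_classes': 90,
  }
  return _generate_detections_tflite_implements_signature(config)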
def _generate_detections_tflite(
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: Mapping[str, tf.Tensor],
config: Dict[str, Any],
) -> Sequence[Any]:
"""Generate detections for conversion to TFLite.
Mathematically same as class-agnostic NMS, except that the last portion of
the TF graph constitutes a dummy `tf.function` that contains an annotation
for conversion to TFLite's custom NMS op. Using this custom op allows
features like post-training quantization & accelerator support.
NOTE: This function does NOT return a valid output, and is only meant to
generate a SavedModel for TFLite conversion via MLIR. The generated SavedModel
should not be used for inference.
For TFLite op details, see tensorflow/lite/kernels/detection_postprocess.cc
Args:
raw_boxes: A dictionary of tensors for raw boxes. Key is level of features
and value is a tensor denoting a level of boxes with shape [1, H, W, 4 *
num_anchors].
raw_scores: A dictionary of tensors for classes. Key is level of features
and value is a tensor denoting a level of logits with shape [1, H, W,
num_class * num_anchors].
anchor_boxes: A dictionary of tensors for anchor boxes. Key is level of
features and value is a tensor denoting a level of anchors with shape
[num_anchors, 4].
config: A dictionary of configs defining parameters for TFLite NMS op.
Returns:
    A (dummy) tuple of (boxes, scores, classes, num_detections).
Raises:
ValueError: If the last dimension of predicted boxes is not divisible by 4,
or the last dimension of predicted scores is not divisible by number of
anchors per location.
"""
scores, boxes, anchors = [], [], []
levels = list(raw_scores.keys())
min_level = int(min(levels))
max_level = int(max(levels))
batch_size = tf.shape(raw_scores[str(min_level)])[0]
num_anchors_per_locations_times_4 = (
raw_boxes[str(min_level)].get_shape().as_list()[-1]
)
if num_anchors_per_locations_times_4 % 4 != 0:
raise ValueError(
'The last dimension of predicted boxes should be divisible by 4.'
)
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
  if (raw_scores[str(min_level)].get_shape().as_list()[-1]
      % num_anchors_per_locations != 0):
raise ValueError(
'The last dimension of predicted scores should be divisible by'
f' {num_anchors_per_locations}.'
)
num_classes = (
raw_scores[str(min_level)].get_shape().as_list()[-1]
// num_anchors_per_locations
)
config.update({'num_classes': num_classes})
for i in range(min_level, max_level + 1):
scores.append(tf.reshape(raw_scores[str(i)], [batch_size, -1, num_classes]))
boxes.append(tf.reshape(raw_boxes[str(i)], [batch_size, -1, 4]))
anchors.append(tf.reshape(anchor_boxes[str(i)], [-1, 4]))
scores = tf.sigmoid(tf.concat(scores, 1))
boxes = tf.concat(boxes, 1)
anchors = tf.concat(anchors, 0)
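  # Converts anchors from the corner encoding [ymin, xmin, ymax, xmax] to the
  # center-size encoding [ycenter, xcenter, height, width] expected by the
  # TFLite post-processing op.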
ycenter_a = (anchors[..., 0] + anchors[..., 2]) / 2
xcenter_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
anchors = tf.stack([ycenter_a, xcenter_a, ha, wa], axis=-1)
if config.get('normalize_anchor_coordinates', False):
# TFLite's object detection APIs require normalized anchors.
height, width = config['input_image_size']
normalize_factor = tf.constant(
[height, width, height, width], dtype=tf.float32
)
anchors = anchors / normalize_factor
# There is no TF equivalent for TFLite's custom post-processing op.
# So we add an 'empty' composite function here, that is legalized to the
# custom op with MLIR.
# For details, see: tensorflow/compiler/mlir/lite/utils/nms_utils.cc
@tf.function(
experimental_implements=_generate_detections_tflite_implements_signature(
config
)
)
# pylint: disable=g-unused-argument,unused-argument
def dummy_post_processing(input_boxes, input_scores, input_anchors):
boxes = tf.constant(0.0, dtype=tf.float32, name='boxes')
scores = tf.constant(0.0, dtype=tf.float32, name='scores')
classes = tf.constant(0.0, dtype=tf.float32, name='classes')
num_detections = tf.constant(0.0, dtype=tf.float32, name='num_detections')
return boxes, classes, scores, num_detections
if config.get('omit_nms', False):
dummy_classes = tf.constant(0.0, dtype=tf.float32, name='classes')
dummy_num_detections = tf.constant(
0.0, dtype=tf.float32, name='num_detections')
return boxes, dummy_classes, scores, dummy_num_detections
return dummy_post_processing(boxes, scores, anchors)[::-1]
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionGenerator(tf.keras.layers.Layer):
"""Generates the final detected boxes with scores and classes."""
def __init__(
self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v2',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
use_sigmoid_probability: bool = False,
**kwargs,
):
"""Initializes a detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string of `batched`, `v1` or `v2` that specifies the NMS
        version.
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
use_sigmoid_probability: A `bool`, if true, use sigmoid to get
probability, otherwise use softmax.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
'use_sigmoid_probability': use_sigmoid_probability,
}
super(DetectionGenerator, self).__init__(**kwargs)
def __call__(
self,
raw_boxes: tf.Tensor,
raw_scores: tf.Tensor,
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
regression_weights: Optional[List[float]] = None,
bbox_per_class: bool = True,
):
"""Generates final detections.
Args:
raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
representing the class-specific box coordinates relative to anchors.
raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
        representing the class logits before applying score activation.
anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
regression_weights: A list of four float numbers to scale coordinates.
bbox_per_class: A `bool`. If True, perform per-class box regression.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` `tf.Tensor` of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
"""
if self._config_dict['use_sigmoid_probability']:
box_scores = tf.math.sigmoid(raw_scores)
else:
box_scores = tf.nn.softmax(raw_scores, axis=-1)
# Removes the background class.
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[-1]
box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1])
if bbox_per_class:
num_detections = num_locations * (num_classes - 1)
raw_boxes = tf.reshape(
raw_boxes, [batch_size, num_locations, num_classes, 4]
)
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1])
anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1]
)
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
# Box decoding.
decoded_boxes = box_ops.decode_boxes(
raw_boxes, anchor_boxes, weights=regression_weights
)
# Box clipping.
if image_shape is not None:
decoded_boxes = box_ops.clip_boxes(
decoded_boxes, tf.expand_dims(image_shape, axis=1)
)
if bbox_per_class:
decoded_boxes = tf.reshape(
decoded_boxes, [batch_size, num_locations, num_classes - 1, 4]
)
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': decoded_boxes,
'decoded_box_scores': box_scores,
}
# Optionally force the NMS be run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
decoded_boxes,
box_scores,
self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections'],
)
)
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = (
_generate_detections_v1(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self._config_dict[
'pre_nms_score_threshold'
],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma'],
)
)
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self._config_dict[
'pre_nms_score_threshold'
],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
)
)
else:
raise ValueError(
'NMS version {} not supported.'.format(
self._config_dict['nms_version']
)
)
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
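# Editor-added usage sketch (hypothetical sizes): the generator consumes
# class-specific box regressions and class logits (background at index 0) plus
# the matching anchors, and returns the post-NMS detections as a dictionary.
def _example_detection_generator():
  generator = DetectionGenerator(nms_version='v2', max_num_detections=10)
  batch_size, num_rois, num_classes = 1, 300, 3  # includes background class
  raw_boxes = tf.zeros([batch_size, num_rois, num_classes * 4])
  raw_scores = tf.random.uniform([batch_size, num_rois, num_classes])
  anchor_boxes = tf.random.uniform([batch_size, num_rois, 4], maxval=100.0)
  image_shape = tf.constant([[100.0, 100.0]])
  return generator(raw_boxes, raw_scores, anchor_boxes, image_shape)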
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelDetectionGenerator(tf.keras.layers.Layer):
"""Generates detected boxes with scores and classes for one-stage detector."""
def __init__(
self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v1',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
tflite_post_processing_config: Optional[Dict[str, Any]] = None,
pre_nms_top_k_sharding_block: Optional[int] = None,
nms_v3_refinements: Optional[int] = None,
return_decoded: Optional[bool] = None,
use_class_agnostic_nms: Optional[bool] = None,
**kwargs,
):
"""Initializes a multi-level detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are thrown
away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string of `batched`, `v1`, `v2`, `v3` or `tflite` that
        specifies the NMS version.
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
tflite_post_processing_config: An optional dictionary containing
post-processing parameters used for TFLite custom NMS op.
pre_nms_top_k_sharding_block: For v3 (edge tpu friendly) NMS, avoids
creating long axis for pre_nms_top_k. Will do top_k in shards of size
[num_classes, pre_nms_top_k_sharding_block * boxes_per_location]
      nms_v3_refinements: For v3 (edge tpu friendly) NMS, sets how close the
        result should be to standard NMS. When None, 2 is used. Here are some
        experimental deviations for different refinement values:
if == 0, AP is reduced 1.0%, AR is reduced 5% on COCO
if == 1, AP is reduced 0.2%, AR is reduced 2% on COCO
if == 2, AP is reduced <0.1%, AR is reduced <1% on COCO
return_decoded: A `bool` of whether to return decoded boxes before NMS
regardless of whether `apply_nms` is True or not.
use_class_agnostic_nms: A `bool` of whether non max suppression is
operated on all the boxes using max scores across all classes.
**kwargs: Additional keyword arguments passed to Layer.
Raises:
      ValueError: If `use_class_agnostic_nms` is required but `nms_version` is
        not specified as `v2`.
"""
if use_class_agnostic_nms and nms_version != 'v2':
raise ValueError(
'If not using TFLite custom NMS, `use_class_agnostic_nms` can only be'
' enabled for NMS v2 for now, but NMS {} is used! If you are using'
' TFLite NMS, please configure TFLite custom NMS for class-agnostic'
' NMS.'.format(nms_version)
)
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
'return_decoded': return_decoded,
'use_class_agnostic_nms': use_class_agnostic_nms,
}
    # Don't store them if they were not defined.
if pre_nms_top_k_sharding_block is not None:
self._config_dict['pre_nms_top_k_sharding_block'] = (
pre_nms_top_k_sharding_block
)
if nms_v3_refinements is not None:
self._config_dict['nms_v3_refinements'] = nms_v3_refinements
if tflite_post_processing_config is not None:
self._config_dict.update(
{'tflite_post_processing_config': tflite_post_processing_config}
)
super().__init__(**kwargs)
def _decode_multilevel_outputs(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: Mapping[str, tf.Tensor],
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None,
):
"""Collects dict of multilevel boxes, scores, attributes into lists."""
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, max_level + 1):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i, num_anchors_per_locations_times_4) = (
raw_boxes_i.get_shape().as_list()
)
num_locations = feature_h_i * feature_w_i
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
num_classes = (
raw_scores_i.get_shape().as_list()[-1] // num_anchors_per_locations
)
      # Applies score transformation and removes the implicit background class.
scores_i = tf.sigmoid(
tf.reshape(
raw_scores_i,
[
batch_size,
num_locations * num_anchors_per_locations,
num_classes,
],
)
)
scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1])
# Box decoding.
# The anchor boxes are shared for all data in a batch.
# One stage detector only supports class agnostic box regression.
anchor_boxes_i = tf.reshape(
anchor_boxes[str(i)],
[batch_size, num_locations * num_anchors_per_locations, 4],
)
raw_boxes_i = tf.reshape(
raw_boxes_i,
[batch_size, num_locations * num_anchors_per_locations, 4],
)
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
# Box clipping.
if image_shape is not None:
boxes_i = box_ops.clip_boxes(
boxes_i, tf.expand_dims(image_shape, axis=1)
)
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for att_name, raw_att in raw_attributes.items():
attribute_size = (
raw_att[str(i)].get_shape().as_list()[-1]
// num_anchors_per_locations
)
att_i = tf.reshape(
raw_att[str(i)],
[
batch_size,
num_locations * num_anchors_per_locations,
attribute_size,
],
)
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return boxes, scores, attributes
def _decode_multilevel_outputs_and_pre_nms_top_k(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: Mapping[str, tf.Tensor],
image_shape: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Collects dict of multilevel boxes, scores into lists."""
boxes = None
scores = None
pre_nms_top_k = self._config_dict['pre_nms_top_k']
    # TODO(b/258007436): consider removing when the compiler is able to handle
# it on its own.
pre_nms_top_k_sharding_block = self._config_dict.get(
'pre_nms_top_k_sharding_block', 128
)
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
if image_shape is not None:
clip_shape = tf.expand_dims(tf.expand_dims(image_shape, axis=1), axis=1)
else:
clip_shape = None
for i in range(max_level, min_level - 1, -1):
(
batch_size,
unsharded_h,
unsharded_w,
num_anchors_per_locations_times_4,
) = (
raw_boxes[str(i)].get_shape().as_list()
)
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
if batch_size is None:
batch_size = tf.shape(raw_boxes[str(i)])[0]
block = max(1, pre_nms_top_k_sharding_block // unsharded_w)
boxes_shape = [
batch_size,
unsharded_h,
unsharded_w * num_anchors_per_locations,
4,
]
decoded_boxes = box_ops.decode_boxes(
tf.reshape(raw_boxes[str(i)], boxes_shape),
tf.reshape(anchor_boxes[str(i)], boxes_shape),
)
if clip_shape is not None:
decoded_boxes = box_ops.clip_boxes(
decoded_boxes,
clip_shape,
)
for raw_scores_i, decoded_boxes_i in edgetpu.shard_tensors(
1, block, (raw_scores[str(i)], decoded_boxes)
):
(_, feature_h_i, feature_w_i, _) = raw_scores_i.get_shape().as_list()
num_locations = feature_h_i * feature_w_i
num_classes = (
raw_scores_i.get_shape().as_list()[-1] // num_anchors_per_locations
)
        # Applies score transformation and removes the implicit background
        # class.
scores_i = tf.slice(
tf.transpose(
tf.reshape(
raw_scores_i,
[
batch_size,
num_locations * num_anchors_per_locations,
num_classes,
],
),
[0, 2, 1],
),
[0, 1, 0],
[-1, -1, -1],
)
# Box decoding.
# The anchor boxes are shared for all data in a batch.
# One stage detector only supports class agnostic box regression.
boxes_i = tf.tile(
tf.reshape(
decoded_boxes_i,
[batch_size, 1, num_locations * num_anchors_per_locations, 4],
),
[1, num_classes - 1, 1, 1],
)
scores, boxes = edgetpu.concat_and_top_k(
pre_nms_top_k, (scores, scores_i), (boxes, boxes_i)
)
boxes: tf.Tensor = boxes # pytype: disable=annotation-type-mismatch
return boxes, tf.sigmoid(scores)
def __call__(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: Mapping[str, tf.Tensor],
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None,
) -> Mapping[str, Any]:
"""Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
        representing box tensors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h, feature_w,
num_anchors * num_classes]`.
anchor_boxes: A `dict` with keys representing FPN levels and values
        representing anchor tensors of shape `[batch_size, K, 4]` representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
        contains keys representing FPN levels and values representing tensors
        of shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
`detection_attributes`: A dict. Values of the dict is a `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with following keys. If
`return_decoded` = True, the following items will also be included even if
`apply_nms` = True:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
`decoded_box_attributes`: A dict. Values in the dict is a
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes.
"""
if (
self._config_dict['apply_nms']
and self._config_dict['nms_version'] == 'tflite'
):
boxes, classes, scores, num_detections = _generate_detections_tflite(
raw_boxes,
raw_scores,
anchor_boxes,
self.get_config()['tflite_post_processing_config'],
)
return {
'num_detections': num_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
}
if self._config_dict['nms_version'] != 'v3':
boxes, scores, attributes = self._decode_multilevel_outputs(
raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes
)
else:
attributes = None
boxes, scores = self._decode_multilevel_outputs_and_pre_nms_top_k(
raw_boxes, raw_scores, anchor_boxes, image_shape
)
decoded_results = {
'decoded_boxes': boxes,
'decoded_box_scores': scores,
'decoded_box_attributes': attributes,
}
if not self._config_dict['apply_nms']:
return decoded_results
# Optionally force the NMS to run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if raw_attributes and (self._config_dict['nms_version'] != 'v1'):
raise ValueError(
'Attribute learning is only supported for NMSv1 but NMS {} is used.'
.format(self._config_dict['nms_version'])
)
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
boxes,
scores,
self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections'],
)
)
        # Set `nmsed_attributes` to an empty dict for batched NMS.
nmsed_attributes = {}
elif self._config_dict['nms_version'] == 'v1':
(
nmsed_boxes,
nmsed_scores,
nmsed_classes,
valid_detections,
nmsed_attributes,
) = _generate_detections_v1(
boxes,
scores,
attributes=attributes if raw_attributes else None,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self._config_dict[
'pre_nms_score_threshold'
],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma'],
)
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
boxes,
scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self._config_dict[
'pre_nms_score_threshold'
],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
use_class_agnostic_nms=self._config_dict[
'use_class_agnostic_nms'
],
)
)
        # Set `nmsed_attributes` to an empty dict for v2.
nmsed_attributes = {}
elif self._config_dict['nms_version'] == 'v3':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v3(
boxes,
scores,
pre_nms_score_threshold=self._config_dict[
'pre_nms_score_threshold'
],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
refinements=self._config_dict.get('nms_v3_refinements', 2),
)
)
        # Set `nmsed_attributes` to an empty dict for v3.
nmsed_attributes = {}
else:
raise ValueError(
'NMS version {} not supported.'.format(
self._config_dict['nms_version']
)
)
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
**(decoded_results if self._config_dict['return_decoded'] else {}),
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
'detection_attributes': nmsed_attributes,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
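# Editor-added usage sketch (hypothetical feature-map sizes): inputs are dicts
# keyed by FPN level, and index 0 of the per-anchor class logits is treated as
# the implicit background class.
def _example_multilevel_detection_generator():
  generator = MultilevelDetectionGenerator(
      nms_version='v2', max_num_detections=10)
  batch_size, num_anchors, num_classes = 1, 2, 3
  raw_boxes = {
      '4': tf.zeros([batch_size, 8, 8, num_anchors * 4]),
      '5': tf.zeros([batch_size, 4, 4, num_anchors * 4]),
  }
  raw_scores = {
      '4': tf.zeros([batch_size, 8, 8, num_anchors * num_classes]),
      '5': tf.zeros([batch_size, 4, 4, num_anchors * num_classes]),
  }
  anchor_boxes = {
      '4': tf.random.uniform([batch_size, 8, 8, num_anchors * 4]),
      '5': tf.random.uniform([batch_size, 4, 4, num_anchors * 4]),
  }
  image_shape = tf.constant([[64.0, 64.0]])
  return generator(raw_boxes, raw_scores, anchor_boxes, image_shape)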
| 64,978 | 40.04801 | 80 | py |
models | models-master/official/vision/modeling/layers/nn_blocks_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for 3D networks."""
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.modeling.layers import nn_layers
@tf.keras.utils.register_keras_serializable(package='Vision')
class SelfGating(tf.keras.layers.Layer):
"""Feature gating as used in S3D-G.
This implements the S3D-G network from:
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, Kevin Murphy.
Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video
Classification.
(https://arxiv.org/pdf/1712.04851.pdf)
"""
def __init__(self, filters, **kwargs):
"""Initializes a self-gating layer.
Args:
filters: An `int` number of filters for the convolutional layer.
**kwargs: Additional keyword arguments to be passed.
"""
super(SelfGating, self).__init__(**kwargs)
self._filters = filters
def build(self, input_shape):
self._spatial_temporal_average = tf.keras.layers.GlobalAveragePooling3D()
# No BN and activation after conv.
self._transformer_w = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=[1, 1, 1],
use_bias=True,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01))
super(SelfGating, self).build(input_shape)
def call(self, inputs):
x = self._spatial_temporal_average(inputs)
x = tf.expand_dims(x, 1)
x = tf.expand_dims(x, 2)
x = tf.expand_dims(x, 3)
x = self._transformer_w(x)
x = tf.nn.sigmoid(x)
return tf.math.multiply(x, inputs)
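# Editor-added usage sketch (hypothetical shapes): the gate is computed from a
# global spatio-temporal average and applied channel-wise to the input video,
# so `filters` should match the input channel count.
def _example_self_gating():
  layer = SelfGating(filters=8)
  video = tf.zeros([2, 4, 16, 16, 8])  # [batch, T, H, W, channels]
  return layer(video)  # Same shape as `video`, gated per channel.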
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock3D(tf.keras.layers.Layer):
"""Creates a 3D bottleneck block."""
def __init__(self,
filters,
temporal_kernel_size,
temporal_strides,
spatial_strides,
stochastic_depth_drop_rate=0.0,
se_ratio=None,
use_self_gating=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""Initializes a 3D bottleneck block with BN after convolutions.
Args:
filters: An `int` number of filters for the first two convolutions. Note
that the third and final convolution will use 4 times as many filters.
temporal_kernel_size: An `int` of kernel size for the temporal
convolutional layer.
      temporal_strides: An `int` of temporal stride for the temporal
convolutional layer.
spatial_strides: An `int` of spatial stride for the spatial convolutional
layer.
stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
the stochastic depth layer.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
use_self_gating: A `bool` of whether to apply self-gating module or not.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv3D. Default to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv3D.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
super(BottleneckBlock3D, self).__init__(**kwargs)
self._filters = filters
self._temporal_kernel_size = temporal_kernel_size
self._spatial_strides = spatial_strides
self._temporal_strides = temporal_strides
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._use_self_gating = use_self_gating
self._se_ratio = se_ratio
self._use_sync_bn = use_sync_bn
self._activation = activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
self._shortcut_maxpool = tf.keras.layers.MaxPool3D(
pool_size=[1, 1, 1],
strides=[
self._temporal_strides, self._spatial_strides, self._spatial_strides
])
self._shortcut_conv = tf.keras.layers.Conv3D(
filters=4 * self._filters,
kernel_size=1,
strides=[
self._temporal_strides, self._spatial_strides, self._spatial_strides
],
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)
self._temporal_conv = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=[self._temporal_kernel_size, 1, 1],
strides=[self._temporal_strides, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)
self._spatial_conv = tf.keras.layers.Conv3D(
filters=self._filters,
kernel_size=[1, 3, 3],
strides=[1, self._spatial_strides, self._spatial_strides],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)
self._expand_conv = tf.keras.layers.Conv3D(
filters=4 * self._filters,
kernel_size=[1, 1, 1],
strides=[1, 1, 1],
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm3 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=self._filters * 4,
out_filters=self._filters * 4,
se_ratio=self._se_ratio,
use_3d_input=True,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
if self._use_self_gating:
self._self_gating = SelfGating(filters=4 * self._filters)
else:
self._self_gating = None
super(BottleneckBlock3D, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'temporal_kernel_size': self._temporal_kernel_size,
'temporal_strides': self._temporal_strides,
'spatial_strides': self._spatial_strides,
'use_self_gating': self._use_self_gating,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(BottleneckBlock3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
in_filters = inputs.shape.as_list()[-1]
if in_filters == 4 * self._filters:
if self._temporal_strides == 1 and self._spatial_strides == 1:
shortcut = inputs
else:
shortcut = self._shortcut_maxpool(inputs)
else:
shortcut = self._shortcut_conv(inputs)
shortcut = self._norm0(shortcut)
x = self._temporal_conv(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._spatial_conv(x)
x = self._norm2(x)
x = self._activation_fn(x)
x = self._expand_conv(x)
x = self._norm3(x)
# Apply self-gating, SE, stochastic depth.
if self._self_gating:
x = self._self_gating(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
# Apply activation before additional modules.
x = self._activation_fn(x + shortcut)
return x
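# Editor-added usage sketch (hypothetical shapes): when the input channel
# count equals 4 * filters, the block uses the max-pool (or identity)
# shortcut, and spatial_strides=2 halves the spatial resolution.
def _example_bottleneck_block_3d():
  block = BottleneckBlock3D(
      filters=16,
      temporal_kernel_size=3,
      temporal_strides=1,
      spatial_strides=2)
  video = tf.zeros([2, 8, 32, 32, 64])  # channels == 4 * filters
  return block(video)  # -> [2, 8, 16, 16, 64]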
| 10,565 | 35.560554 | 80 | py |
models | models-master/official/vision/modeling/layers/mask_sampler.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of mask sampler."""
# Import libraries
import tensorflow as tf
from official.vision.ops import spatial_transform_ops
def _sample_and_crop_foreground_masks(candidate_rois: tf.Tensor,
candidate_gt_boxes: tf.Tensor,
candidate_gt_classes: tf.Tensor,
candidate_gt_indices: tf.Tensor,
gt_masks: tf.Tensor,
num_sampled_masks: int = 128,
mask_target_size: int = 28):
"""Samples and creates cropped foreground masks for training.
Args:
candidate_rois: A `tf.Tensor` of shape of [batch_size, N, 4], where N is the
number of candidate RoIs to be considered for mask sampling. It includes
both positive and negative RoIs. The `num_mask_samples_per_image` positive
RoIs will be sampled to create mask training targets.
candidate_gt_boxes: A `tf.Tensor` of shape of [batch_size, N, 4], storing
the corresponding groundtruth boxes to the `candidate_rois`.
candidate_gt_classes: A `tf.Tensor` of shape of [batch_size, N], storing the
corresponding groundtruth classes to the `candidate_rois`. 0 in the tensor
corresponds to the background class, i.e. negative RoIs.
candidate_gt_indices: A `tf.Tensor` of shape [batch_size, N], storing the
corresponding groundtruth instance indices to the `candidate_gt_boxes`,
i.e. gt_boxes[candidate_gt_indices[:, i]] = candidate_gt_boxes[:, i] and
      gt_boxes, which is of shape [batch_size, MAX_INSTANCES, 4] with
      MAX_INSTANCES >= N, is the superset of candidate_gt_boxes.
gt_masks: A `tf.Tensor` of [batch_size, MAX_INSTANCES, mask_height,
mask_width] containing all the groundtruth masks which sample masks are
drawn from.
num_sampled_masks: An `int` that specifies the number of masks to sample.
mask_target_size: An `int` that specifies the final cropped mask size after
sampling. The output masks are resized w.r.t the sampled RoIs.
Returns:
foreground_rois: A `tf.Tensor` of shape of [batch_size, K, 4] storing the
RoI that corresponds to the sampled foreground masks, where
K = num_mask_samples_per_image.
foreground_classes: A `tf.Tensor` of shape of [batch_size, K] storing the
classes corresponding to the sampled foreground masks.
    cropped_foreground_masks: A `tf.Tensor` of shape of
[batch_size, K, mask_target_size, mask_target_size] storing the cropped
foreground masks used for training.
"""
_, fg_instance_indices = tf.nn.top_k(
tf.cast(tf.greater(candidate_gt_classes, 0), dtype=tf.int32),
k=num_sampled_masks)
fg_instance_indices_shape = tf.shape(fg_instance_indices)
batch_indices = (
tf.expand_dims(tf.range(fg_instance_indices_shape[0]), axis=-1) *
tf.ones([1, fg_instance_indices_shape[-1]], dtype=tf.int32))
gather_nd_instance_indices = tf.stack(
[batch_indices, fg_instance_indices], axis=-1)
foreground_rois = tf.gather_nd(
candidate_rois, gather_nd_instance_indices)
foreground_boxes = tf.gather_nd(
candidate_gt_boxes, gather_nd_instance_indices)
foreground_classes = tf.gather_nd(
candidate_gt_classes, gather_nd_instance_indices)
foreground_gt_indices = tf.gather_nd(
candidate_gt_indices, gather_nd_instance_indices)
foreground_gt_indices = tf.where(
tf.equal(foreground_gt_indices, -1),
tf.zeros_like(foreground_gt_indices),
foreground_gt_indices)
foreground_gt_indices_shape = tf.shape(foreground_gt_indices)
batch_indices = (
tf.expand_dims(tf.range(foreground_gt_indices_shape[0]), axis=-1) *
tf.ones([1, foreground_gt_indices_shape[-1]], dtype=tf.int32))
gather_nd_gt_indices = tf.stack(
[batch_indices, foreground_gt_indices], axis=-1)
foreground_masks = tf.gather_nd(gt_masks, gather_nd_gt_indices)
cropped_foreground_masks = spatial_transform_ops.crop_mask_in_target_box(
foreground_masks, foreground_boxes, foreground_rois, mask_target_size,
sample_offset=0.5)
return foreground_rois, foreground_classes, cropped_foreground_masks
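# Example: a minimal illustrative sketch (the `_example_*` helper below is for
# demonstration only, with made-up values). The foreground selection above
# relies on `tf.nn.top_k` over the binarized classes returning the indices of
# positive RoIs first. With classes [[0, 7, 0, 3]] and num_sampled_masks=2 the
# selected indices are [[1, 3]], which are then used to gather the matching
# RoIs, classes and groundtruth masks.
def _example_foreground_index_selection():
  candidate_gt_classes = tf.constant([[0, 7, 0, 3]])
  _, fg_instance_indices = tf.nn.top_k(
      tf.cast(tf.greater(candidate_gt_classes, 0), dtype=tf.int32), k=2)
  return fg_instance_indices  # [[1, 3]]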
@tf.keras.utils.register_keras_serializable(package='Vision')
class MaskSampler(tf.keras.layers.Layer):
"""Samples and creates mask training targets."""
def __init__(self, mask_target_size: int, num_sampled_masks: int, **kwargs):
self._config_dict = {
'mask_target_size': mask_target_size,
'num_sampled_masks': num_sampled_masks,
}
super(MaskSampler, self).__init__(**kwargs)
def call(self, candidate_rois: tf.Tensor, candidate_gt_boxes: tf.Tensor,
candidate_gt_classes: tf.Tensor, candidate_gt_indices: tf.Tensor,
gt_masks: tf.Tensor):
"""Samples and creates mask targets for training.
Args:
candidate_rois: A `tf.Tensor` of shape of [batch_size, N, 4], where N is
the number of candidate RoIs to be considered for mask sampling. It
includes both positive and negative RoIs. The
`num_mask_samples_per_image` positive RoIs will be sampled to create
mask training targets.
candidate_gt_boxes: A `tf.Tensor` of shape of [batch_size, N, 4], storing
the corresponding groundtruth boxes to the `candidate_rois`.
candidate_gt_classes: A `tf.Tensor` of shape of [batch_size, N], storing
the corresponding groundtruth classes to the `candidate_rois`. 0 in the
tensor corresponds to the background class, i.e. negative RoIs.
candidate_gt_indices: A `tf.Tensor` of shape [batch_size, N], storing the
corresponding groundtruth instance indices to the `candidate_gt_boxes`,
i.e. gt_boxes[candidate_gt_indices[:, i]] = candidate_gt_boxes[:, i],
        where gt_boxes, which is of shape [batch_size, MAX_INSTANCES, 4] with
        MAX_INSTANCES >= N, is the superset of candidate_gt_boxes.
gt_masks: A `tf.Tensor` of [batch_size, MAX_INSTANCES, mask_height,
mask_width] containing all the groundtruth masks which sample masks are
        drawn from. The output masks are resized w.r.t the sampled RoIs.
Returns:
foreground_rois: A `tf.Tensor` of shape of [batch_size, K, 4] storing the
RoI that corresponds to the sampled foreground masks, where
K = num_mask_samples_per_image.
foreground_classes: A `tf.Tensor` of shape of [batch_size, K] storing the
classes corresponding to the sampled foreground masks.
      cropped_foreground_masks: A `tf.Tensor` of shape of
[batch_size, K, mask_target_size, mask_target_size] storing the
cropped foreground masks used for training.
"""
foreground_rois, foreground_classes, cropped_foreground_masks = (
_sample_and_crop_foreground_masks(
candidate_rois,
candidate_gt_boxes,
candidate_gt_classes,
candidate_gt_indices,
gt_masks,
self._config_dict['num_sampled_masks'],
self._config_dict['mask_target_size']))
return foreground_rois, foreground_classes, cropped_foreground_masks
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
| 7,914 | 46.39521 | 80 | py |
models | models-master/official/vision/modeling/layers/edgetpu.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EdgeTPU oriented layers and tools."""
from typing import List, Optional, Union, Iterable, Sequence
import numpy as np
import tensorflow as tf
_or = tf.maximum
_and = tf.minimum
_reduce_or = tf.reduce_max
def _tensor_sum_vectors(a, b):
a = tf.tile(tf.reshape(a, [1, -1, 1, a.shape[-1]]), [1, 1, a.shape[-1], 1])
b = tf.tile(tf.reshape(b, [1, -1, a.shape[-1], 1]), [1, 1, 1, a.shape[-1]])
return a + b
def _tensor_product_iou(boxes):
"""Computes pairwise IOU.
Reason to use 4-D tensors is to follow TPU compiler preference.
Args:
boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
Returns:
A 4-D float `Tensor` of shape `[1, 1, num_boxes, num_boxes]` containing
pairwise IOU.
"""
boxes_size = boxes.shape[-2]
# Code below will do frequent operands broadcasting.
# TPU compiler has (empirically) less issues broadcasting if
# - batch (first) dimension is 1. (Special consideration sharding)
# - there are 4 dimensions. (Standard traversal mapping)
# - last dimension is not 1. (Structure alignment)
tpu_friendly_shape = [1, -1, 1, boxes_size]
bottom, left, top, right = (
tf.reshape(side, tpu_friendly_shape)
for side in tf.split(boxes, 4, -1))
height, width = top - bottom, right - left
area = height * width
area_sum = _tensor_sum_vectors(area, area)
bottom_pad, left_pad, top_pad, right_pad = (
tf.nn.relu(_tensor_sum_vectors(x, -x))
for x in (-bottom, -left, top, right))
height_pad, width_pad = bottom_pad + top_pad, left_pad + right_pad
intersection = tf.nn.relu(height - height_pad) * tf.nn.relu(width - width_pad)
union = area_sum - intersection
iou = tf.math.divide(intersection, union + _same(union))
return iou
def _greater(x):
"""Avoid non lowerable layers in boolean comparison.
  A logical operation results in a tensor of boolean type. However, in serving
  such tensors cannot be cast to values because of NNAPI specs.
  The `tf.where` operation results in `select` instruction lowering, which does
  not run well on all generations of edge-tpus.
Args:
x: any numeric tensor.
Returns:
tf.where(x > tf.zero_like(x), tf.one_like(x), tf.zero_like(x))
"""
x_clip = tf.minimum(tf.nn.relu(x), tf.constant(1, dtype=x.dtype))
return -tf.math.floor(-x_clip)
def _same(x):
"""Avoid non lowerable layers in boolean equality.
  A logical operation results in a tensor of boolean type. However, in serving
  such tensors cannot be cast to values because of NNAPI specs.
  The `tf.where` operation results in `select` instruction lowering, which does
  not run well on all generations of edge-tpus.
Args:
x: any numeric tensor.
Returns:
tf.where(x == tf.zero_like(x), tf.one_like(x), tf.zero_like(x))
"""
x_clip = tf.minimum(tf.abs(x), tf.constant(1, dtype=x.dtype))
return tf.constant(1, dtype=x.dtype) + tf.math.floor(-x_clip)
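# Example: a minimal illustrative sketch (the helper below is for
# demonstration only). `_greater` and `_same` emulate `x > 0` and `x == 0`
# with arithmetic only, returning 0/1 tensors in the input dtype instead of
# booleans, so the result lowers cleanly on edge-tpus.
def _example_branchless_comparisons():
  x = tf.constant([-2.0, 0.0, 3.0])
  return _greater(x), _same(x)  # ([0., 0., 1.], [0., 1., 0.])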
def shard_tensors(
axis: int, block_size: int, tensors: 'Sequence[tf.Tensor]'
) -> Union[List[Sequence[tf.Tensor]], 'Iterable[Sequence[tf.Tensor]]']:
"""Consistently splits multiple tensors sharding-style.
Args:
axis: axis to be used to split tensors
block_size: block size to split tensors.
tensors: list of tensors.
Returns:
    List of shards; each shard has exactly one piece of each input tensor.
Raises:
    ValueError: if input tensors have different sizes along the sharded
      dimension.
"""
if not all(tensor.shape.is_fully_defined() for tensor in tensors):
return [tensors]
for validate_axis in range(axis + 1):
consistent_length: int = tensors[0].shape[validate_axis]
for tensor in tensors:
if tensor.shape[validate_axis] != consistent_length:
raise ValueError('Inconsistent shapes in shard_tensors: first is '
f'{tensors[0].shape} and other is {tensor.shape}')
batch_size: int = tensors[0].shape[axis]
if block_size >= batch_size:
return [tensors]
else:
blocks = batch_size // block_size
remainder = batch_size % block_size
if remainder:
tensor_parts = []
for tensor in tensors:
shape: tf.TensorShape = tensor.shape
body: tf.Tensor = tf.slice(tensor, [0] * len(shape), [
size if i != axis else blocks * block_size
for i, size in enumerate(shape)
])
tail: tf.Tensor = tf.slice(tensor, [
0 if i != axis else (blocks * block_size)
for i, _ in enumerate(shape)
], [
size if i != axis else (size - blocks * block_size)
for i, size in enumerate(shape)
])
tensor_parts.append(tf.split(body, blocks, axis) + [tail])
return zip(*tensor_parts)
else:
return zip(*[tf.split(tensor, blocks, axis) for tensor in tensors])
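# Example: a minimal illustrative sketch (the helper below is for
# demonstration only). Splitting two consistent tensors along axis 0 with
# block_size=4 yields shards of sizes 4, 4 and 2; each shard holds one piece
# of every input tensor.
def _example_shard_tensors():
  boxes = tf.zeros([10, 100, 4])
  scores = tf.zeros([10, 100])
  return [tuple(int(t.shape[0]) for t in shard)
          for shard in shard_tensors(0, 4, (boxes, scores))]
  # [(4, 4), (4, 4), (2, 2)]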
# TODO(b/258007436): Number is based on existing compiler limitations while
# running bf16 NMS on edgetpu. Remove manual sharing when compiler issue will be
# fixed.
_RECOMMENDED_NMS_MEMORY = 360000
def non_max_suppression_padded(boxes: tf.Tensor,
scores: tf.Tensor,
output_size: int,
iou_threshold: float = 0.5,
refinements: int = 0) -> tf.Tensor:
"""Selects a subset of boxes which have highest score among IOU-similar boxes.
Prunes away boxes that have high intersection-over-union (IOU) overlap
with boxes having higher score. Boxes are supplied as `[y1, x1, y2, x2]`,
where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of
box corners. Note that this algorithm is agnostic to the coordinate system.
  Thus translating or reflecting the coordinate system results in the same
boxes being selected by the algorithm. The output of this operation is a
set of integers indexing into the input collection of bounding boxes
representing the selected boxes.
Set will be returned padded on the right with `-1` values. The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation. For example:
```python
selected_indices = vision.modeling.layers.non_max_suppression_padded(
boxes, scores, max_output_size, iou_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
```
  See the following documentation for implementation details.
third_party/tensorflow_models/official/projects/edgetpu/vision/modeling/g3doc/non_max_suppression.md
Args:
boxes: A 2-D+ float `Tensor` of shape `[...batch_dims, num_boxes, 4]`.
scores: A 1-D+ float `Tensor` of shape `[...batch_dims, num_boxes]`
representing a single score corresponding to each box (each row of boxes).
output_size: A scalar integer `Tensor` representing the maximum number of
boxes to be selected by non-max suppression.
iou_threshold: A float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
refinements: A number of extra refinement steps to make result closer to
original sequential NMS.
Returns:
A 1-D+ integer `Tensor` of shape `[...batch_dims, output_size]` representing
the selected indices from the boxes tensor and `-1` values for the padding.
"""
if not boxes.shape.is_fully_defined():
return _non_max_suppression_as_is(boxes, scores, output_size, iou_threshold,
refinements)
# Does partitioning job to help compiler converge with memory.
batch_shape = boxes.shape[:-2]
batch_size = np.prod(batch_shape, dtype=np.int32)
boxes_size, struct_size = boxes.shape[-2:]
boxes = tf.reshape(boxes, [batch_size, boxes_size, struct_size])
scores = tf.reshape(scores, [batch_size, boxes_size])
block = max(1, _RECOMMENDED_NMS_MEMORY // (boxes_size * boxes_size))
indices = []
for boxes_i, scores_i in shard_tensors(0, block, (boxes, scores)):
indices.append(
_non_max_suppression_as_is(boxes_i, scores_i, output_size,
iou_threshold, refinements))
indices = tf.concat(indices, axis=0)
return tf.reshape(indices, batch_shape + [output_size])
def _refine_nms_graph_to_original_algorithm(better: tf.Tensor) -> tf.Tensor:
"""Refines the relationship graph, bringing it closer to the iterative NMS.
See `test_refinement_sample` unit tests for example, also comments in body of
the algorithm, for the intuition.
Args:
better: is a tensor with zeros and ones so that [batch dims ..., box_1,
box_2] represents the [adjacency
matrix](https://en.wikipedia.org/wiki/Adjacency_matrix) for the
[relation](https://en.wikipedia.org/wiki/Relation_(mathematics)) `better`
between boxes box_1 and box_2.
Returns:
Modification of tensor encoding adjacency matrix of `better` relation.
"""
one = tf.constant(1, dtype=better.dtype)
# good_box: is a tensor with zeros and ones so that
# [batch dims ..., box_i] represents belonging of a box_i to the `good`
# subset. `good` subset is defined as exactly those boxes that do not have any
# `better` boxes.
  # INTUITION: In terms of an oriented graph, this is the subset of nodes nobody
# points to as "I'm better than you". These nodes will never be suppressed in
# the original NMS algorithm.
good_box = one - _reduce_or(better, axis=-1)
# good_better: is a tensor with zeros and ones so that
# [batch dims ..., box_1, box_2] represents the adjacency matrix for the
# `good_better` relation on all boxes set. `good_better` relation is defined
# as relation between good box and boxes it is better than.
  # INTUITION: In terms of an oriented graph, this is the subset of edges that
  # originate from `good` nodes. These edges will represent
  # suppression actions in the original NMS algorithm.
good_better = _and(tf.expand_dims(good_box, axis=-2), better)
# not_bad_box: is a tensor with zeros and ones so that
# [batch dims ..., box_i] represents belonging of a box_i to the `not_bad`
  # subset. The `not_bad` subset is defined as all and only those boxes that
  # do not have any `good_better` boxes.
# INTUITION: These nodes are nodes which are not suppressed by `good` boxes
# in the original NMS algorithm.
not_bad_box = one - _reduce_or(good_better, axis=-1)
# return: is a tensor with zeros and ones so that
# [batch dims ..., box_1, box_2] represents the adjacency matrix for the
# `better` relation on all boxes set which is closer to represent suppression
# procedure in original NMS algorithm.
return _and(tf.expand_dims(not_bad_box, axis=-2), better)
def _non_max_suppression_as_is(boxes: tf.Tensor,
scores: tf.Tensor,
output_size: int,
iou_threshold: float = 0.5,
refinements: int = 0) -> tf.Tensor:
"""Selects a subset of boxes which have highest score among IOU-similar boxes.
Args:
boxes: A 2-D+ float `Tensor` of shape `[...batch_dims, num_boxes, 4]`.
scores: A 1-D+ float `Tensor` of shape `[...batch_dims, num_boxes]`
representing a single score corresponding to each box (each row of boxes).
output_size: A scalar integer `Tensor` representing the maximum number of
boxes to be selected by non-max suppression.
iou_threshold: A 0-D float tensor representing the threshold for deciding
whether boxes overlap too much with respect to IOU.
refinements: A number of extra refinement steps to make result closer to
      original sequential NMS.
Returns:
A 1-D+ integer `Tensor` of shape `[...batch_dims, output_size]` representing
the selected indices from the boxes tensor and `-1` values for the padding.
"""
boxes_size = boxes.shape[-2]
if boxes.shape[-1] != 4:
raise ValueError(f'Boxes shape ({boxes.shape}) last dimension must be 4 '
'to represent [y1, x1, y2, x2] boxes coordinates')
if scores.shape != boxes.shape[:-1]:
raise ValueError(f'Boxes shape ({boxes.shape}) and scores shape '
f'({scores.shape}) do not match.')
order = tf.constant(np.arange(boxes_size), dtype=scores.dtype)
relative_order = _tensor_sum_vectors(order, -order)
relative_scores = _tensor_sum_vectors(scores, -scores)
similar = tf.cast(
_greater(
_tensor_product_iou(boxes) -
tf.constant(iou_threshold, dtype=boxes.dtype)), scores.dtype)
worse = _greater(relative_scores)
same_later = _and(_same(relative_scores), _greater(relative_order))
similar_worse_or_same_later = _and(similar, _or(worse, same_later))
for _ in range(refinements):
similar_worse_or_same_later = _refine_nms_graph_to_original_algorithm(
similar_worse_or_same_later)
prunable = _reduce_or(similar_worse_or_same_later, axis=-1)
remaining = tf.constant(1, dtype=prunable.dtype) - prunable
if scores.shape[0] is None:
    # Prefer keeping most of the tensor shape defined, so that error messages are clear.
remaining = tf.reshape(remaining, [tf.shape(scores)[0], *scores.shape[1:]])
else:
remaining = tf.reshape(remaining, scores.shape)
# top_k runs on TPU cores, let it happen, TPU tiles implementation is slower.
top_k = tf.math.top_k(scores * remaining, output_size)
valid = _greater(top_k.values)
return (tf.cast(top_k.indices, top_k.values.dtype) * valid + valid -
tf.constant(1, dtype=top_k.values.dtype))
def concat_and_top_k(
top_k: int, scores_pair: 'tuple[Optional[tf.Tensor], tf.Tensor]',
*other_pairs: 'tuple[Optional[tf.Tensor], tf.Tensor]'
) -> 'tuple[tf.Tensor, ...]':
"""Combines shards of top_k operation, when sharded along filtered dimension.
  The general idea is that sometimes the pre-top_k dimension is very large,
  while top_k is moderately low (think of a 15K pre-top_k dimension and a
  top_k of 150). In that case it is possible to break the top_k input into
  groups significantly larger than top_k and significantly smaller than the
  pre-top_k dimension (think 1500). We do top_k over the first 1500 elements,
  then join the 150 survivors with the next 1500 elements (1650 in total) and
  repeat top_k. This function provides the repeatedly used concat-and-top_k
  step for that case.
For example with top_k = 2 and scores_pair = ([10, 6], [9, 8, 7]), output
scores will be [10, 9].
  Other pairs are filtered using indices generated from the scores. This is a
  pretty common case of filtering a structure by its score.
For example with one extra pair of box per score:
top_k = 2
scores_pair = ([10, 6],
[9, 8, 7])
other_pairs = [([[0, 0, 10, 10], [0, 0, 6, 6]],
[[1, 1, 9, 9], [1, 1, 8, 8], [1, 1, 7, 7]])]
Output is:
([10, 9], [[0, 0, 10, 10], [1, 1, 9, 9]])
See also 'test_top_k_sharded_fusion' unit test with end to end example.
Args:
top_k: is top_k argument of sharded tf.math.top_k.
scores_pair: Tuple (<previous shards combination>, <additional shard>)
scores to be aggregated using top_k.
*other_pairs: Tuples (<previous shards combination>, <additional shard>)
other values to be aggregated using indexes of top_k scores.
Returns:
Tuple of scores based top_k aggregations with additional shards.
"""
scores, scores_shard = scores_pair
if other_pairs:
others, others_shard = zip(*other_pairs)
else:
others = others_shard = []
# Same as tf.rank, but avoiding tensor form for graph mode execution.
top_k_dim: int = len(scores_shard.shape) - 1
if scores is None:
# First shard becomes aggregation
scores = scores_shard
others = others_shard
else:
# Merge shard into aggregation
scores = tf.concat([scores, scores_shard], top_k_dim)
others = [
tf.concat([other, other_shard], top_k_dim)
for other, other_shard in zip(others, others_shard)
]
# When shards are uneven some will be smaller than requested top_k
if scores.shape[top_k_dim] > top_k:
scores, indices = tf.nn.top_k(scores, top_k)
others = [
tf.gather(other, indices, axis=top_k_dim, batch_dims=top_k_dim)
for other in others
]
return scores, *others
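# Example: a minimal illustrative sketch (the helper below is for
# demonstration only, reproducing the docstring values). Fusing two shards
# with top_k=2: each iteration concatenates the running winners with a new
# shard and keeps only the best scores plus the matching boxes.
def _example_sharded_top_k_fusion():
  scores, boxes = None, None
  shards = (
      (tf.constant([10., 6.]),
       tf.constant([[0., 0., 10., 10.], [0., 0., 6., 6.]])),
      (tf.constant([9., 8., 7.]),
       tf.constant([[1., 1., 9., 9.], [1., 1., 8., 8.], [1., 1., 7., 7.]])),
  )
  for scores_shard, boxes_shard in shards:
    scores, boxes = concat_and_top_k(2, (scores, scores_shard),
                                     (boxes, boxes_shard))
  return scores, boxes  # [10., 9.] and [[0., 0., 10., 10.], [1., 1., 9., 9.]]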
| 16,727 | 41.673469 | 102 | py |
models | models-master/official/vision/modeling/layers/nn_blocks_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for resnet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.layers import nn_blocks_3d
class NNBlocksTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(nn_blocks_3d.BottleneckBlock3D, 1, 1, 2, True, 0.2, 0.1),
(nn_blocks_3d.BottleneckBlock3D, 3, 2, 1, False, 0.0, 0.0),
)
def test_bottleneck_block_creation(self, block_fn, temporal_kernel_size,
temporal_strides, spatial_strides,
use_self_gating, se_ratio,
stochastic_depth):
temporal_size = 16
spatial_size = 128
filters = 256
inputs = tf.keras.Input(
shape=(temporal_size, spatial_size, spatial_size, filters * 4),
batch_size=1)
block = block_fn(
filters=filters,
temporal_kernel_size=temporal_kernel_size,
temporal_strides=temporal_strides,
spatial_strides=spatial_strides,
use_self_gating=use_self_gating,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth)
features = block(inputs)
self.assertAllEqual([
1, temporal_size // temporal_strides, spatial_size // spatial_strides,
spatial_size // spatial_strides, filters * 4
], features.shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 2,028 | 33.389831 | 78 | py |
models | models-master/official/vision/modeling/layers/roi_generator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of ROI generator."""
from typing import Optional, Mapping
# Import libraries
import tensorflow as tf
from official.vision.ops import box_ops
from official.vision.ops import nms
def _multilevel_propose_rois(raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: Mapping[str, tf.Tensor],
image_shape: tf.Tensor,
pre_nms_top_k: int = 2000,
pre_nms_score_threshold: float = 0.0,
pre_nms_min_size_threshold: float = 0.0,
nms_iou_threshold: float = 0.7,
num_proposals: int = 1000,
use_batched_nms: bool = False,
decode_boxes: bool = True,
clip_boxes: bool = True,
apply_sigmoid_to_score: bool = True):
"""Proposes RoIs given a group of candidates from different FPN levels.
The following describes the steps:
1. For each individual level:
a. Apply sigmoid transform if specified.
b. Decode boxes if specified.
c. Clip boxes if specified.
d. Filter small boxes and those fall outside image if specified.
e. Apply pre-NMS filtering including pre-NMS top k and score thresholding.
f. Apply NMS.
2. Aggregate post-NMS boxes from each level.
3. Apply an overall top k to generate the final selected RoIs.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
      representing box tensors of shape
[batch_size, feature_h, feature_w, num_anchors * 4].
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape
[batch_size, feature_h, feature_w, num_anchors].
anchor_boxes: A `dict` with keys representing FPN levels and values
representing anchor box tensors of shape
[batch_size, feature_h * feature_w * num_anchors, 4].
image_shape: A `tf.Tensor` of shape [batch_size, 2] where the last dimension
are [height, width] of the scaled image.
pre_nms_top_k: An `int` of top scoring RPN proposals *per level* to keep
before applying NMS. Default: 2000.
pre_nms_score_threshold: A `float` between 0 and 1 representing the minimal
box score to keep before applying NMS. This is often used as a
pre-filtering step for better performance. Default: 0, no filtering is
applied.
pre_nms_min_size_threshold: A `float` representing the minimal box size in
each side (w.r.t. the scaled image) to keep before applying NMS. This is
often used as a pre-filtering step for better performance. Default: 0, no
filtering is applied.
nms_iou_threshold: A `float` between 0 and 1 representing the IoU threshold
used for NMS. If 0.0, no NMS is applied. Default: 0.7.
num_proposals: An `int` of top scoring RPN proposals *in total* to keep
after applying NMS. Default: 1000.
use_batched_nms: A `bool` indicating whether NMS is applied in batch using
`tf.image.combined_non_max_suppression`. Currently only available in
CPU/GPU. Default is False.
decode_boxes: A `bool` indicating whether `raw_boxes` needs to be decoded
using `anchor_boxes`. If False, use `raw_boxes` directly and ignore
`anchor_boxes`. Default is True.
clip_boxes: A `bool` indicating whether boxes are first clipped to the
      scaled image size before applying NMS. If False, no clipping is applied
and `image_shape` is ignored. Default is True.
apply_sigmoid_to_score: A `bool` indicating whether apply sigmoid to
`raw_scores` before applying NMS. Default is True.
Returns:
selected_rois: A `tf.Tensor` of shape [batch_size, num_proposals, 4],
representing the box coordinates of the selected proposals w.r.t. the
scaled image.
selected_roi_scores: A `tf.Tensor` of shape [batch_size, num_proposals, 1],
representing the scores of the selected proposals.
"""
with tf.name_scope('multilevel_propose_rois'):
rois = []
roi_scores = []
image_shape = tf.expand_dims(image_shape, axis=1)
for level in sorted(raw_scores.keys()):
with tf.name_scope('level_%s' % level):
_, feature_h, feature_w, num_anchors_per_location = (
raw_scores[level].get_shape().as_list())
num_boxes = feature_h * feature_w * num_anchors_per_location
this_level_scores = tf.reshape(raw_scores[level], [-1, num_boxes])
this_level_boxes = tf.reshape(raw_boxes[level], [-1, num_boxes, 4])
this_level_anchors = tf.cast(
tf.reshape(anchor_boxes[level], [-1, num_boxes, 4]),
dtype=this_level_scores.dtype)
if apply_sigmoid_to_score:
this_level_scores = tf.sigmoid(this_level_scores)
if decode_boxes:
this_level_boxes = box_ops.decode_boxes(
this_level_boxes, this_level_anchors)
if clip_boxes:
this_level_boxes = box_ops.clip_boxes(
this_level_boxes, image_shape)
if pre_nms_min_size_threshold > 0.0:
this_level_boxes, this_level_scores = box_ops.filter_boxes(
this_level_boxes,
this_level_scores,
image_shape,
pre_nms_min_size_threshold)
this_level_pre_nms_top_k = min(num_boxes, pre_nms_top_k)
this_level_post_nms_top_k = min(num_boxes, num_proposals)
if nms_iou_threshold > 0.0:
if use_batched_nms:
this_level_rois, this_level_roi_scores, _, _ = (
tf.image.combined_non_max_suppression(
tf.expand_dims(this_level_boxes, axis=2),
tf.expand_dims(this_level_scores, axis=-1),
max_output_size_per_class=this_level_pre_nms_top_k,
max_total_size=this_level_post_nms_top_k,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_per_class=False,
clip_boxes=False))
else:
if pre_nms_score_threshold > 0.0:
this_level_boxes, this_level_scores = (
box_ops.filter_boxes_by_scores(
this_level_boxes,
this_level_scores,
pre_nms_score_threshold))
this_level_boxes, this_level_scores = box_ops.top_k_boxes(
this_level_boxes, this_level_scores, k=this_level_pre_nms_top_k)
this_level_roi_scores, this_level_rois = (
nms.sorted_non_max_suppression_padded(
this_level_scores,
this_level_boxes,
max_output_size=this_level_post_nms_top_k,
iou_threshold=nms_iou_threshold))
else:
this_level_rois, this_level_roi_scores = box_ops.top_k_boxes(
this_level_boxes,
this_level_scores,
k=this_level_post_nms_top_k)
rois.append(this_level_rois)
roi_scores.append(this_level_roi_scores)
all_rois = tf.concat(rois, axis=1)
all_roi_scores = tf.concat(roi_scores, axis=1)
with tf.name_scope('top_k_rois'):
_, num_valid_rois = all_roi_scores.get_shape().as_list()
overall_top_k = min(num_valid_rois, num_proposals)
selected_rois, selected_roi_scores = box_ops.top_k_boxes(
all_rois, all_roi_scores, k=overall_top_k)
return selected_rois, selected_roi_scores
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelROIGenerator(tf.keras.layers.Layer):
"""Proposes RoIs for the second stage processing."""
def __init__(self,
pre_nms_top_k: int = 2000,
pre_nms_score_threshold: float = 0.0,
pre_nms_min_size_threshold: float = 0.0,
nms_iou_threshold: float = 0.7,
num_proposals: int = 1000,
test_pre_nms_top_k: int = 1000,
test_pre_nms_score_threshold: float = 0.0,
test_pre_nms_min_size_threshold: float = 0.0,
test_nms_iou_threshold: float = 0.7,
test_num_proposals: int = 1000,
use_batched_nms: bool = False,
**kwargs):
"""Initializes a ROI generator.
The ROI generator transforms the raw predictions from RPN to ROIs.
Args:
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
pre_nms_min_size_threshold: A `float` of the threshold of each side of the
box (w.r.t. the scaled image). Proposals whose sides are below this
threshold are thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
num_proposals: An `int` of the final number of proposals to generate.
test_pre_nms_top_k: An `int` of the number of top scores proposals to be
kept before applying NMS in testing.
test_pre_nms_score_threshold: A `float` of the score threshold to apply
before applying NMS in testing. Proposals whose scores are below this
threshold are thrown away.
test_pre_nms_min_size_threshold: A `float` of the threshold of each side
of the box (w.r.t. the scaled image) in testing. Proposals whose sides
are below this threshold are thrown away.
test_nms_iou_threshold: A `float` in [0, 1] of the NMS IoU threshold in
testing.
test_num_proposals: An `int` of the final number of proposals to generate
in testing.
use_batched_nms: A `bool` of whether or not use
`tf.image.combined_non_max_suppression`.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'pre_nms_min_size_threshold': pre_nms_min_size_threshold,
'nms_iou_threshold': nms_iou_threshold,
'num_proposals': num_proposals,
'test_pre_nms_top_k': test_pre_nms_top_k,
'test_pre_nms_score_threshold': test_pre_nms_score_threshold,
'test_pre_nms_min_size_threshold': test_pre_nms_min_size_threshold,
'test_nms_iou_threshold': test_nms_iou_threshold,
'test_num_proposals': test_num_proposals,
'use_batched_nms': use_batched_nms,
}
super(MultilevelROIGenerator, self).__init__(**kwargs)
def call(self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: Mapping[str, tf.Tensor],
image_shape: tf.Tensor,
training: Optional[bool] = None):
"""Proposes RoIs given a group of candidates from different FPN levels.
The following describes the steps:
1. For each individual level:
a. Apply sigmoid transform if specified.
b. Decode boxes if specified.
c. Clip boxes if specified.
d. Filter small boxes and those fall outside image if specified.
e. Apply pre-NMS filtering including pre-NMS top k and score
thresholding.
f. Apply NMS.
2. Aggregate post-NMS boxes from each level.
3. Apply an overall top k to generate the final selected RoIs.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
        representing box tensors of shape
[batch, feature_h, feature_w, num_anchors * 4].
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape
[batch, feature_h, feature_w, num_anchors].
anchor_boxes: A `dict` with keys representing FPN levels and values
representing anchor box tensors of shape
[batch, feature_h * feature_w * num_anchors, 4].
image_shape: A `tf.Tensor` of shape [batch, 2] where the last dimension
are [height, width] of the scaled image.
training: A `bool` that indicates whether it is in training mode.
Returns:
roi_boxes: A `tf.Tensor` of shape [batch, num_proposals, 4], the proposed
ROIs in the scaled image coordinate.
roi_scores: A `tf.Tensor` of shape [batch, num_proposals], scores of the
proposed ROIs.
"""
roi_boxes, roi_scores = _multilevel_propose_rois(
raw_boxes,
raw_scores,
anchor_boxes,
image_shape,
pre_nms_top_k=(
self._config_dict['pre_nms_top_k'] if training
else self._config_dict['test_pre_nms_top_k']),
pre_nms_score_threshold=(
self._config_dict['pre_nms_score_threshold'] if training
else self._config_dict['test_pre_nms_score_threshold']),
pre_nms_min_size_threshold=(
self._config_dict['pre_nms_min_size_threshold'] if training
else self._config_dict['test_pre_nms_min_size_threshold']),
nms_iou_threshold=(
self._config_dict['nms_iou_threshold'] if training
else self._config_dict['test_nms_iou_threshold']),
num_proposals=(
self._config_dict['num_proposals'] if training
else self._config_dict['test_num_proposals']),
use_batched_nms=self._config_dict['use_batched_nms'],
decode_boxes=True,
clip_boxes=True,
apply_sigmoid_to_score=True)
return roi_boxes, roi_scores
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
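# Example: a minimal illustrative sketch (the helper below is for
# demonstration only, with dummy values; shapes follow the docstring above).
# A single FPN level '5' with a 4x4 feature map, one anchor per location and
# a 32x32 image. The generator returns `test_num_proposals` padded proposals
# per image when `training=False`.
def _example_multilevel_roi_generator():
  generator = MultilevelROIGenerator(num_proposals=10, test_num_proposals=10)
  raw_boxes = {'5': tf.zeros([1, 4, 4, 4])}   # [batch, h, w, anchors * 4]
  raw_scores = {'5': tf.zeros([1, 4, 4, 1])}  # [batch, h, w, anchors]
  anchor_boxes = {
      '5': tf.tile(tf.constant([[0., 0., 8., 8.]]), [16, 1])[tf.newaxis, ...]
  }                                           # [batch, h * w * anchors, 4]
  image_shape = tf.constant([[32., 32.]])
  rois, roi_scores = generator(
      raw_boxes, raw_scores, anchor_boxes, image_shape, training=False)
  return rois, roi_scores  # Shapes [1, 10, 4] and [1, 10].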
| 14,408 | 44.888535 | 80 | py |
models | models-master/official/vision/modeling/layers/nn_layers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for neural networks."""
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.ops import spatial_transform_ops
# Type annotations.
States = Dict[str, tf.Tensor]
Activation = Union[str, Callable]
def make_divisible(value: float,
divisor: int,
min_value: Optional[float] = None,
round_down_protect: bool = True,
) -> int:
"""This is to ensure that all layers have channels that are divisible by 8.
Args:
value: A `float` of original value.
divisor: An `int` of the divisor that need to be checked upon.
min_value: A `float` of minimum value threshold.
round_down_protect: A `bool` indicating whether round down more than 10%
will be allowed.
Returns:
The adjusted value in `int` that is divisible against divisor.
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if round_down_protect and new_value < 0.9 * value:
new_value += divisor
return int(new_value)
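# Example: a minimal illustrative sketch (the helper below is for
# demonstration only). `make_divisible` rounds to the nearest multiple of
# `divisor`, but rounds back up whenever rounding down would lose more than
# 10% of the original value, unless protection is disabled.
def _example_make_divisible():
  return (make_divisible(33, 8),                            # 32
          make_divisible(11, 8),                            # 16, protected
          make_divisible(11, 8, round_down_protect=False))  # 8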
def round_filters(filters: int,
multiplier: float,
divisor: int = 8,
min_depth: Optional[int] = None,
round_down_protect: bool = True,
skip: bool = False) -> int:
"""Rounds number of filters based on width multiplier."""
orig_f = filters
if skip or not multiplier:
return filters
new_filters = make_divisible(value=filters * multiplier,
divisor=divisor,
min_value=min_depth,
round_down_protect=round_down_protect)
logging.info('round_filter input=%s output=%s', orig_f, new_filters)
return int(new_filters)
def get_padding_for_kernel_size(kernel_size):
"""Compute padding size given kernel size."""
if kernel_size == 7:
return (3, 3)
elif kernel_size == 3:
return (1, 1)
else:
raise ValueError('Padding for kernel size {} not known.'.format(
kernel_size))
@tf.keras.utils.register_keras_serializable(package='Vision')
class SqueezeExcitation(tf.keras.layers.Layer):
"""Creates a squeeze and excitation layer."""
def __init__(self,
in_filters,
out_filters,
se_ratio,
divisible_by=1,
use_3d_input=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
gating_activation='sigmoid',
round_down_protect=True,
**kwargs):
"""Initializes a squeeze and excitation layer.
Args:
in_filters: An `int` number of filters of the input tensor.
out_filters: An `int` number of filters of the output tensor.
se_ratio: A `float` or None. If not None, se ratio for the squeeze and
excitation layer.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
use_3d_input: A `bool` of whether input is 2D or 3D image.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
gating_activation: A `str` name of the activation function for final
gating function.
round_down_protect: A `bool` of whether round down more than 10% will be
allowed.
**kwargs: Additional keyword arguments to be passed.
"""
super(SqueezeExcitation, self).__init__(**kwargs)
self._in_filters = in_filters
self._out_filters = out_filters
self._se_ratio = se_ratio
self._divisible_by = divisible_by
self._round_down_protect = round_down_protect
self._use_3d_input = use_3d_input
self._activation = activation
self._gating_activation = gating_activation
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if tf.keras.backend.image_data_format() == 'channels_last':
if not use_3d_input:
self._spatial_axis = [1, 2]
else:
self._spatial_axis = [1, 2, 3]
else:
if not use_3d_input:
self._spatial_axis = [2, 3]
else:
self._spatial_axis = [2, 3, 4]
self._activation_fn = tf_utils.get_activation(activation)
self._gating_activation_fn = tf_utils.get_activation(gating_activation)
def build(self, input_shape):
num_reduced_filters = make_divisible(
max(1, int(self._in_filters * self._se_ratio)),
divisor=self._divisible_by,
round_down_protect=self._round_down_protect)
self._se_reduce = tf.keras.layers.Conv2D(
filters=num_reduced_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._se_expand = tf.keras.layers.Conv2D(
filters=self._out_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=True,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
super(SqueezeExcitation, self).build(input_shape)
def get_config(self):
config = {
'in_filters': self._in_filters,
'out_filters': self._out_filters,
'se_ratio': self._se_ratio,
'divisible_by': self._divisible_by,
'use_3d_input': self._use_3d_input,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'gating_activation': self._gating_activation,
'round_down_protect': self._round_down_protect,
}
base_config = super(SqueezeExcitation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
x = tf.reduce_mean(inputs, self._spatial_axis, keepdims=True)
x = self._activation_fn(self._se_reduce(x))
x = self._gating_activation_fn(self._se_expand(x))
return x * inputs
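# Example: a minimal illustrative sketch (the helper below is for
# demonstration only, assuming channels_last 2D inputs). A squeeze and
# excitation block preserves the input shape and only re-weights channels
# with a learned sigmoid gate.
def _example_squeeze_excitation():
  se = SqueezeExcitation(in_filters=64, out_filters=64, se_ratio=0.25)
  features = tf.ones([2, 16, 16, 64])
  return se(features)  # Shape [2, 16, 16, 64].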
def get_stochastic_depth_rate(init_rate, i, n):
"""Get drop connect rate for the ith block.
Args:
init_rate: A `float` of initial drop rate.
i: An `int` of order of the current block.
n: An `int` total number of blocks.
Returns:
Drop rate of the ith block.
"""
if init_rate is not None:
if init_rate < 0 or init_rate > 1:
raise ValueError('Initial drop rate must be within 0 and 1.')
rate = init_rate * float(i) / n
else:
rate = None
return rate
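# Example: a minimal illustrative sketch (the helper below is for
# demonstration only). With an initial rate of 0.2 and 5 blocks, the
# per-block drop rate grows linearly with block depth.
def _example_stochastic_depth_rates():
  return [get_stochastic_depth_rate(0.2, i, 5) for i in range(1, 6)]
  # Approximately [0.04, 0.08, 0.12, 0.16, 0.2].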
@tf.keras.utils.register_keras_serializable(package='Vision')
class StochasticDepth(tf.keras.layers.Layer):
"""Creates a stochastic depth layer."""
def __init__(self, stochastic_depth_drop_rate, **kwargs):
"""Initializes a stochastic depth layer.
Args:
stochastic_depth_drop_rate: A `float` of drop rate.
**kwargs: Additional keyword arguments to be passed.
Returns:
A output `tf.Tensor` of which should have the same shape as input.
"""
super(StochasticDepth, self).__init__(**kwargs)
self._drop_rate = stochastic_depth_drop_rate
def get_config(self):
config = {'stochastic_depth_drop_rate': self._drop_rate}
base_config = super(StochasticDepth, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
if training is None:
training = tf.keras.backend.learning_phase()
if not training or self._drop_rate is None or self._drop_rate == 0:
return inputs
keep_prob = 1.0 - self._drop_rate
batch_size = tf.shape(inputs)[0]
random_tensor = keep_prob
random_tensor += tf.random.uniform(
[batch_size] + [1] * (inputs.shape.rank - 1), dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
output = tf.math.divide(inputs, keep_prob) * binary_tensor
return output
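# Example: a minimal illustrative sketch (the helper below is for
# demonstration only). StochasticDepth is the identity at inference time;
# during training it zeroes the branch for a random subset of examples and
# rescales the survivors by 1 / keep_prob.
def _example_stochastic_depth_inference():
  layer = StochasticDepth(stochastic_depth_drop_rate=0.2)
  x = tf.ones([4, 8])
  return layer(x, training=False)  # Identical to `x`.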
@tf.keras.utils.register_keras_serializable(package='Vision')
def pyramid_feature_fusion(inputs, target_level):
"""Fuses all feature maps in the feature pyramid at the target level.
Args:
inputs: A dictionary containing the feature pyramid. The size of the input
tensor needs to be fixed.
target_level: An `int` of the target feature level for feature fusion.
Returns:
A `float` `tf.Tensor` of shape [batch_size, feature_height, feature_width,
feature_channel].
"""
# Convert keys to int.
pyramid_feats = {int(k): v for k, v in inputs.items()}
min_level = min(pyramid_feats.keys())
max_level = max(pyramid_feats.keys())
resampled_feats = []
for l in range(min_level, max_level + 1):
if l == target_level:
resampled_feats.append(pyramid_feats[l])
else:
feat = pyramid_feats[l]
target_size = list(feat.shape[1:3])
target_size[0] *= 2**(l - target_level)
target_size[1] *= 2**(l - target_level)
# Casts feat to float32 so the resize op can be run on TPU.
feat = tf.cast(feat, tf.float32)
feat = tf.image.resize(
feat, size=target_size, method=tf.image.ResizeMethod.BILINEAR)
      # Casts it back to be compatible with the rest of the operations.
feat = tf.cast(feat, pyramid_feats[l].dtype)
resampled_feats.append(feat)
return tf.math.add_n(resampled_feats)
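# Example: a minimal illustrative sketch (the helper below is for
# demonstration only). Fusing a two-level pyramid at the finer level
# upsamples the coarser map 2x before the element-wise sum, so the output
# keeps the target level's spatial size.
def _example_pyramid_feature_fusion():
  features = {'3': tf.ones([1, 16, 16, 8]), '4': tf.ones([1, 8, 8, 8])}
  return pyramid_feature_fusion(features, target_level=3)  # [1, 16, 16, 8]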
class PanopticFPNFusion(tf.keras.Model):
"""Creates a Panoptic FPN feature Fusion layer.
This implements feature fusion for semantic segmentation head from the paper:
Alexander Kirillov, Ross Girshick, Kaiming He and Piotr Dollar.
Panoptic Feature Pyramid Networks.
(https://arxiv.org/pdf/1901.02446.pdf)
"""
def __init__(
self,
min_level: int = 2,
max_level: int = 5,
target_level: int = 2,
num_filters: int = 128,
num_fpn_filters: int = 256,
activation: str = 'relu',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes panoptic FPN feature fusion layer.
Args:
min_level: An `int` of minimum level to use in feature fusion.
max_level: An `int` of maximum level to use in feature fusion.
target_level: An `int` of the target feature level for feature fusion.
num_filters: An `int` number of filters in conv2d layers.
num_fpn_filters: An `int` number of filters in the FPN outputs
activation: A `str` name of the activation function.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
Returns:
A `float` `tf.Tensor` of shape [batch_size, feature_height, feature_width,
feature_channel].
"""
if target_level > max_level:
raise ValueError('target_level should be less than max_level')
self._config_dict = {
'min_level': min_level,
'max_level': max_level,
'target_level': target_level,
'num_filters': num_filters,
'num_fpn_filters': num_fpn_filters,
'activation': activation,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
norm = tf.keras.layers.GroupNormalization
conv2d = tf.keras.layers.Conv2D
activation_fn = tf_utils.get_activation(activation)
if tf.keras.backend.image_data_format() == 'channels_last':
norm_axis = -1
else:
norm_axis = 1
inputs = self._build_inputs(num_fpn_filters, min_level, max_level)
upscaled_features = []
for level in range(min_level, max_level + 1):
num_conv_layers = max(1, level - target_level)
x = inputs[str(level)]
for i in range(num_conv_layers):
x = conv2d(
filters=num_filters,
kernel_size=3,
padding='same',
kernel_initializer=tf.keras.initializers.VarianceScaling(),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
x = norm(groups=32, axis=norm_axis)(x)
x = activation_fn(x)
if level != target_level:
x = spatial_transform_ops.nearest_upsampling(x, scale=2)
upscaled_features.append(x)
fused_features = tf.math.add_n(upscaled_features)
self._output_specs = {str(target_level): fused_features.get_shape()}
super(PanopticFPNFusion, self).__init__(
inputs=inputs, outputs=fused_features, **kwargs)
def _build_inputs(self, num_filters: int,
min_level: int, max_level: int):
inputs = {}
for level in range(min_level, max_level + 1):
inputs[str(level)] = tf.keras.Input(shape=[None, None, num_filters])
return inputs
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self) -> Mapping[str, tf.TensorShape]:
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@tf.keras.utils.register_keras_serializable(package='Vision')
class Scale(tf.keras.layers.Layer):
"""Scales the input by a trainable scalar weight.
This is useful for applying ReZero to layers, which improves convergence
speed. This implements the paper:
ReZero is All You Need: Fast Convergence at Large Depth.
(https://arxiv.org/pdf/2003.04887.pdf).
"""
def __init__(
self,
initializer: tf.keras.initializers.Initializer = 'ones',
regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a scale layer.
Args:
initializer: A `str` of initializer for the scalar weight.
regularizer: A `tf.keras.regularizers.Regularizer` for the scalar weight.
**kwargs: Additional keyword arguments to be passed to this layer.
Returns:
An `tf.Tensor` of which should have the same shape as input.
"""
super(Scale, self).__init__(**kwargs)
self._initializer = initializer
self._regularizer = regularizer
self._scale = self.add_weight(
name='scale',
shape=[],
dtype=self.dtype,
initializer=self._initializer,
regularizer=self._regularizer,
trainable=True)
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'initializer': self._initializer,
'regularizer': self._regularizer,
}
base_config = super(Scale, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Calls the layer with the given inputs."""
scale = tf.cast(self._scale, inputs.dtype)
return scale * inputs
@tf.keras.utils.register_keras_serializable(package='Vision')
class TemporalSoftmaxPool(tf.keras.layers.Layer):
"""Creates a network layer corresponding to temporal softmax pooling.
This is useful for multi-class logits (used in e.g., Charades). Modified from
AssembleNet Charades evaluation from:
Michael S. Ryoo, AJ Piergiovanni, Mingxing Tan, Anelia Angelova.
AssembleNet: Searching for Multi-Stream Neural Connectivity in Video
Architectures.
(https://arxiv.org/pdf/1905.13209.pdf).
"""
def call(self, inputs):
"""Calls the layer with the given inputs."""
assert inputs.shape.rank in (3, 4, 5)
frames = tf.shape(inputs)[1]
pre_logits = inputs / tf.sqrt(tf.cast(frames, inputs.dtype))
activations = tf.nn.softmax(pre_logits, axis=1)
outputs = inputs * activations
return outputs
@tf.keras.utils.register_keras_serializable(package='Vision')
class PositionalEncoding(tf.keras.layers.Layer):
"""Creates a network layer that adds a sinusoidal positional encoding.
Positional encoding is incremented across frames, and is added to the input.
The positional encoding is first weighted at 0 so that the network can choose
to ignore it. This implements:
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones,
Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin.
Attention Is All You Need.
(https://arxiv.org/pdf/1706.03762.pdf).
"""
def __init__(self,
initializer: tf.keras.initializers.Initializer = 'zeros',
cache_encoding: bool = False,
state_prefix: Optional[str] = None,
**kwargs):
"""Initializes positional encoding.
Args:
initializer: A `str` of initializer for weighting the positional encoding.
cache_encoding: A `bool`. If True, cache the positional encoding tensor
after calling build. Otherwise, rebuild the tensor for every call.
Setting this to False can be useful when we want to input a variable
number of frames, so the positional encoding tensor can change shape.
state_prefix: a prefix string to identify states.
**kwargs: Additional keyword arguments to be passed to this layer.
Returns:
A `tf.Tensor` of which should have the same shape as input.
"""
super(PositionalEncoding, self).__init__(**kwargs)
self._initializer = initializer
self._cache_encoding = cache_encoding
self._pos_encoding = None
self._rezero = Scale(initializer=initializer, name='rezero')
state_prefix = state_prefix if state_prefix is not None else ''
self._state_prefix = state_prefix
self._frame_count_name = f'{state_prefix}_pos_enc_frame_count'
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'initializer': self._initializer,
'cache_encoding': self._cache_encoding,
'state_prefix': self._state_prefix,
}
base_config = super(PositionalEncoding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _positional_encoding(self,
num_positions: Union[int, tf.Tensor],
hidden_size: Union[int, tf.Tensor],
start_position: Union[int, tf.Tensor] = 0,
dtype: str = 'float32') -> tf.Tensor:
"""Creates a sequence of sinusoidal positional encoding vectors.
Args:
num_positions: the total number of positions (frames).
hidden_size: the number of channels used for the hidden vectors.
start_position: the start position.
dtype: the dtype of the output tensor.
Returns:
The positional encoding tensor with shape [num_positions, hidden_size].
"""
if isinstance(start_position, tf.Tensor) and start_position.shape.rank == 1:
start_position = start_position[0]
# Calling `tf.range` with `dtype=tf.bfloat16` results in an error,
# so we cast afterward.
positions = tf.range(start_position, start_position + num_positions)
positions = tf.cast(positions, dtype)[:, tf.newaxis]
idx = tf.range(hidden_size)[tf.newaxis, :]
power = tf.cast(2 * (idx // 2), dtype)
power /= tf.cast(hidden_size, dtype)
angles = 1. / tf.math.pow(10_000., power)
radians = positions * angles
sin = tf.math.sin(radians[:, 0::2])
cos = tf.math.cos(radians[:, 1::2])
pos_encoding = tf.concat([sin, cos], axis=-1)
return pos_encoding
def _get_pos_encoding(self,
input_shape: tf.Tensor,
frame_count: int = 0) -> tf.Tensor:
"""Calculates the positional encoding from the input shape.
Args:
input_shape: the shape of the input.
frame_count: a count of frames that indicates the index of the first
frame.
Returns:
The positional encoding tensor with shape [num_positions, hidden_size].
"""
frames = input_shape[1]
channels = input_shape[-1]
pos_encoding = self._positional_encoding(
frames, channels, start_position=frame_count, dtype=self.dtype)
pos_encoding = tf.reshape(pos_encoding, [1, frames, 1, 1, channels])
return pos_encoding
def build(self, input_shape):
"""Builds the layer with the given input shape.
Args:
input_shape: The input shape.
Raises:
ValueError: If using 'channels_first' data format.
"""
if tf.keras.backend.image_data_format() == 'channels_first':
raise ValueError('"channels_first" mode is unsupported.')
if self._cache_encoding:
self._pos_encoding = self._get_pos_encoding(input_shape)
super(PositionalEncoding, self).build(input_shape)
def call(
self,
inputs: tf.Tensor,
states: Optional[States] = None,
output_states: bool = True,
) -> Union[tf.Tensor, Tuple[tf.Tensor, States]]:
"""Calls the layer with the given inputs.
Args:
inputs: An input `tf.Tensor`.
states: A `dict` of states such that, if any of the keys match for this
layer, will overwrite the contents of the buffer(s). Expected keys
include `state_prefix + '_pos_enc_frame_count'`.
output_states: A `bool`. If True, returns the output tensor and output
states. Returns just the output tensor otherwise.
Returns:
An output `tf.Tensor` (and optionally the states if `output_states=True`).
Raises:
ValueError: If using 'channels_first' data format.
"""
states = dict(states) if states is not None else {}
# Keep a count of frames encountered across input iterations in
# num_frames to be able to accurately update the positional encoding.
num_frames = tf.shape(inputs)[1]
frame_count = tf.cast(states.get(self._frame_count_name, [0]), tf.int32)
states[self._frame_count_name] = frame_count + num_frames
if self._cache_encoding:
pos_encoding = self._pos_encoding
else:
pos_encoding = self._get_pos_encoding(
tf.shape(inputs), frame_count=frame_count)
pos_encoding = tf.cast(pos_encoding, inputs.dtype)
pos_encoding = self._rezero(pos_encoding)
outputs = inputs + pos_encoding
return (outputs, states) if output_states else outputs
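# Example: a minimal illustrative sketch (the helper below is for
# demonstration only, assuming channels_last inputs). Streaming two 4-frame
# clips through PositionalEncoding: feeding the returned states back in
# advances the frame counter, so the second clip is encoded as frames 4..7
# instead of restarting at 0. Output shapes match the inputs.
def _example_streaming_positional_encoding():
  layer = PositionalEncoding(state_prefix='stream')
  clip = tf.zeros([1, 4, 8, 8, 16])
  out1, states = layer(clip, states={}, output_states=True)
  out2, states = layer(clip, states=states, output_states=True)
  return out1, out2, states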
@tf.keras.utils.register_keras_serializable(package='Vision')
class GlobalAveragePool3D(tf.keras.layers.Layer):
"""Creates a global average pooling layer with causal mode.
Implements causal mode, which runs a cumulative sum (with `tf.cumsum`) across
frames in the time dimension, allowing the use of a stream buffer. Sums any
valid input state with the current input to allow state to accumulate over
several iterations.
"""
def __init__(self,
keepdims: bool = False,
causal: bool = False,
state_prefix: Optional[str] = None,
**kwargs):
"""Initializes a global average pool layer.
Args:
keepdims: A `bool`. If True, keep the averaged dimensions.
causal: A `bool` of whether to run in causal mode with a cumulative sum
across frames.
state_prefix: a prefix string to identify states.
**kwargs: Additional keyword arguments to be passed to this layer.
Returns:
An output `tf.Tensor`.
"""
super(GlobalAveragePool3D, self).__init__(**kwargs)
self._keepdims = keepdims
self._causal = causal
state_prefix = state_prefix if state_prefix is not None else ''
self._state_prefix = state_prefix
self._state_name = f'{state_prefix}_pool_buffer'
self._frame_count_name = f'{state_prefix}_pool_frame_count'
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'keepdims': self._keepdims,
'causal': self._causal,
'state_prefix': self._state_prefix,
}
base_config = super(GlobalAveragePool3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
inputs: tf.Tensor,
states: Optional[States] = None,
output_states: bool = False
) -> Union[tf.Tensor, Tuple[tf.Tensor, States]]:
"""Calls the layer with the given inputs.
Args:
inputs: An input `tf.Tensor`.
      states: A `dict` of states. Any entry whose key matches this layer
        overwrites the contents of the corresponding buffer. Expected keys
        include `state_prefix + '_pool_buffer'` and
        `state_prefix + '_pool_frame_count'`.
output_states: A `bool`. If True, returns the output tensor and output
states. Returns just the output tensor otherwise.
Returns:
An output `tf.Tensor` (and optionally the states if `output_states=True`).
If `causal=True`, the output tensor will have shape
`[batch_size, num_frames, 1, 1, channels]` if `keepdims=True`. We keep
the frame dimension in this case to simulate a cumulative global average
as if we are inputting one frame at a time. If `causal=False`, the output
is equivalent to `tf.keras.layers.GlobalAveragePooling3D` with shape
`[batch_size, 1, 1, 1, channels]` if `keepdims=True` (plus the optional
buffer stored in `states`).
Raises:
ValueError: If using 'channels_first' data format.
"""
states = dict(states) if states is not None else {}
if tf.keras.backend.image_data_format() == 'channels_first':
raise ValueError('"channels_first" mode is unsupported.')
# Shape: [batch_size, 1, 1, 1, channels]
buffer = states.get(self._state_name, None)
if buffer is None:
buffer = tf.zeros_like(inputs[:, :1, :1, :1], dtype=inputs.dtype)
states[self._state_name] = buffer
# Keep a count of frames encountered across input iterations in
# num_frames to be able to accurately take a cumulative average across
# all frames when running in streaming mode
num_frames = tf.shape(inputs)[1]
frame_count = states.get(self._frame_count_name, tf.constant([0]))
frame_count = tf.cast(frame_count, tf.int32)
states[self._frame_count_name] = frame_count + num_frames
if self._causal:
# Take a mean of spatial dimensions to make computation more efficient.
x = tf.reduce_mean(inputs, axis=[2, 3], keepdims=True)
x = tf.cumsum(x, axis=1)
x = x + buffer
# The last frame will be the value of the next state
# Shape: [batch_size, 1, 1, 1, channels]
states[self._state_name] = x[:, -1:]
# In causal mode, the divisor increments by 1 for every frame to
# calculate cumulative averages instead of one global average
mean_divisors = tf.range(num_frames) + frame_count + 1
mean_divisors = tf.reshape(mean_divisors, [1, num_frames, 1, 1, 1])
mean_divisors = tf.cast(mean_divisors, x.dtype)
# Shape: [batch_size, num_frames, 1, 1, channels]
x = x / mean_divisors
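      # E.g. if 2 frames were seen in earlier iterations and this call adds 3
      # more, the divisors are [3, 4, 5], so each output frame is the average
      # over all frames observed up to that point.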
else:
# In non-causal mode, we (optionally) sum across frames to take a
# cumulative average across input iterations rather than individual
# frames. If no buffer state is passed, this essentially becomes
# regular global average pooling.
# Shape: [batch_size, 1, 1, 1, channels]
x = tf.reduce_sum(inputs, axis=(1, 2, 3), keepdims=True)
x = x / tf.cast(tf.shape(inputs)[2] * tf.shape(inputs)[3], x.dtype)
x = x + buffer
# Shape: [batch_size, 1, 1, 1, channels]
states[self._state_name] = x
x = x / tf.cast(frame_count + num_frames, x.dtype)
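      # E.g. if 4 frames were summed into the buffer previously and this call
      # adds 4 more, the spatially-averaged sum is divided by 8, matching a
      # single global average pool over all 8 frames.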
if not self._keepdims:
x = tf.squeeze(x, axis=(1, 2, 3))
return (x, states) if output_states else x
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpatialAveragePool3D(tf.keras.layers.Layer):
"""Creates a global average pooling layer pooling across spatial dimentions."""
def __init__(self, keepdims: bool = False, **kwargs):
"""Initializes a global average pool layer.
Args:
keepdims: A `bool`. If True, keep the averaged dimensions.
**kwargs: Additional keyword arguments to be passed to this layer.
"""
super(SpatialAveragePool3D, self).__init__(**kwargs)
self._keepdims = keepdims
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'keepdims': self._keepdims,
}
base_config = super(SpatialAveragePool3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
"""Builds the layer with the given input shape."""
if tf.keras.backend.image_data_format() == 'channels_first':
raise ValueError('"channels_first" mode is unsupported.')
super(SpatialAveragePool3D, self).build(input_shape)
def call(self, inputs, states=None, output_states: bool = False):
"""Calls the layer with the given inputs."""
if inputs.shape.rank != 5:
raise ValueError(
'Input should have rank {}, got {}'.format(5, inputs.shape.rank))
output = tf.reduce_mean(inputs, axis=(2, 3), keepdims=self._keepdims)
return (output, states) if output_states else output
class CausalConvMixin:
"""Mixin class to implement CausalConv for `tf.keras.layers.Conv` layers."""
@property
def use_buffered_input(self) -> bool:
return self._use_buffered_input
@use_buffered_input.setter
def use_buffered_input(self, variable: bool):
self._use_buffered_input = variable
def _compute_buffered_causal_padding(self,
inputs: tf.Tensor,
use_buffered_input: bool = False,
time_axis: int = 1,
) -> List[List[int]]:
"""Calculates padding for 'causal' option for conv layers.
Args:
      inputs: An input `tf.Tensor` to be padded.
use_buffered_input: A `bool`. If True, use 'valid' padding along the time
dimension. This should be set when applying the stream buffer.
time_axis: An `int` of the axis of the time dimension.
Returns:
A list of paddings for `tf.pad`.
"""
input_shape = tf.shape(inputs)[1:-1]
if tf.keras.backend.image_data_format() == 'channels_first':
raise ValueError('"channels_first" mode is unsupported.')
kernel_size_effective = [
(self.kernel_size[i] +
(self.kernel_size[i] - 1) * (self.dilation_rate[i] - 1))
for i in range(self.rank)
]
pad_total = [kernel_size_effective[0] - 1]
for i in range(1, self.rank):
overlap = (input_shape[i] - 1) % self.strides[i] + 1
pad_total.append(tf.maximum(kernel_size_effective[i] - overlap, 0))
pad_beg = [pad_total[i] // 2 for i in range(self.rank)]
pad_end = [pad_total[i] - pad_beg[i] for i in range(self.rank)]
padding = [[pad_beg[i], pad_end[i]] for i in range(self.rank)]
padding = [[0, 0]] + padding + [[0, 0]]
if use_buffered_input:
padding[time_axis] = [0, 0]
else:
padding[time_axis] = [padding[time_axis][0] + padding[time_axis][1], 0]
return padding
def _causal_validate_init(self):
"""Validates the Conv layer initial configuration."""
# Overriding this method is meant to circumvent unnecessary errors when
# using causal padding.
if (self.filters is not None
and self.filters % self.groups != 0):
raise ValueError(
'The number of filters must be evenly divisible by the number of '
'groups. Received: groups={}, filters={}'.format(
self.groups, self.filters))
if not all(self.kernel_size):
raise ValueError('The argument `kernel_size` cannot contain 0(s). '
'Received: %s' % (self.kernel_size,))
def _buffered_spatial_output_shape(self, spatial_output_shape: List[int]):
"""Computes the spatial output shape from the input shape."""
    # When using buffered input, 'valid' padding is applied across time. The
    # output shape across time is then the input shape minus any padding,
    # assuming the stride across time is 1.
if self._use_buffered_input and spatial_output_shape[0] is not None:
padding = self._compute_buffered_causal_padding(
tf.zeros([1] + spatial_output_shape + [1]), use_buffered_input=False)
spatial_output_shape[0] -= sum(padding[1])
return spatial_output_shape
@tf.keras.utils.register_keras_serializable(package='Vision')
class Conv2D(tf.keras.layers.Conv2D, CausalConvMixin):
"""Conv2D layer supporting CausalConv.
Supports `padding='causal'` option (like in `tf.keras.layers.Conv1D`),
which applies causal padding to the temporal dimension, and same padding in
the spatial dimensions.
"""
def __init__(self, *args, use_buffered_input=False, **kwargs):
"""Initializes conv2d.
Args:
*args: Arguments to be passed.
use_buffered_input: A `bool`. If True, the input is expected to be padded
beforehand. In effect, calling this layer will use 'valid' padding on
the temporal dimension to simulate 'causal' padding.
**kwargs: Additional keyword arguments to be passed.
"""
super(Conv2D, self).__init__(*args, **kwargs)
self._use_buffered_input = use_buffered_input
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'use_buffered_input': self._use_buffered_input,
}
base_config = super(Conv2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self, inputs):
"""Computes causal padding dimensions for the given inputs."""
return self._compute_buffered_causal_padding(
inputs, use_buffered_input=self._use_buffered_input)
def _validate_init(self):
"""Validates the Conv layer initial configuration."""
self._causal_validate_init()
def _spatial_output_shape(self, spatial_input_shape: List[int]):
"""Computes the spatial output shape from the input shape."""
shape = super(Conv2D, self)._spatial_output_shape(spatial_input_shape)
return self._buffered_spatial_output_shape(shape)
@tf.keras.utils.register_keras_serializable(package='Vision')
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, CausalConvMixin):
"""DepthwiseConv2D layer supporting CausalConv.
Supports `padding='causal'` option (like in `tf.keras.layers.Conv1D`),
which applies causal padding to the temporal dimension, and same padding in
the spatial dimensions.
"""
def __init__(self, *args, use_buffered_input=False, **kwargs):
"""Initializes depthwise conv2d.
Args:
*args: Arguments to be passed.
use_buffered_input: A `bool`. If True, the input is expected to be padded
beforehand. In effect, calling this layer will use 'valid' padding on
the temporal dimension to simulate 'causal' padding.
**kwargs: Additional keyword arguments to be passed.
"""
super(DepthwiseConv2D, self).__init__(*args, **kwargs)
self._use_buffered_input = use_buffered_input
# Causal padding is unsupported by default for DepthwiseConv2D,
# so we resort to valid padding internally. However, we handle
# causal padding as a special case with `self._is_causal`, which is
# defined by the super class.
if self.padding == 'causal':
self.padding = 'valid'
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'use_buffered_input': self._use_buffered_input,
}
base_config = super(DepthwiseConv2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Calls the layer with the given inputs."""
if self._is_causal:
inputs = tf.pad(inputs, self._compute_causal_padding(inputs))
return super(DepthwiseConv2D, self).call(inputs)
def _compute_causal_padding(self, inputs):
"""Computes causal padding dimensions for the given inputs."""
return self._compute_buffered_causal_padding(
inputs, use_buffered_input=self._use_buffered_input)
def _validate_init(self):
"""Validates the Conv layer initial configuration."""
self._causal_validate_init()
def _spatial_output_shape(self, spatial_input_shape: List[int]):
"""Computes the spatial output shape from the input shape."""
shape = super(DepthwiseConv2D, self)._spatial_output_shape(
spatial_input_shape)
return self._buffered_spatial_output_shape(shape)
@tf.keras.utils.register_keras_serializable(package='Vision')
class Conv3D(tf.keras.layers.Conv3D, CausalConvMixin):
"""Conv3D layer supporting CausalConv.
Supports `padding='causal'` option (like in `tf.keras.layers.Conv1D`),
which applies causal padding to the temporal dimension, and same padding in
the spatial dimensions.
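
  Example (an illustrative sketch):

    conv = Conv3D(filters=8, kernel_size=(3, 3, 3), padding='causal')
    y = conv(tf.ones([1, 8, 16, 16, 4]))  # -> shape [1, 8, 16, 16, 8]

  With `use_buffered_input=True`, 'valid' padding is used along time and the
  caller (e.g. a stream buffer) is expected to prepend the previous frames.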
"""
def __init__(self, *args, use_buffered_input=False, **kwargs):
"""Initializes conv3d.
Args:
*args: Arguments to be passed.
use_buffered_input: A `bool`. If True, the input is expected to be padded
beforehand. In effect, calling this layer will use 'valid' padding on
the temporal dimension to simulate 'causal' padding.
**kwargs: Additional keyword arguments to be passed.
"""
super(Conv3D, self).__init__(*args, **kwargs)
self._use_buffered_input = use_buffered_input
def get_config(self):
"""Returns a dictionary containing the config used for initialization."""
config = {
'use_buffered_input': self._use_buffered_input,
}
base_config = super(Conv3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
"""Call the layer with the given inputs."""
# Note: tf.nn.conv3d with depthwise kernels on CPU is currently only
# supported when compiling with TF graph (XLA) using tf.function, so it
# is compiled by default here (b/186463870).
conv_fn = tf.function(super(Conv3D, self).call, jit_compile=True)
return conv_fn(inputs)
def _compute_causal_padding(self, inputs):
"""Computes causal padding dimensions for the given inputs."""
return self._compute_buffered_causal_padding(
inputs, use_buffered_input=self._use_buffered_input)
def _validate_init(self):
"""Validates the Conv layer initial configuration."""
self._causal_validate_init()
def _spatial_output_shape(self, spatial_input_shape: List[int]):
"""Computes the spatial output shape from the input shape."""
shape = super(Conv3D, self)._spatial_output_shape(spatial_input_shape)
return self._buffered_spatial_output_shape(shape)
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpatialPyramidPooling(tf.keras.layers.Layer):
"""Implements the Atrous Spatial Pyramid Pooling.
References:
[Rethinking Atrous Convolution for Semantic Image Segmentation](
https://arxiv.org/pdf/1706.05587.pdf)
[Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation](https://arxiv.org/pdf/1802.02611.pdf)
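
  Example (an illustrative sketch):

    aspp = SpatialPyramidPooling(
        output_channels=256, dilation_rates=[6, 12, 18])
    features = aspp(tf.ones([2, 32, 32, 512]))  # -> shape [2, 32, 32, 256]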
"""
def __init__(
self,
output_channels: int,
dilation_rates: List[int],
pool_kernel_size: Optional[List[int]] = None,
use_sync_bn: bool = False,
batchnorm_momentum: float = 0.99,
batchnorm_epsilon: float = 0.001,
activation: str = 'relu',
dropout: float = 0.5,
kernel_initializer: str = 'GlorotUniform',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
interpolation: str = 'bilinear',
use_depthwise_convolution: bool = False,
**kwargs):
"""Initializes `SpatialPyramidPooling`.
Args:
output_channels: Number of channels produced by SpatialPyramidPooling.
dilation_rates: A list of integers for parallel dilated conv.
pool_kernel_size: A list of integers or None. If None, global average
pooling is applied, otherwise an average pooling of pool_kernel_size is
applied.
use_sync_bn: A bool, whether or not to use sync batch normalization.
batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
0.99.
batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
activation: A `str` for type of activation to be used. Defaults to 'relu'.
dropout: A float for the dropout rate before output. Defaults to 0.5.
kernel_initializer: Kernel initializer for conv layers. Defaults to
`glorot_uniform`.
kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
      use_depthwise_convolution: If True, uses depthwise separable convolutions
        in the dilated branches, as in [Encoder-Decoder with Atrous Separable
        Convolution for Semantic Image Segmentation](
        https://arxiv.org/pdf/1802.02611.pdf).
**kwargs: Other keyword arguments for the layer.
"""
super().__init__(**kwargs)
self._output_channels = output_channels
self._dilation_rates = dilation_rates
self._use_sync_bn = use_sync_bn
self._batchnorm_momentum = batchnorm_momentum
self._batchnorm_epsilon = batchnorm_epsilon
self._activation = activation
self._dropout = dropout
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._interpolation = interpolation
self._pool_kernel_size = pool_kernel_size
self._use_depthwise_convolution = use_depthwise_convolution
self._activation_fn = tf_utils.get_activation(activation)
self._bn_op = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
def build(self, input_shape):
height = input_shape[1]
width = input_shape[2]
channels = input_shape[3]
self.aspp_layers = []
conv1 = tf.keras.layers.Conv2D(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
use_bias=False)
norm1 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon,
synchronized=self._use_sync_bn)
self.aspp_layers.append([conv1, norm1])
for dilation_rate in self._dilation_rates:
leading_layers = []
kernel_size = (3, 3)
if self._use_depthwise_convolution:
leading_layers += [
tf.keras.layers.DepthwiseConv2D(
depth_multiplier=1,
kernel_size=kernel_size,
padding='same',
depthwise_regularizer=self._kernel_regularizer,
depthwise_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
dilation_rate=dilation_rate,
use_bias=False)
]
kernel_size = (1, 1)
conv_dilation = leading_layers + [
tf.keras.layers.Conv2D(
filters=self._output_channels,
kernel_size=kernel_size,
padding='same',
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
dilation_rate=dilation_rate,
use_bias=False)
]
norm_dilation = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon,
synchronized=self._use_sync_bn)
self.aspp_layers.append(conv_dilation + [norm_dilation])
if self._pool_kernel_size is None:
pooling = [
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Reshape((1, 1, channels))
]
else:
pooling = [tf.keras.layers.AveragePooling2D(self._pool_kernel_size)]
conv2 = tf.keras.layers.Conv2D(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
use_bias=False)
norm2 = self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon,
synchronized=self._use_sync_bn)
self.aspp_layers.append(pooling + [conv2, norm2])
self._resizing_layer = tf.keras.layers.Resizing(
height, width, interpolation=self._interpolation, dtype=tf.float32)
self._projection = [
tf.keras.layers.Conv2D(
filters=self._output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
use_bias=False),
self._bn_op(
axis=self._bn_axis,
momentum=self._batchnorm_momentum,
epsilon=self._batchnorm_epsilon,
synchronized=self._use_sync_bn)
]
self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout)
self._concat_layer = tf.keras.layers.Concatenate(axis=-1)
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
if training is None:
training = tf.keras.backend.learning_phase()
result = []
for i, layers in enumerate(self.aspp_layers):
x = inputs
for layer in layers:
# Apply layers sequentially.
x = layer(x, training=training)
x = self._activation_fn(x)
# Apply resize layer to the end of the last set of layers.
if i == len(self.aspp_layers) - 1:
x = self._resizing_layer(x)
result.append(tf.cast(x, inputs.dtype))
x = self._concat_layer(result)
for layer in self._projection:
x = layer(x, training=training)
x = self._activation_fn(x)
return self._dropout_layer(x)
def get_config(self):
config = {
'output_channels': self._output_channels,
'dilation_rates': self._dilation_rates,
'pool_kernel_size': self._pool_kernel_size,
'use_sync_bn': self._use_sync_bn,
'batchnorm_momentum': self._batchnorm_momentum,
'batchnorm_epsilon': self._batchnorm_epsilon,
'activation': self._activation,
'dropout': self._dropout,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'interpolation': self._interpolation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultiHeadAttention(tf.keras.layers.MultiHeadAttention):
"""MultiHeadAttention layer.
This is an implementation of multi-headed attention as described in the paper
"Attention is all you Need" (Vaswani et al., 2017).
"""
def __init__(
self,
*args,
partition_dims: Optional[Tuple[int, int, int, int]] = None,
max_inference_parallelism: Optional[int] = None,
**kwargs,
):
"""Initializes MultiHeadAttention.
Args:
*args: Positional arguments passed to super().__init__.
partition_dims: Spatial partition dimensions.
max_inference_parallelism: The number of examples to run in parallel
during inference. Set this limit to reduce the peak memory usage. If
None, use vectorized operations to run the whole batch in parallel.
**kwargs: Keyword arguments passed to super().__init__.
"""
super().__init__(*args, **kwargs)
self._partition_dims = partition_dims
self._max_inference_parallelism = max_inference_parallelism
def get_config(self):
config = super().get_config()
config.update({
'partition_dims': self._partition_dims,
'max_inference_parallelism': self._max_inference_parallelism,
})
return config
def _compute_attention(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = None,
):
"""Applies dot-product attention with query, key, value tensors.
Args:
query: Projected query `Tensor` of shape `(B, T, N, key_dim)`.
key: Projected key `Tensor` of shape `(B, S, N, key_dim)`.
value: Projected value `Tensor` of shape `(B, S, N, value_dim)`.
      attention_mask: A boolean mask of shape `(B, T, S)` that prevents
        attention to certain positions. It is generally not needed if the
        `query` and `value` (and/or `key`) are masked.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
attention_output: Multi-headed outputs of attention computation.
attention_scores: Multi-headed attention weights.
"""
if self._partition_dims is not None:
strategy = tf.distribute.get_strategy()
# `query` = [B, T, N ,H]
query = strategy.experimental_split_to_logical_devices(
query, self._partition_dims)
key = strategy.experimental_split_to_logical_devices(
key, self._partition_dims)
value = strategy.experimental_split_to_logical_devices(
value, self._partition_dims)
batch_size = query.get_shape().as_list()[0] # None if dynamic.
if (
training
or self._max_inference_parallelism is None
or self._max_inference_parallelism <= 0
or (
# If the whole batch is allowed to be run in parallel, use fully
# vectorized computation instead of tf.map_fn to make things more
# efficient.
batch_size is not None
and batch_size <= self._max_inference_parallelism
)
):
return self._compute_attention_delegate(
query, key, value, attention_mask, training
)
else:
# Sequentialize the inference execution with limited parallelism.
def _compute_fn(x):
attention_output, attention_scores = self._compute_attention_delegate(
query=x[0][tf.newaxis, ...],
key=x[1][tf.newaxis, ...],
value=x[2][tf.newaxis, ...],
attention_mask=x[3][tf.newaxis, ...] if len(x) >= 4 else None,
training=training,
)
attention_output = tf.squeeze(attention_output, axis=0)
attention_scores = tf.squeeze(attention_scores, axis=0)
return attention_output, attention_scores
if attention_mask is not None:
elems = [query, key, value, attention_mask]
else:
elems = [query, key, value]
return tf.map_fn(
fn=_compute_fn,
elems=elems,
fn_output_signature=(value.dtype, value.dtype),
parallel_iterations=self._max_inference_parallelism,
)
def _compute_attention_delegate(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = None,
):
"""Implements dot-product attention with query, key, value tensors."""
# Simply calls the implementation of the super class here, while the users
# can override this function for customizing attention computation.
return super()._compute_attention(
query, key, value, attention_mask, training
)
models | models-master/official/vision/modeling/layers/detection_generator_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for detection_generator.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.modeling.layers import detection_generator
from official.vision.ops import anchor
class SelectTopKScoresTest(tf.test.TestCase):
def testSelectTopKScores(self):
pre_nms_num_boxes = 2
scores_data = [[[0.2, 0.2], [0.1, 0.9], [0.5, 0.1], [0.3, 0.5]]]
scores_in = tf.constant(scores_data, dtype=tf.float32)
top_k_scores, top_k_indices = detection_generator._select_top_k_scores(
scores_in, pre_nms_num_detections=pre_nms_num_boxes)
expected_top_k_scores = np.array([[[0.5, 0.9], [0.3, 0.5]]],
dtype=np.float32)
expected_top_k_indices = [[[2, 1], [3, 3]]]
self.assertAllEqual(top_k_scores.numpy(), expected_top_k_scores)
self.assertAllEqual(top_k_indices.numpy(), expected_top_k_indices)
class DetectionGeneratorTest(
parameterized.TestCase, tf.test.TestCase):
@parameterized.product(
nms_version=['batched', 'v1', 'v2'],
use_cpu_nms=[True, False],
soft_nms_sigma=[None, 0.1],
use_sigmoid_probability=[True, False])
def testDetectionsOutputShape(self, nms_version, use_cpu_nms, soft_nms_sigma,
use_sigmoid_probability):
max_num_detections = 10
num_classes = 4
pre_nms_top_k = 5000
pre_nms_score_threshold = 0.01
batch_size = 1
kwargs = {
'apply_nms': True,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': 0.5,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
'use_sigmoid_probability': use_sigmoid_probability,
}
generator = detection_generator.DetectionGenerator(**kwargs)
cls_outputs_all = (
        np.random.rand(84, num_classes) - 0.5) * 3  # random 84x4 outputs.
box_outputs_all = np.random.rand(84, 4 * num_classes) # random 84 boxes.
anchor_boxes_all = np.random.rand(84, 4) # random 84 boxes.
class_outputs = tf.reshape(
tf.convert_to_tensor(cls_outputs_all, dtype=tf.float32),
[1, 84, num_classes])
box_outputs = tf.reshape(
tf.convert_to_tensor(box_outputs_all, dtype=tf.float32),
[1, 84, 4 * num_classes])
anchor_boxes = tf.reshape(
tf.convert_to_tensor(anchor_boxes_all, dtype=tf.float32),
[1, 84, 4])
image_info = tf.constant(
[[[1000, 1000], [100, 100], [0.1, 0.1], [0, 0]]],
dtype=tf.float32)
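    # image_info rows: [original size, desired (input) size, y/x scale,
    # y/x offset]; only the desired size (row 1) is passed to the generator
    # for clipping the decoded boxes.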
results = generator(
box_outputs, class_outputs, anchor_boxes, image_info[:, 1, :])
boxes = results['detection_boxes']
classes = results['detection_classes']
scores = results['detection_scores']
valid_detections = results['num_detections']
self.assertEqual(boxes.numpy().shape, (batch_size, max_num_detections, 4))
self.assertEqual(scores.numpy().shape, (batch_size, max_num_detections,))
self.assertEqual(classes.numpy().shape, (batch_size, max_num_detections,))
self.assertEqual(valid_detections.numpy().shape, (batch_size,))
def test_serialize_deserialize(self):
kwargs = {
'apply_nms': True,
'pre_nms_top_k': 1000,
'pre_nms_score_threshold': 0.1,
'nms_iou_threshold': 0.5,
'max_num_detections': 10,
'nms_version': 'v2',
'use_cpu_nms': False,
'soft_nms_sigma': None,
'use_sigmoid_probability': False,
}
generator = detection_generator.DetectionGenerator(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(generator.get_config(), expected_config)
new_generator = (
detection_generator.DetectionGenerator.from_config(
generator.get_config()))
self.assertAllEqual(generator.get_config(), new_generator.get_config())
class MultilevelDetectionGeneratorTest(
parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
('batched', False, True, None, None, None),
('batched', False, False, None, None, None),
('v3', False, True, None, None, None),
('v3', False, False, None, None, None),
('v2', False, True, None, None, None),
('v2', False, False, None, None, None),
('v2', False, False, None, None, True),
('v1', True, True, 0.0, None, None),
('v1', True, False, 0.1, None, None),
('v1', True, False, None, None, None),
('tflite', False, False, None, True, None),
('tflite', False, False, None, False, None),
)
def testDetectionsOutputShape(
self,
nms_version,
has_att_heads,
use_cpu_nms,
soft_nms_sigma,
use_regular_nms,
use_class_agnostic_nms,
):
min_level = 4
max_level = 6
num_scales = 2
max_num_detections = 10
aspect_ratios = [1.0, 2.0]
anchor_scale = 2.0
output_size = [64, 64]
num_classes = 4
pre_nms_top_k = 5000
pre_nms_score_threshold = 0.01
batch_size = 1
tflite_post_processing_config = {
'max_detections': max_num_detections,
'max_classes_per_detection': 1,
'use_regular_nms': use_regular_nms,
'nms_score_threshold': 0.01,
'nms_iou_threshold': 0.5,
'input_image_size': [224, 224],
'normalize_anchor_coordinates': True,
}
kwargs = {
'apply_nms': True,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': 0.5,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
'tflite_post_processing_config': tflite_post_processing_config,
'use_class_agnostic_nms': use_class_agnostic_nms,
}
input_anchor = anchor.build_anchor_generator(min_level, max_level,
num_scales, aspect_ratios,
anchor_scale)
anchor_boxes = input_anchor(output_size)
cls_outputs_all = (
        np.random.rand(84, num_classes) - 0.5) * 3  # random 84x4 outputs.
box_outputs_all = np.random.rand(84, 4) # random 84 boxes.
class_outputs = {
'4':
tf.reshape(
tf.convert_to_tensor(cls_outputs_all[0:64], dtype=tf.float32),
[1, 8, 8, num_classes]),
'5':
tf.reshape(
tf.convert_to_tensor(cls_outputs_all[64:80], dtype=tf.float32),
[1, 4, 4, num_classes]),
'6':
tf.reshape(
tf.convert_to_tensor(cls_outputs_all[80:84], dtype=tf.float32),
[1, 2, 2, num_classes]),
}
box_outputs = {
'4': tf.reshape(tf.convert_to_tensor(
box_outputs_all[0:64], dtype=tf.float32), [1, 8, 8, 4]),
'5': tf.reshape(tf.convert_to_tensor(
box_outputs_all[64:80], dtype=tf.float32), [1, 4, 4, 4]),
'6': tf.reshape(tf.convert_to_tensor(
box_outputs_all[80:84], dtype=tf.float32), [1, 2, 2, 4]),
}
if has_att_heads:
att_outputs_all = np.random.rand(84, 1) # random attributes.
att_outputs = {
'depth': {
'4':
tf.reshape(
tf.convert_to_tensor(
att_outputs_all[0:64], dtype=tf.float32),
[1, 8, 8, 1]),
'5':
tf.reshape(
tf.convert_to_tensor(
att_outputs_all[64:80], dtype=tf.float32),
[1, 4, 4, 1]),
'6':
tf.reshape(
tf.convert_to_tensor(
att_outputs_all[80:84], dtype=tf.float32),
[1, 2, 2, 1]),
}
}
else:
att_outputs = None
image_info = tf.constant([[[1000, 1000], [100, 100], [0.1, 0.1], [0, 0]]],
dtype=tf.float32)
generator = detection_generator.MultilevelDetectionGenerator(**kwargs)
results = generator(box_outputs, class_outputs, anchor_boxes,
image_info[:, 1, :], att_outputs)
boxes = results['detection_boxes']
classes = results['detection_classes']
scores = results['detection_scores']
valid_detections = results['num_detections']
if nms_version == 'tflite':
# When nms_version is `tflite`, all output tensors are empty as the actual
# post-processing happens in the TFLite model.
self.assertEqual(boxes.numpy().shape, ())
self.assertEqual(scores.numpy().shape, ())
self.assertEqual(classes.numpy().shape, ())
self.assertEqual(valid_detections.numpy().shape, ())
else:
self.assertEqual(boxes.numpy().shape, (batch_size, max_num_detections, 4))
self.assertEqual(scores.numpy().shape, (
batch_size,
max_num_detections,
))
self.assertEqual(classes.numpy().shape, (
batch_size,
max_num_detections,
))
self.assertEqual(valid_detections.numpy().shape, (batch_size,))
if has_att_heads:
for att in results['detection_attributes'].values():
self.assertEqual(att.numpy().shape,
(batch_size, max_num_detections, 1))
def test_decode_multilevel_outputs_and_pre_nms_top_k(self):
named_params = {
'apply_nms': True,
'pre_nms_top_k': 5,
'pre_nms_score_threshold': 0.05,
'nms_iou_threshold': 0.5,
'max_num_detections': 2,
'nms_version': 'v3',
'use_cpu_nms': False,
'soft_nms_sigma': None,
}
generator = detection_generator.MultilevelDetectionGenerator(**named_params)
    # 2 foreground classes plus background, 3 anchors per pixel, and 2 levels:
    # level '1' is 2x2 and level '2' is 1x1.
background = [1, 0, 0]
first = [0, 1, 0]
second = [0, 0, 1]
some = [0, 0.5, 0.5]
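    # Each spatial location stacks per-anchor class scores, so a level-'1'
    # pixel has 3 * 3 = 9 class channels (3 anchors x [background, first,
    # second]) and 3 * 4 = 12 box channels.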
class_outputs = {
'1':
tf.constant([[[
first + background + first, first + background + second
], [second + background + first, second + background + second]]],
dtype=tf.float32),
'2':
tf.constant([[[background + some + background]]], dtype=tf.float32),
}
box_outputs = {
'1': tf.zeros(shape=[1, 2, 2, 12], dtype=tf.float32),
'2': tf.zeros(shape=[1, 1, 1, 12], dtype=tf.float32)
}
anchor_boxes = {
'1':
tf.random.uniform(
shape=[2, 2, 12], minval=1., maxval=99., dtype=tf.float32),
'2':
tf.random.uniform(
shape=[1, 1, 12], minval=1., maxval=99., dtype=tf.float32),
}
boxes, scores = generator._decode_multilevel_outputs_and_pre_nms_top_k(
box_outputs, class_outputs, anchor_boxes,
tf.constant([[100, 100]], dtype=tf.float32))
self.assertAllClose(
scores,
tf.sigmoid(
tf.constant([[[1, 1, 1, 1, 0.5], [1, 1, 1, 1, 0.5]]],
dtype=tf.float32)))
self.assertAllClose(
tf.squeeze(boxes),
tf.stack([
# Where the first is + some as last
tf.stack([
anchor_boxes['1'][0, 0, 0:4], anchor_boxes['1'][0, 0, 8:12],
anchor_boxes['1'][0, 1, 0:4], anchor_boxes['1'][1, 0, 8:12],
anchor_boxes['2'][0, 0, 4:8]
]),
# Where the second is + some as last
tf.stack([
anchor_boxes['1'][0, 1, 8:12], anchor_boxes['1'][1, 0, 0:4],
anchor_boxes['1'][1, 1, 0:4], anchor_boxes['1'][1, 1, 8:12],
anchor_boxes['2'][0, 0, 4:8]
]),
]))
def test_serialize_deserialize(self):
tflite_post_processing_config = {
'max_detections': 100,
'max_classes_per_detection': 1,
'use_regular_nms': True,
'nms_score_threshold': 0.01,
'nms_iou_threshold': 0.5,
'input_image_size': [224, 224],
}
kwargs = {
'apply_nms': True,
'pre_nms_top_k': 1000,
'pre_nms_score_threshold': 0.1,
'nms_iou_threshold': 0.5,
'max_num_detections': 10,
'nms_version': 'v2',
'use_cpu_nms': False,
'soft_nms_sigma': None,
'tflite_post_processing_config': tflite_post_processing_config,
'return_decoded': False,
'use_class_agnostic_nms': False,
}
generator = detection_generator.MultilevelDetectionGenerator(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(generator.get_config(), expected_config)
new_generator = (
detection_generator.MultilevelDetectionGenerator.from_config(
generator.get_config()))
self.assertAllEqual(generator.get_config(), new_generator.get_config())
if __name__ == '__main__':
tf.test.main()
models | models-master/official/vision/modeling/layers/nn_blocks_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn_blocks."""
from typing import Any, Iterable, Tuple
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
def distribution_strategy_combinations() -> Iterable[Tuple[Any, ...]]:
"""Returns the combinations of end-to-end tests to run."""
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],)
class NNBlocksTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(nn_blocks.ResidualBlock, 1, False, 0.0, None),
(nn_blocks.ResidualBlock, 2, True, 0.2, 0.25),
)
def test_residual_block_creation(self, block_fn, strides, use_projection,
stochastic_depth_drop_rate, se_ratio):
input_size = 128
filter_size = 256
inputs = tf.keras.Input(
shape=(input_size, input_size, filter_size), batch_size=1)
block = block_fn(
filter_size,
strides,
use_projection=use_projection,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, filter_size],
features.shape.as_list())
def test_layerscale_call(self):
# Set up test inputs
input_shape = (2, 3, 4)
init_values = 1e-4
inputs = tf.ones(input_shape, dtype=tf.float32)
# Instantiate LayerScale object
layer_scale = nn_blocks.LayerScale(init_values)
# Call LayerScale object on test inputs
output = layer_scale(inputs)
# Check output shape
expected_output_shape = input_shape
self.assertAllEqual(output.shape, expected_output_shape)
# Check that output values are correct
expected_output_values = init_values * np.ones(input_shape)
self.assertAllClose(
output.numpy(), expected_output_values, rtol=1e-5, atol=1e-5)
def test_layerscale_training(self):
    # Verify that the gamma values change from their initial values after one
    # training step.
# Set up test inputs
input_shape = (1, 3, 4)
init_values = 1e-4
inputs = tf.ones(input_shape, dtype=tf.float32)
targets = tf.ones(input_shape, dtype=tf.float32)
# Instantiate LayerScale object
layer_scale = nn_blocks.LayerScale(init_values)
# Define optimizer and loss function
optimizer = tf.keras.optimizers.Adam()
loss_fn = tf.keras.losses.MeanSquaredError()
# Train the model for one step
with tf.GradientTape() as tape:
predictions = layer_scale(inputs)
loss = loss_fn(targets, predictions)
grads = tape.gradient(loss, layer_scale.trainable_variables)
optimizer.apply_gradients(zip(grads, layer_scale.trainable_variables))
# Check that gamma values have changed
updated_gamma = layer_scale.gamma.numpy()[0, 0, 0]
self.assertNotEqual(updated_gamma, init_values)
@parameterized.parameters(
(nn_blocks.BottleneckBlock, 1, False, 0.0, None),
(nn_blocks.BottleneckBlock, 2, True, 0.2, 0.25),
)
def test_bottleneck_block_creation(self, block_fn, strides, use_projection,
stochastic_depth_drop_rate, se_ratio):
input_size = 128
filter_size = 256
inputs = tf.keras.Input(
shape=(input_size, input_size, filter_size * 4), batch_size=1)
block = block_fn(
filter_size,
strides,
use_projection=use_projection,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, filter_size * 4],
features.shape.as_list())
@parameterized.parameters(
(nn_blocks.InvertedBottleneckBlock, 1, 1, None, None),
(nn_blocks.InvertedBottleneckBlock, 6, 1, None, None),
(nn_blocks.InvertedBottleneckBlock, 1, 2, None, None),
(nn_blocks.InvertedBottleneckBlock, 1, 1, 0.2, None),
(nn_blocks.InvertedBottleneckBlock, 1, 1, None, 0.2),
)
def test_invertedbottleneck_block_creation(self, block_fn, expand_ratio,
strides, se_ratio,
stochastic_depth_drop_rate):
input_size = 128
in_filters = 24
out_filters = 40
inputs = tf.keras.Input(
shape=(input_size, input_size, in_filters), batch_size=1)
block = block_fn(
in_filters=in_filters,
out_filters=out_filters,
expand_ratio=expand_ratio,
strides=strides,
se_ratio=se_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, out_filters],
features.shape.as_list())
@parameterized.parameters(
(nn_blocks.TuckerConvBlock, 1, 0.25, 0.25),
(nn_blocks.TuckerConvBlock, 2, 0.25, 0.25),
)
def test_tucker_conv_block(self, block_fn, strides, input_compression_ratio,
output_compression_ratio):
input_size = 128
in_filters = 24
out_filters = 24
inputs = tf.keras.Input(
shape=(input_size, input_size, in_filters), batch_size=1)
block = block_fn(
in_filters=in_filters,
out_filters=out_filters,
input_compression_ratio=input_compression_ratio,
output_compression_ratio=output_compression_ratio,
strides=strides)
features = block(inputs)
self.assertAllEqual(
[1, input_size // strides, input_size // strides, out_filters],
features.shape.as_list())
class ResidualInnerTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(distribution_strategy_combinations())
def test_shape(self, distribution):
bsz, h, w, c = 8, 32, 32, 32
filters = 64
strides = 2
input_tensor = tf.random.uniform(shape=[bsz, h, w, c])
with distribution.scope():
test_layer = nn_blocks.ResidualInner(filters, strides)
output = test_layer(input_tensor)
expected_output_shape = [bsz, h // strides, w // strides, filters]
self.assertEqual(expected_output_shape, output.shape.as_list())
class BottleneckResidualInnerTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(distribution_strategy_combinations())
def test_shape(self, distribution):
bsz, h, w, c = 8, 32, 32, 32
filters = 64
strides = 2
input_tensor = tf.random.uniform(shape=[bsz, h, w, c])
with distribution.scope():
test_layer = nn_blocks.BottleneckResidualInner(filters, strides)
output = test_layer(input_tensor)
expected_output_shape = [bsz, h // strides, w // strides, filters * 4]
self.assertEqual(expected_output_shape, output.shape.as_list())
class DepthwiseSeparableConvBlockTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(distribution_strategy_combinations())
def test_shape(self, distribution):
batch_size, height, width, num_channels = 8, 32, 32, 32
num_filters = 64
strides = 2
input_tensor = tf.random.normal(
shape=[batch_size, height, width, num_channels])
with distribution.scope():
block = nn_blocks.DepthwiseSeparableConvBlock(
num_filters, strides=strides)
config_dict = block.get_config()
recreate_block = nn_blocks.DepthwiseSeparableConvBlock(**config_dict)
output_tensor = block(input_tensor)
expected_output_shape = [
batch_size, height // strides, width // strides, num_filters
]
self.assertEqual(output_tensor.shape.as_list(), expected_output_shape)
output_tensor = recreate_block(input_tensor)
self.assertEqual(output_tensor.shape.as_list(), expected_output_shape)
class ReversibleLayerTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(distribution_strategy_combinations())
def test_downsampling_non_reversible_step(self, distribution):
bsz, h, w, c = 8, 32, 32, 32
filters = 64
strides = 2
input_tensor = tf.random.uniform(shape=[bsz, h, w, c])
with distribution.scope():
f = nn_blocks.ResidualInner(
filters=filters // 2, strides=strides, batch_norm_first=True)
g = nn_blocks.ResidualInner(
filters=filters // 2, strides=1, batch_norm_first=True)
test_layer = nn_blocks.ReversibleLayer(f, g)
test_layer.build(input_tensor.shape)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
@tf.function
def step_fn():
with tf.GradientTape() as tape:
output = test_layer(input_tensor, training=True)
grads = tape.gradient(output, test_layer.trainable_variables)
# Test applying gradients with optimizer works
optimizer.apply_gradients(zip(grads, test_layer.trainable_variables))
return output
replica_output = distribution.run(step_fn)
outputs = distribution.experimental_local_results(replica_output)
# Assert forward pass shape
expected_output_shape = [bsz, h // strides, w // strides, filters]
for output in outputs:
self.assertEqual(expected_output_shape, output.shape.as_list())
@combinations.generate(distribution_strategy_combinations())
def test_reversible_step(self, distribution):
# Reversible layers satisfy: (a) strides = 1 (b) in_filter = out_filter
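    # (the input must be reconstructible from the output, so shapes must match)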
bsz, h, w, c = 8, 32, 32, 32
filters = c
strides = 1
input_tensor = tf.random.uniform(shape=[bsz, h, w, c])
with distribution.scope():
f = nn_blocks.ResidualInner(
filters=filters // 2, strides=strides, batch_norm_first=False)
g = nn_blocks.ResidualInner(
filters=filters // 2, strides=1, batch_norm_first=False)
test_layer = nn_blocks.ReversibleLayer(f, g)
test_layer(input_tensor, training=False) # init weights
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
@tf.function
def step_fn():
with tf.GradientTape() as tape:
output = test_layer(input_tensor, training=True)
grads = tape.gradient(output, test_layer.trainable_variables)
# Test applying gradients with optimizer works
optimizer.apply_gradients(zip(grads, test_layer.trainable_variables))
return output
@tf.function
def fwd():
test_layer(input_tensor)
distribution.run(fwd) # Initialize variables
prev_variables = tf.identity_n(test_layer.trainable_variables)
replica_output = distribution.run(step_fn)
outputs = distribution.experimental_local_results(replica_output)
# Assert variables values have changed values
for v0, v1 in zip(prev_variables, test_layer.trainable_variables):
self.assertNotAllEqual(v0, v1)
# Assert forward pass shape
expected_output_shape = [bsz, h // strides, w // strides, filters]
for output in outputs:
self.assertEqual(expected_output_shape, output.shape.as_list())
@combinations.generate(distribution_strategy_combinations())
def test_manual_gradients_correctness(self, distribution):
bsz, h, w, c = 8, 32, 32, 32
filters = c
strides = 1
input_tensor = tf.random.uniform(shape=[bsz, h, w, c * 4]) # bottleneck
with distribution.scope():
f_manual = nn_blocks.BottleneckResidualInner(
filters=filters // 2, strides=strides, batch_norm_first=False)
g_manual = nn_blocks.BottleneckResidualInner(
filters=filters // 2, strides=1, batch_norm_first=False)
manual_grad_layer = nn_blocks.ReversibleLayer(f_manual, g_manual)
manual_grad_layer(input_tensor, training=False) # init weights
f_auto = nn_blocks.BottleneckResidualInner(
filters=filters // 2, strides=strides, batch_norm_first=False)
g_auto = nn_blocks.BottleneckResidualInner(
filters=filters // 2, strides=1, batch_norm_first=False)
auto_grad_layer = nn_blocks.ReversibleLayer(
f_auto, g_auto, manual_grads=False)
auto_grad_layer(input_tensor) # init weights
# Clone all weights (tf.keras.layers.Layer has no .clone())
auto_grad_layer._f.set_weights(manual_grad_layer._f.get_weights())
auto_grad_layer._g.set_weights(manual_grad_layer._g.get_weights())
@tf.function
def manual_fn():
with tf.GradientTape() as tape:
output = manual_grad_layer(input_tensor, training=True)
grads = tape.gradient(output, manual_grad_layer.trainable_variables)
return grads
@tf.function
def auto_fn():
with tf.GradientTape() as tape:
output = auto_grad_layer(input_tensor, training=True)
grads = tape.gradient(output, auto_grad_layer.trainable_variables)
return grads
manual_grads = distribution.run(manual_fn)
auto_grads = distribution.run(auto_fn)
# Assert gradients calculated manually are close to that from autograd
for manual_grad, auto_grad in zip(manual_grads, auto_grads):
self.assertAllClose(
distribution.experimental_local_results(manual_grad),
distribution.experimental_local_results(auto_grad),
atol=5e-3,
rtol=5e-3)
# Verify that BN moving mean and variance is correct.
for manual_var, auto_var in zip(manual_grad_layer.non_trainable_variables,
auto_grad_layer.non_trainable_variables):
self.assertAllClose(manual_var, auto_var)
# Test class that wraps a standard attention layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyAttention')
class ValidatedAttentionLayer(nn_layers.MultiHeadAttention):
def __init__(self, call_list, **kwargs):
super(ValidatedAttentionLayer, self).__init__(**kwargs)
self.list = call_list
def call(
self,
query,
value,
attention_mask=None,
return_attention_scores=False,
):
self.list.append(True)
return super(ValidatedAttentionLayer, self).call(
query,
value,
attention_mask=attention_mask,
return_attention_scores=return_attention_scores)
def get_config(self):
config = super(ValidatedAttentionLayer, self).get_config()
config['call_list'] = self.list
return config
# Test class implements a simple feedforward layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyFeedforward')
class ValidatedFeedforwardLayer(tf.keras.layers.Layer):
def __init__(self, call_list, activation, **kwargs):
super(ValidatedFeedforwardLayer, self).__init__(**kwargs)
self.list = call_list
self.activation = activation
def build(self, input_shape):
hidden_size = input_shape[-1]
self._feedforward_dense = tf.keras.layers.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
activation=self.activation,
name='feedforward')
def call(self, inputs):
self.list.append(True)
return self._feedforward_dense(inputs)
def get_config(self):
config = super(ValidatedFeedforwardLayer, self).get_config()
config['call_list'] = []
config['activation'] = self.activation
return config
class TransformerLayerTest(tf.test.TestCase, parameterized.TestCase):
def tearDown(self):
super(TransformerLayerTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy('float32')
@parameterized.parameters(None, 2)
def test_layer_creation(self, max_attention_inference_parallelism):
sequence_length = 21
width = 80
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': []
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
max_attention_inference_parallelism=max_attention_inference_parallelism,
)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
call_list = test_layer._attention_layer.get_config()['call_list']
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_creation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_creation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
@parameterized.parameters(None, 2)
def test_layer_invocation(self, max_attention_inference_parallelism):
sequence_length = 21
width = 80
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': [],
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
max_attention_inference_parallelism=max_attention_inference_parallelism)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
call_list = test_layer._attention_layer.get_config()['call_list']
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
feedforward_layer = ValidatedFeedforwardLayer(**feedforward_layer_cfg)
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=feedforward_layer,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_invocation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_float16_dtype(self):
tf.keras.mixed_precision.set_global_policy('mixed_float16')
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_transform_with_initializer(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
    # The default output shape of a transformer layer should match the shape of
    # its input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0])
def test_layer_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
inner_dim=2048,
inner_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
    # Serialize the model config. Rebuilding the model from this config below
    # verifies that the layer's configuration can be round-tripped.
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_with_feedforward_cls_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = nn_blocks.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
inner_dim=None,
inner_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
new_feedforward_call_list = new_model.get_layer(
name='transformer_scaffold')._feedforward_block.list
self.assertNotEmpty(new_feedforward_call_list)
self.assertTrue(new_feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
if __name__ == '__main__':
tf.test.main()
| 33,825 | 37.135287 | 80 | py |
models | models-master/official/vision/modeling/layers/nn_layers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn_layers."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.layers import nn_layers
class NNLayersTest(parameterized.TestCase, tf.test.TestCase):
def test_scale(self):
scale = nn_layers.Scale(initializer=tf.keras.initializers.constant(10.))
output = scale(3.)
self.assertAllEqual(output, 30.)
def test_temporal_softmax_pool(self):
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
layer = nn_layers.TemporalSoftmaxPool()
output = layer(inputs)
self.assertAllClose(
output,
[[[[[0.10153633]]],
[[[0.33481020]]],
[[[0.82801306]]],
[[[1.82021690]]]]])
def test_positional_encoding(self):
pos_encoding = nn_layers.PositionalEncoding(
initializer='ones', cache_encoding=False)
pos_encoding_cached = nn_layers.PositionalEncoding(
initializer='ones', cache_encoding=True)
inputs = tf.ones([1, 4, 1, 1, 3])
outputs, _ = pos_encoding(inputs)
outputs_cached, _ = pos_encoding_cached(inputs)
expected = tf.constant(
[[[[[1.0000000, 1.0000000, 2.0000000]]],
[[[1.8414710, 1.0021545, 1.5403023]]],
[[[1.9092975, 1.0043088, 0.5838531]]],
[[[1.1411200, 1.0064633, 0.0100075]]]]])
self.assertEqual(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected)
self.assertEqual(outputs.shape, outputs_cached.shape)
self.assertAllClose(outputs, outputs_cached)
inputs = tf.ones([1, 5, 1, 1, 3])
_ = pos_encoding(inputs)
def test_positional_encoding_bfloat16(self):
pos_encoding = nn_layers.PositionalEncoding(initializer='ones')
inputs = tf.ones([1, 4, 1, 1, 3], dtype=tf.bfloat16)
outputs, _ = pos_encoding(inputs)
expected = tf.constant(
[[[[[1.0000000, 1.0000000, 2.0000000]]],
[[[1.8414710, 1.0021545, 1.5403023]]],
[[[1.9092975, 1.0043088, 0.5838531]]],
[[[1.1411200, 1.0064633, 0.0100075]]]]])
self.assertEqual(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected)
def test_global_average_pool_basic(self):
pool = nn_layers.GlobalAveragePool3D(keepdims=True)
inputs = tf.ones([1, 2, 3, 4, 1])
outputs = pool(inputs, output_states=False)
expected = tf.ones([1, 1, 1, 1, 1])
self.assertEqual(outputs.shape, expected.shape)
self.assertAllEqual(outputs, expected)
def test_positional_encoding_stream(self):
pos_encoding = nn_layers.PositionalEncoding(
initializer='ones', cache_encoding=False)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 1, 1, 3])
expected, _ = pos_encoding(inputs)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
output, states = pos_encoding(frame, states=states)
predicted.append(output)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(predicted, [[[[[1.0000000, 1.0000000, 2.0000000]]],
[[[2.8414710, 2.0021544, 2.5403023]]],
[[[3.9092975, 3.0043090, 2.5838532]]],
[[[4.1411200, 4.0064630, 3.0100074]]]]])
def test_global_average_pool_keras(self):
pool = nn_layers.GlobalAveragePool3D(keepdims=False)
keras_pool = tf.keras.layers.GlobalAveragePooling3D()
inputs = 10 * tf.random.normal([1, 2, 3, 4, 1])
outputs = pool(inputs, output_states=False)
keras_output = keras_pool(inputs)
self.assertAllEqual(outputs.shape, keras_output.shape)
self.assertAllClose(outputs, keras_output)
def test_stream_global_average_pool(self):
gap = nn_layers.GlobalAveragePool3D(keepdims=True, causal=False)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 2, 3])
expected, _ = gap(inputs, output_states=True)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, num_splits, axis=1)
states = {}
predicted = None
for frame in frames:
predicted, states = gap(frame, states=states, output_states=True)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[2.5, 2.5, 2.5]]]]])
def test_causal_stream_global_average_pool(self):
gap = nn_layers.GlobalAveragePool3D(keepdims=True, causal=True)
inputs = tf.range(4, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 1, 1, 1])
inputs = tf.tile(inputs, [1, 1, 2, 2, 3])
expected, _ = gap(inputs, output_states=True)
for num_splits in [1, 2, 4]:
frames = tf.split(inputs, num_splits, axis=1)
states = {}
predicted = []
for frame in frames:
x, states = gap(frame, states=states, output_states=True)
predicted.append(x)
predicted = tf.concat(predicted, axis=1)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(
predicted,
[[[[[1.0, 1.0, 1.0]]],
[[[1.5, 1.5, 1.5]]],
[[[2.0, 2.0, 2.0]]],
[[[2.5, 2.5, 2.5]]]]])
def test_spatial_average_pool(self):
pool = nn_layers.SpatialAveragePool3D(keepdims=True)
inputs = tf.range(64, dtype=tf.float32) + 1.
inputs = tf.reshape(inputs, [1, 4, 4, 4, 1])
output = pool(inputs)
self.assertEqual(output.shape, [1, 4, 1, 1, 1])
self.assertAllClose(
output,
[[[[[8.50]]],
[[[24.5]]],
[[[40.5]]],
[[[56.5]]]]])
def test_conv2d_causal(self):
conv2d = nn_layers.Conv2D(
filters=3,
kernel_size=(3, 3),
strides=(1, 2),
padding='causal',
use_buffered_input=True,
kernel_initializer='ones',
use_bias=False,
)
inputs = tf.ones([1, 4, 2, 3])
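    # With `use_buffered_input=True` the layer assumes the causal (left) padding
    # along the temporal axis has already been applied by the caller, which is
    # what the explicit `tf.pad` below does. Later in this test the flag is
    # flipped to False so the layer applies the same causal padding internally.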
paddings = [[0, 0], [2, 0], [0, 0], [0, 0]]
padded_inputs = tf.pad(inputs, paddings)
predicted = conv2d(padded_inputs)
expected = tf.constant(
[[[[6.0, 6.0, 6.0]],
[[12., 12., 12.]],
[[18., 18., 18.]],
[[18., 18., 18.]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
conv2d.use_buffered_input = False
predicted = conv2d(inputs)
self.assertFalse(conv2d.use_buffered_input)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_depthwise_conv2d_causal(self):
conv2d = nn_layers.DepthwiseConv2D(
kernel_size=(3, 3),
strides=(1, 1),
padding='causal',
use_buffered_input=True,
depthwise_initializer='ones',
use_bias=False,
)
inputs = tf.ones([1, 2, 2, 3])
paddings = [[0, 0], [2, 0], [0, 0], [0, 0]]
padded_inputs = tf.pad(inputs, paddings)
predicted = conv2d(padded_inputs)
expected = tf.constant(
[[[[2., 2., 2.],
[2., 2., 2.]],
[[4., 4., 4.],
[4., 4., 4.]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
conv2d.use_buffered_input = False
predicted = conv2d(inputs)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_conv3d_causal(self):
conv3d = nn_layers.Conv3D(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding='causal',
use_buffered_input=True,
kernel_initializer='ones',
use_bias=False,
)
inputs = tf.ones([1, 2, 4, 4, 3])
paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]]
padded_inputs = tf.pad(inputs, paddings)
predicted = conv3d(padded_inputs)
expected = tf.constant(
[[[[[27., 27., 27.],
[18., 18., 18.]],
[[18., 18., 18.],
[12., 12., 12.]]],
[[[54., 54., 54.],
[36., 36., 36.]],
[[36., 36., 36.],
[24., 24., 24.]]]]])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
conv3d.use_buffered_input = False
predicted = conv3d(inputs)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_depthwise_conv3d_causal(self):
conv3d = nn_layers.Conv3D(
filters=3,
kernel_size=(3, 3, 3),
strides=(1, 2, 2),
padding='causal',
use_buffered_input=True,
kernel_initializer='ones',
use_bias=False,
groups=3,
)
inputs = tf.ones([1, 2, 4, 4, 3])
paddings = [[0, 0], [2, 0], [0, 0], [0, 0], [0, 0]]
padded_inputs = tf.pad(inputs, paddings)
predicted = conv3d(padded_inputs)
expected = tf.constant(
[[[[[9.0, 9.0, 9.0],
[6.0, 6.0, 6.0]],
[[6.0, 6.0, 6.0],
[4.0, 4.0, 4.0]]],
[[[18.0, 18.0, 18.0],
[12., 12., 12.]],
[[12., 12., 12.],
[8., 8., 8.]]]]])
output_shape = conv3d._spatial_output_shape([4, 4, 4])
self.assertAllClose(output_shape, [2, 2, 2])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
conv3d.use_buffered_input = False
predicted = conv3d(inputs)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
def test_conv3d_causal_padding_2d(self):
"""Test to ensure causal padding works like standard padding."""
conv3d = nn_layers.Conv3D(
filters=1,
kernel_size=(1, 3, 3),
strides=(1, 2, 2),
padding='causal',
use_buffered_input=False,
kernel_initializer='ones',
use_bias=False,
)
keras_conv3d = tf.keras.layers.Conv3D(
filters=1,
kernel_size=(1, 3, 3),
strides=(1, 2, 2),
padding='same',
kernel_initializer='ones',
use_bias=False,
)
inputs = tf.ones([1, 1, 4, 4, 1])
predicted = conv3d(inputs)
expected = keras_conv3d(inputs)
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(predicted,
[[[[[9.],
[6.]],
[[6.],
[4.]]]]])
def test_conv3d_causal_padding_1d(self):
"""Test to ensure causal padding works like standard padding."""
conv3d = nn_layers.Conv3D(
filters=1,
kernel_size=(3, 1, 1),
strides=(2, 1, 1),
padding='causal',
use_buffered_input=False,
kernel_initializer='ones',
use_bias=False,
)
keras_conv1d = tf.keras.layers.Conv1D(
filters=1,
kernel_size=3,
strides=2,
padding='causal',
kernel_initializer='ones',
use_bias=False,
)
inputs = tf.ones([1, 4, 1, 1, 1])
predicted = conv3d(inputs)
expected = keras_conv1d(tf.squeeze(inputs, axis=[2, 3]))
expected = tf.reshape(expected, [1, 2, 1, 1, 1])
self.assertEqual(predicted.shape, expected.shape)
self.assertAllClose(predicted, expected)
self.assertAllClose(predicted,
[[[[[1.]]],
[[[3.]]]]])
@parameterized.parameters(
(None, []),
(None, [6, 12, 18]),
([32, 32], [6, 12, 18]),
)
def test_aspp(self, pool_kernel_size, dilation_rates):
inputs = tf.keras.Input(shape=(64, 64, 128), dtype=tf.float32)
layer = nn_layers.SpatialPyramidPooling(
output_channels=256,
dilation_rates=dilation_rates,
pool_kernel_size=pool_kernel_size)
output = layer(inputs)
self.assertAllEqual([None, 64, 64, 256], output.shape)
@parameterized.parameters(None, 2)
def test_multi_head_attention(self, max_inference_parallelism):
layer = nn_layers.MultiHeadAttention(
num_heads=12,
key_dim=64,
max_inference_parallelism=max_inference_parallelism,
)
# Create a 3-dimensional input (the first dimension is implicit).
query = tf.keras.Input(shape=(40, 80))
value = tf.keras.Input(shape=(20, 80))
output = layer(query=query, value=value)
self.assertEqual(output.shape.as_list(), [None, 40, 80])
if __name__ == '__main__':
tf.test.main()
| 13,309 | 29.73903 | 79 | py |
models | models-master/official/vision/modeling/layers/edgetpu_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests EdgeTPU oriented layers and tools."""
from typing import Optional
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.modeling.layers import edgetpu
def random_boxes(shape):
a = tf.random.uniform(shape=shape+[2])
b = tf.random.uniform(shape=shape+[2])
l = tf.minimum(a, b)
u = tf.maximum(a, b)
return tf.concat([l, u], axis=-1)
def _maximum_activation_size(model):
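  """Returns the largest per-layer output size (number of elements) of a model."""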
max_size = 0
for layer in model.layers:
outputs = layer.output
if not isinstance(outputs, list):
outputs = [outputs]
for output in outputs:
if hasattr(output, 'shape'):
size = np.prod(output.shape)
max_size = max(max_size, size)
return max_size
def _deviation_and_margin(reference, valid, optimized):
"""Returns deviation and margin between two batched sets of indices."""
deviation_rate = 0
min_union = reference.shape[1] + optimized.shape[1]
runs = reference.shape[0]
for run in range(runs):
reference_slice = {*reference[run, :valid[run]].numpy().tolist()}
optimized_slice = {*optimized[run].numpy().astype(int).tolist()} - {-1}
union_size = len(optimized_slice | reference_slice)
symdiff_size = len(optimized_slice ^ reference_slice)
deviation_rate += symdiff_size / union_size
min_union = min(min_union, union_size)
deviation_rate = deviation_rate / runs
  # Six-sigma style margin via the law of large numbers: the mean deviation over
  # `runs` runs concentrates around its expectation, so this bound keeps the
  # comparison test non-flaky.
margin = 6 * (deviation_rate / np.sqrt(runs) + 1 / (runs * min_union))
return deviation_rate, margin
class NonMaxSuppressionTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
tf.random.set_seed(42)
def test_refinement_sample(self):
"""Tests difference in NMS behaviours.
    Runs on four boxes with the following IOU table (only neighbouring boxes
    qualify as similar boxes):
box | 0 | 1 | 2 | 3
--- | ---- | ---- | ---- | ----
0 | 1 | 7/13 | 1/4 | 1/19
1 | 7/13 | 1 | 7/13 | 1/4
2 | 1/4 | 7/13 | 1 | 7/13
3 | 1/19 | 1/4 | 7/13 | 1
    So box 0 is the best box and eliminates box 1; next is box 2, which is
    eliminated by box 1 only if that is allowed (depending on the number of
    refinements).
"""
boxes: tf.Tensor = tf.constant(
[
# y1, x1, y2, x2
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.3, 1.0, 1.3],
[0.0, 0.6, 1.0, 1.6],
[0.0, 0.9, 1.0, 1.9],
],
dtype=tf.float32)
scores: tf.Tensor = tf.constant([
1.0,
0.9,
0.8,
0.7,
], dtype=tf.float32)
self.assertAllEqual(
edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=0),
tf.constant([0.0, -1.0, -1.0, -1.0], dtype=tf.float32))
self.assertAllEqual(
edgetpu.non_max_suppression_padded(boxes, scores, 4, refinements=1),
tf.constant([0.0, 2.0, -1.0, -1.0], dtype=tf.float32))
@parameterized.parameters((16, 8, 200, [0.009, 0.004, 0.004]),
(31, 17, 100, [0.013, 0.004, 0.004]),
(71, 41, 100, [0.045, 0.003, 0.002]),
(150, 100, 100, [0.129, 0.010, 0.001]),
(300, 300, 100, [0.116, 0.016, 0.002]),
(600, 600, 50, [0.176, 0.032, 0.003]))
def test_reference_match(self, n, top, runs, max_devs):
"""Compares that new optimized method is close to reference method.
Runs two algorithms with same sets of input boxes and scores, and measures
deviation between returned sets of prunned boxes.
Read more about test results at ./g3doc/non_max_suppression.md
    (*) Avoid flakiness with a safe boundary (go/python-tips/048): the deviation
    between two sets is a positive number which may vary from test to test.
    Doing multiple runs is expected to reduce the variation of the average
    deviation, following the law of large numbers. Therefore, from the first
    test run we know an upper deviation bound which the algorithm should not
    exceed until it is broken (in any feasible amount of time in the future).
    Using this safe boundary makes the test non-flaky.
Args:
      n: number of boxes and scores at the input of the algorithm.
      top: limit on the number of output boxes.
      runs: number of runs to perform for the statistical testing, to avoid
        test flakiness.
      max_devs: series of mean limits on the deviation between the optimized and
        reference algorithms with different numbers of refinements. (Indexes of
        elements correspond to the number of refinements.) Please use the
        margin-based values proposed by a failed test to avoid flaky testing.
"""
boxes = random_boxes([runs, n])
scores = tf.random.uniform(shape=[runs, n])
reference, valid = tf.image.non_max_suppression_padded(
boxes, scores, top, pad_to_max_output_size=True)
for refinements, max_deviation in enumerate(max_devs):
optimized = edgetpu.non_max_suppression_padded(
boxes, scores, top, refinements=refinements)
deviation, margin = _deviation_and_margin(reference, valid, optimized)
self.assertLess(
deviation,
max_deviation,
msg='Deviation rate between optimized and reference implementations is '
'higher than expected. If you are tuning the test, recommended safe '
'deviation rate is '
f'{deviation} + {margin} = {deviation + margin}')
@parameterized.parameters(([16], 8), ([91, 150], 100), ([20, 20, 200], 10))
def test_sharded_match(self, shape: list[int], top: int):
boxes = random_boxes(shape)
scores = tf.random.uniform(shape=shape)
optimized = edgetpu.non_max_suppression_padded(boxes, scores, top)
reference = edgetpu._non_max_suppression_as_is(boxes, scores, top)
self.assertAllEqual(optimized, reference)
_sharded_nms = edgetpu.non_max_suppression_padded
_stright_nms = edgetpu._non_max_suppression_as_is
@parameterized.parameters(([16], 8, _sharded_nms, True),
([16], 8, _stright_nms, True),
([91, 150], 100, _sharded_nms, True),
([91, 150], 100, _stright_nms, False),
([20, 20, 200], 10, _sharded_nms, True),
([20, 20, 200], 10, _stright_nms, False))
def test_sharded_size(self, shape: list[int], top: int, algorithm,
fits_as_is: bool):
scores = tf.keras.Input(shape=shape, batch_size=1)
boxes = tf.keras.Input(shape=shape + [4], batch_size=1)
optimized = algorithm(boxes, scores, top)
model = tf.keras.Model(inputs=[boxes, scores], outputs=optimized)
max_size = _maximum_activation_size(model)
if fits_as_is:
# Sharding done or not needed.
self.assertLessEqual(max_size, edgetpu._RECOMMENDED_NMS_MEMORY)
else:
# Sharding needed.
self.assertGreater(max_size, edgetpu._RECOMMENDED_NMS_MEMORY)
def test_shard_tensors(self):
a: tf.Tensor = tf.constant([[0, 1, 2, 3, 4]])
b: tf.Tensor = tf.constant([[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
]])
for i, (a_i, b_i) in enumerate(edgetpu.shard_tensors(1, 3, (a, b))):
self.assertAllEqual(a_i, a[:, i * 3:i * 3 + 3])
self.assertAllEqual(b_i, b[:, i * 3:i * 3 + 3, :])
def test_top_k_sharded_fusion_arguments_validation(self):
    # Input scores is not a pair of (aggregation, shard).
self.assertRaises(ValueError, edgetpu.concat_and_top_k, 100,
tf.zeros(shape=[1000]))
    # Other input values are not pairs of (aggregation, shard).
self.assertRaises(TypeError, edgetpu.concat_and_top_k, 100,
(None, tf.zeros(shape=[1000])), None,
tf.zeros(shape=[1000]))
# Insufficient rank to do top_k
self.assertRaises(IndexError, edgetpu.concat_and_top_k, 100,
(None, tf.constant(1.)))
@parameterized.parameters(0, 1, 2)
def test_top_k_sharded_fusion_vs_top_k_unsharded(self, axis: int):
r"""Tests `horizontal` sharding using shard_tensors and concat_and_top_k.
    Will generate and test the following graph (4 shards in the diagram, 6
    shards in the test):
Input
-----
|
+-------+--------------------------------------------
| Split |----------------------- \
+-------+--- \ |
| \ | |
+-------+ +--------+ +-------+ +--------+ +-------+ +--------+ +-------+
| top k |-| concat |-| top k |-| concat |-| top k |-| concat |-| top k |
+-------+ +--------+ +-------+ +--------+ +-------+ +--------+ +-------+
|
Output
------
Args:
axis: test top_k axis (tensor rank will be axis + 1)
"""
sample: tf.Tensor = tf.random.uniform(
shape=axis * [1] + [10000], dtype=tf.float32)
top_1000_direct: tf.Tensor = tf.math.top_k(sample, 1000).values
top_1000_sharded: Optional[tf.Tensor] = None
for (piece,) in edgetpu.shard_tensors(axis, 1500, (sample,)):
(top_1000_sharded,) = edgetpu.concat_and_top_k(
1000, (top_1000_sharded, piece))
self.assertAllEqual(top_1000_direct, top_1000_sharded)
if __name__ == '__main__':
tf.test.main()
| 10,110 | 40.780992 | 82 | py |
models | models-master/official/vision/modeling/layers/deeplab.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for DeepLabV3."""
import tensorflow as tf
from official.modeling import tf_utils
class SpatialPyramidPooling(tf.keras.layers.Layer):
"""Implements the Atrous Spatial Pyramid Pooling.
References:
[Rethinking Atrous Convolution for Semantic Image Segmentation](
https://arxiv.org/pdf/1706.05587.pdf)
[Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation](https://arxiv.org/pdf/1802.02611.pdf)
"""
def __init__(
self,
output_channels,
dilation_rates,
pool_kernel_size=None,
use_sync_bn=False,
batchnorm_momentum=0.99,
batchnorm_epsilon=0.001,
activation='relu',
dropout=0.5,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
interpolation='bilinear',
use_depthwise_convolution=False,
**kwargs):
"""Initializes `SpatialPyramidPooling`.
Args:
output_channels: Number of channels produced by SpatialPyramidPooling.
dilation_rates: A list of integers for parallel dilated conv.
pool_kernel_size: A list of integers or None. If None, global average
pooling is applied, otherwise an average pooling of pool_kernel_size
is applied.
use_sync_bn: A bool, whether or not to use sync batch normalization.
batchnorm_momentum: A float for the momentum in BatchNorm. Defaults to
0.99.
batchnorm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
activation: A `str` for type of activation to be used. Defaults to 'relu'.
dropout: A float for the dropout rate before output. Defaults to 0.5.
kernel_initializer: Kernel initializer for conv layers. Defaults to
`glorot_uniform`.
kernel_regularizer: Kernel regularizer for conv layers. Defaults to None.
interpolation: The interpolation method for upsampling. Defaults to
`bilinear`.
      use_depthwise_convolution: If True, use separable depthwise convolutions
        in the spatial pyramid pooling branches. [Encoder-Decoder with Atrous Separable
Convolution for Semantic Image Segmentation](
https://arxiv.org/pdf/1802.02611.pdf)
**kwargs: Other keyword arguments for the layer.
"""
super(SpatialPyramidPooling, self).__init__(**kwargs)
self.output_channels = output_channels
self.dilation_rates = dilation_rates
self.use_sync_bn = use_sync_bn
self.batchnorm_momentum = batchnorm_momentum
self.batchnorm_epsilon = batchnorm_epsilon
self.activation = activation
self.dropout = dropout
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self.interpolation = interpolation
self.input_spec = tf.keras.layers.InputSpec(ndim=4)
self.pool_kernel_size = pool_kernel_size
self.use_depthwise_convolution = use_depthwise_convolution
def build(self, input_shape):
channels = input_shape[3]
self.aspp_layers = []
bn_op = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
conv_sequential = tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=self.output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(
self.kernel_initializer),
kernel_regularizer=self.kernel_regularizer,
use_bias=False),
bn_op(
axis=bn_axis,
momentum=self.batchnorm_momentum,
epsilon=self.batchnorm_epsilon,
synchronized=self.use_sync_bn),
tf.keras.layers.Activation(self.activation)
])
self.aspp_layers.append(conv_sequential)
for dilation_rate in self.dilation_rates:
leading_layers = []
kernel_size = (3, 3)
if self.use_depthwise_convolution:
leading_layers += [
tf.keras.layers.DepthwiseConv2D(
depth_multiplier=1,
kernel_size=kernel_size,
padding='same',
dilation_rate=dilation_rate,
use_bias=False)
]
kernel_size = (1, 1)
conv_sequential = tf.keras.Sequential(leading_layers + [
tf.keras.layers.Conv2D(
filters=self.output_channels,
kernel_size=kernel_size,
padding='same',
kernel_regularizer=self.kernel_regularizer,
kernel_initializer=tf_utils.clone_initializer(
self.kernel_initializer),
dilation_rate=dilation_rate,
use_bias=False),
bn_op(
axis=bn_axis,
momentum=self.batchnorm_momentum,
epsilon=self.batchnorm_epsilon,
synchronized=self.use_sync_bn),
tf.keras.layers.Activation(self.activation)
])
self.aspp_layers.append(conv_sequential)
if self.pool_kernel_size is None:
pool_sequential = tf.keras.Sequential([
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Reshape((1, 1, channels))])
else:
pool_sequential = tf.keras.Sequential([
tf.keras.layers.AveragePooling2D(self.pool_kernel_size)])
pool_sequential.add(
tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=self.output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(
self.kernel_initializer),
kernel_regularizer=self.kernel_regularizer,
use_bias=False),
bn_op(
axis=bn_axis,
momentum=self.batchnorm_momentum,
epsilon=self.batchnorm_epsilon,
synchronized=self.use_sync_bn),
tf.keras.layers.Activation(self.activation)
]))
self.aspp_layers.append(pool_sequential)
self.projection = tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=self.output_channels,
kernel_size=(1, 1),
kernel_initializer=tf_utils.clone_initializer(
self.kernel_initializer),
kernel_regularizer=self.kernel_regularizer,
use_bias=False),
bn_op(
axis=bn_axis,
momentum=self.batchnorm_momentum,
epsilon=self.batchnorm_epsilon,
synchronized=self.use_sync_bn),
tf.keras.layers.Activation(self.activation),
tf.keras.layers.Dropout(rate=self.dropout)
])
def call(self, inputs, training=None):
if training is None:
training = tf.keras.backend.learning_phase()
result = []
for i, layer in enumerate(self.aspp_layers):
x = layer(inputs, training=training)
      # Resize the output of the last branch (image-level pooling) back to the
      # input spatial size before concatenation.
if i == len(self.aspp_layers) - 1:
x = tf.image.resize(tf.cast(x, tf.float32), tf.shape(inputs)[1:3])
result.append(tf.cast(x, inputs.dtype))
result = tf.concat(result, axis=-1)
result = self.projection(result, training=training)
return result
def get_config(self):
config = {
'output_channels': self.output_channels,
'dilation_rates': self.dilation_rates,
'pool_kernel_size': self.pool_kernel_size,
'use_sync_bn': self.use_sync_bn,
'batchnorm_momentum': self.batchnorm_momentum,
'batchnorm_epsilon': self.batchnorm_epsilon,
'activation': self.activation,
'dropout': self.dropout,
'kernel_initializer': tf.keras.initializers.serialize(
self.kernel_initializer),
'kernel_regularizer': tf.keras.regularizers.serialize(
self.kernel_regularizer),
'interpolation': self.interpolation,
}
base_config = super(SpatialPyramidPooling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
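# A minimal usage sketch (illustrative only, not part of the original API docs):
# the layer preserves the spatial resolution and projects the concatenated
# branches to `output_channels`.
#
#   aspp = SpatialPyramidPooling(output_channels=256, dilation_rates=[6, 12, 18])
#   features = tf.ones([2, 64, 64, 128])
#   outputs = aspp(features, training=False)  # shape: [2, 64, 64, 256]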
| 8,561 | 37.053333 | 80 | py |
models | models-master/official/vision/modeling/layers/roi_aligner_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for roi_aligner.py."""
# Import libraries
import tensorflow as tf
from official.vision.modeling.layers import roi_aligner
class MultilevelROIAlignerTest(tf.test.TestCase):
def test_serialize_deserialize(self):
kwargs = dict(
crop_size=7,
sample_offset=0.5,
)
aligner = roi_aligner.MultilevelROIAligner(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(aligner.get_config(), expected_config)
new_aligner = roi_aligner.MultilevelROIAligner.from_config(
aligner.get_config())
self.assertAllEqual(aligner.get_config(), new_aligner.get_config())
if __name__ == '__main__':
tf.test.main()
| 1,275 | 28.674419 | 74 | py |
models | models-master/official/vision/modeling/layers/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers package definition."""
from official.vision.modeling.layers.box_sampler import BoxSampler
from official.vision.modeling.layers.detection_generator import DetectionGenerator
from official.vision.modeling.layers.detection_generator import MultilevelDetectionGenerator
from official.vision.modeling.layers.mask_sampler import MaskSampler
from official.vision.modeling.layers.nn_blocks import BottleneckBlock
from official.vision.modeling.layers.nn_blocks import BottleneckResidualInner
from official.vision.modeling.layers.nn_blocks import DepthwiseSeparableConvBlock
from official.vision.modeling.layers.nn_blocks import InvertedBottleneckBlock
from official.vision.modeling.layers.nn_blocks import ResidualBlock
from official.vision.modeling.layers.nn_blocks import ResidualInner
from official.vision.modeling.layers.nn_blocks import ReversibleLayer
from official.vision.modeling.layers.nn_blocks_3d import BottleneckBlock3D
from official.vision.modeling.layers.nn_blocks_3d import SelfGating
from official.vision.modeling.layers.nn_layers import CausalConvMixin
from official.vision.modeling.layers.nn_layers import Conv2D
from official.vision.modeling.layers.nn_layers import Conv3D
from official.vision.modeling.layers.nn_layers import DepthwiseConv2D
from official.vision.modeling.layers.nn_layers import GlobalAveragePool3D
from official.vision.modeling.layers.nn_layers import PositionalEncoding
from official.vision.modeling.layers.nn_layers import Scale
from official.vision.modeling.layers.nn_layers import SpatialAveragePool3D
from official.vision.modeling.layers.nn_layers import SqueezeExcitation
from official.vision.modeling.layers.nn_layers import StochasticDepth
from official.vision.modeling.layers.nn_layers import TemporalSoftmaxPool
from official.vision.modeling.layers.roi_aligner import MultilevelROIAligner
from official.vision.modeling.layers.roi_generator import MultilevelROIGenerator
from official.vision.modeling.layers.roi_sampler import ROISampler
| 2,597 | 58.045455 | 92 | py |
models | models-master/official/vision/modeling/layers/box_sampler.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of box sampler."""
# Import libraries
import tensorflow as tf
from official.vision.ops import sampling_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class BoxSampler(tf.keras.layers.Layer):
"""Creates a BoxSampler to sample positive and negative boxes."""
def __init__(self,
num_samples: int = 512,
foreground_fraction: float = 0.25,
**kwargs):
"""Initializes a box sampler.
Args:
num_samples: An `int` of the number of sampled boxes per image.
      foreground_fraction: A `float` in [0, 1], the fraction of the sampled
        boxes that should come from positive examples.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'num_samples': num_samples,
'foreground_fraction': foreground_fraction,
}
super(BoxSampler, self).__init__(**kwargs)
def call(self, positive_matches: tf.Tensor, negative_matches: tf.Tensor,
ignored_matches: tf.Tensor):
"""Samples and selects positive and negative instances.
Args:
positive_matches: A `bool` tensor of shape of [batch, N] where N is the
number of instances. For each element, `True` means the instance
corresponds to a positive example.
negative_matches: A `bool` tensor of shape of [batch, N] where N is the
number of instances. For each element, `True` means the instance
corresponds to a negative example.
ignored_matches: A `bool` tensor of shape of [batch, N] where N is the
number of instances. For each element, `True` means the instance should
be ignored.
Returns:
      A `tf.Tensor` of shape [batch_size, K], storing the indices of the
      sampled examples, where K is `num_samples`.
"""
sample_candidates = tf.logical_and(
tf.logical_or(positive_matches, negative_matches),
tf.logical_not(ignored_matches))
sampler = sampling_ops.BalancedPositiveNegativeSampler(
positive_fraction=self._config_dict['foreground_fraction'],
is_static=True)
batch_size = sample_candidates.shape[0]
sampled_indicators = []
for i in range(batch_size):
sampled_indicator = sampler.subsample(
sample_candidates[i],
self._config_dict['num_samples'],
positive_matches[i])
sampled_indicators.append(sampled_indicator)
sampled_indicators = tf.stack(sampled_indicators)
_, selected_indices = tf.nn.top_k(
tf.cast(sampled_indicators, dtype=tf.int32),
k=self._config_dict['num_samples'],
sorted=True)
return selected_indices
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
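# A minimal usage sketch (illustrative only): given per-instance boolean match
# indicators, the sampler returns the indices of `num_samples` selected
# instances per image.
#
#   sampler = BoxSampler(num_samples=256, foreground_fraction=0.25)
#   indices = sampler(positive_matches, negative_matches, ignored_matches)
#   # indices: int32 tensor of shape [batch_size, 256]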
| 3,401 | 35.191489 | 79 | py |
models | models-master/official/vision/modeling/layers/roi_aligner.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of ROI aligner."""
from typing import Mapping, Optional
import tensorflow as tf
from official.vision.ops import spatial_transform_ops
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelROIAligner(tf.keras.layers.Layer):
"""Performs ROIAlign for the second stage processing."""
def __init__(self, crop_size: int = 7, sample_offset: float = 0.5, **kwargs):
"""Initializes a ROI aligner.
Args:
crop_size: An `int` of the output size of the cropped features.
sample_offset: A `float` in [0, 1] of the subpixel sample offset.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'crop_size': crop_size,
'sample_offset': sample_offset,
}
super(MultilevelROIAligner, self).__init__(**kwargs)
def call(self,
features: Mapping[str, tf.Tensor],
boxes: tf.Tensor,
           training: Optional[bool] = None):
"""Generates ROIs.
Args:
      features: A dictionary with pyramid levels as keys and feature tensors as
        values. Each feature tensor is of shape
        [batch_size, height_l, width_l, num_filters].
      boxes: A 3-D `tf.Tensor` of shape [batch_size, num_boxes, 4]. Each row
        represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
training: A `bool` of whether it is in training mode.
Returns:
A 5-D `tf.Tensor` representing feature crop of shape
[batch_size, num_boxes, crop_size, crop_size, num_filters].
"""
roi_features = spatial_transform_ops.multilevel_crop_and_resize(
features,
boxes,
output_size=self._config_dict['crop_size'],
sample_offset=self._config_dict['sample_offset'])
return roi_features
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
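# A minimal usage sketch (illustrative only): `features` is keyed by pyramid
# level and `boxes` holds un-normalized [y1, x1, y2, x2] coordinates.
#
#   aligner = MultilevelROIAligner(crop_size=7, sample_offset=0.5)
#   roi_features = aligner(features, boxes, training=False)
#   # roi_features: [batch_size, num_boxes, 7, 7, num_filters]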
| 2,533 | 33.712329 | 79 | py |
models | models-master/official/vision/modeling/layers/nn_blocks.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common building blocks for neural networks."""
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Text
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.nlp import modeling as nlp_modeling
from official.vision.modeling.layers import nn_layers
def _pad_strides(strides: int, axis: int) -> Tuple[int, int, int, int]:
"""Converts int to len 4 strides (`tf.nn.avg_pool` uses length 4)."""
if axis == 1:
return (1, 1, strides, strides)
else:
return (1, strides, strides, 1)
def _maybe_downsample(x: tf.Tensor, out_filter: int, strides: int,
axis: int) -> tf.Tensor:
"""Downsamples feature map and 0-pads tensor if in_filter != out_filter."""
data_format = 'NCHW' if axis == 1 else 'NHWC'
strides = _pad_strides(strides, axis=axis)
x = tf.nn.avg_pool(x, strides, strides, 'VALID', data_format=data_format)
in_filter = x.shape[axis]
if in_filter < out_filter:
# Pad on channel dimension with 0s: half on top half on bottom.
pad_size = [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]
if axis == 1:
x = tf.pad(x, [[0, 0], pad_size, [0, 0], [0, 0]])
else:
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], pad_size])
return x + 0.
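# Example for `_maybe_downsample` (channels_last, axis=3): a [2, 8, 8, 16] input
# with strides=2 and out_filter=32 is average-pooled to [2, 4, 4, 16] and then
# zero-padded on the channel axis (8 channels below, 8 above) to [2, 4, 4, 32].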
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualBlock(tf.keras.layers.Layer):
"""A residual block."""
def __init__(self,
filters,
strides,
use_projection=False,
se_ratio=None,
resnetd_shortcut=False,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_explicit_padding: bool = False,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
bn_trainable=True,
**kwargs):
"""Initializes a residual block with BN after convolutions.
Args:
      filters: An `int` number of filters for the two convolutions and for the
        block output.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: A `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
resnetd_shortcut: A `bool` if True, apply the resnetd style modification
to the shortcut connection. Not implemented in residual blocks.
stochastic_depth_drop_rate: A `float` or None. if not None, drop rate for
the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
bn_trainable: A `bool` that indicates whether batch norm layers should be
trainable. Default to True.
**kwargs: Additional keyword arguments to be passed.
"""
super(ResidualBlock, self).__init__(**kwargs)
self._filters = filters
self._strides = strides
self._use_projection = use_projection
self._se_ratio = se_ratio
self._resnetd_shortcut = resnetd_shortcut
self._use_explicit_padding = use_explicit_padding
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
self._bn_trainable = bn_trainable
def build(self, input_shape):
if self._use_projection:
self._shortcut = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)
conv1_padding = 'same'
# explicit padding here is added for centernet
if self._use_explicit_padding:
self._pad = tf.keras.layers.ZeroPadding2D(padding=(1, 1))
conv1_padding = 'valid'
self._conv1 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
padding=conv1_padding,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)
self._conv2 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=self._filters,
out_filters=self._filters,
se_ratio=self._se_ratio,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
super(ResidualBlock, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'resnetd_shortcut': self._resnetd_shortcut,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_explicit_padding': self._use_explicit_padding,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'bn_trainable': self._bn_trainable
}
base_config = super(ResidualBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
shortcut = inputs
if self._use_projection:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
if self._use_explicit_padding:
inputs = self._pad(inputs)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
return self._activation_fn(x + shortcut)
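# A minimal usage sketch for `ResidualBlock` (illustrative only): with strides=1
# and no projection, the block preserves the input shape.
#
#   block = ResidualBlock(filters=64, strides=1)
#   outputs = block(tf.ones([2, 56, 56, 64]))  # shape: [2, 56, 56, 64]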
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock(tf.keras.layers.Layer):
"""A standard bottleneck block."""
def __init__(self,
filters,
strides,
dilation_rate=1,
use_projection=False,
se_ratio=None,
resnetd_shortcut=False,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
bn_trainable=True,
**kwargs):
"""Initializes a standard bottleneck block with BN after convolutions.
Args:
filters: An `int` number of filters for the first two convolutions. Note
that the third and final convolution will use 4 times as many filters.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
dilation_rate: An `int` dilation_rate of convolutions. Default to 1.
use_projection: A `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
resnetd_shortcut: A `bool`. If True, apply the resnetd style modification
to the shortcut connection.
stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
bn_trainable: A `bool` that indicates whether batch norm layers should be
trainable. Default to True.
**kwargs: Additional keyword arguments to be passed.
"""
super(BottleneckBlock, self).__init__(**kwargs)
self._filters = filters
self._strides = strides
self._dilation_rate = dilation_rate
self._use_projection = use_projection
self._se_ratio = se_ratio
self._resnetd_shortcut = resnetd_shortcut
self._use_sync_bn = use_sync_bn
self._activation = activation
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._bn_trainable = bn_trainable
def build(self, input_shape):
if self._use_projection:
if self._resnetd_shortcut:
self._shortcut0 = tf.keras.layers.AveragePooling2D(
pool_size=2, strides=self._strides, padding='same')
self._shortcut1 = tf.keras.layers.Conv2D(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._shortcut = tf.keras.layers.Conv2D(
filters=self._filters * 4,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)
self._conv1 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)
self._activation1 = tf_utils.get_activation(
self._activation, use_keras_layer=True)
self._conv2 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
dilation_rate=self._dilation_rate,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)
self._activation2 = tf_utils.get_activation(
self._activation, use_keras_layer=True)
self._conv3 = tf.keras.layers.Conv2D(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm3 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)
self._activation3 = tf_utils.get_activation(
self._activation, use_keras_layer=True)
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=self._filters * 4,
out_filters=self._filters * 4,
se_ratio=self._se_ratio,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
else:
self._squeeze_excitation = None
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
self._add = tf.keras.layers.Add()
super(BottleneckBlock, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'dilation_rate': self._dilation_rate,
'use_projection': self._use_projection,
'se_ratio': self._se_ratio,
'resnetd_shortcut': self._resnetd_shortcut,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'bn_trainable': self._bn_trainable
}
base_config = super(BottleneckBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
shortcut = inputs
if self._use_projection:
if self._resnetd_shortcut:
shortcut = self._shortcut0(shortcut)
shortcut = self._shortcut1(shortcut)
else:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation1(x)
x = self._conv2(x)
x = self._norm2(x)
x = self._activation2(x)
x = self._conv3(x)
x = self._norm3(x)
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
x = self._add([x, shortcut])
return self._activation3(x)
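# Illustrative usage sketch (not part of the library API). It shows how a
# single `BottleneckBlock` maps a dummy feature map when downsampling: with
# `strides=2` and `use_projection=True` the spatial size halves and the
# channel count becomes `4 * filters`. All shapes below are arbitrary.
def _example_bottleneck_block_usage():
  block = BottleneckBlock(filters=64, strides=2, use_projection=True)
  features = tf.ones([2, 32, 32, 64])
  outputs = block(features, training=False)  # Shape: [2, 16, 16, 256].
  return outputs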
@tf.keras.utils.register_keras_serializable(package='Vision')
class InvertedBottleneckBlock(tf.keras.layers.Layer):
"""An inverted bottleneck block."""
def __init__(self,
in_filters,
out_filters,
expand_ratio,
strides,
kernel_size=3,
se_ratio=None,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
se_inner_activation='relu',
se_gating_activation='sigmoid',
se_round_down_protect=True,
expand_se_in_filters=False,
depthwise_activation=None,
use_sync_bn=False,
dilation_rate=1,
divisible_by=1,
regularize_depthwise=False,
use_depthwise=True,
use_residual=True,
norm_momentum=0.99,
norm_epsilon=0.001,
output_intermediate_endpoints=False,
**kwargs):
"""Initializes an inverted bottleneck block with BN after convolutions.
Args:
in_filters: An `int` number of filters of the input tensor.
out_filters: An `int` number of filters of the output tensor.
expand_ratio: An `int` of expand_ratio for an inverted bottleneck block.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
kernel_size: An `int` kernel_size of the depthwise conv layer.
se_ratio: A `float` or None. If not None, se ratio for the squeeze and
excitation layer.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
se_inner_activation: A `str` name of squeeze-excitation inner activation.
se_gating_activation: A `str` name of squeeze-excitation gating
activation.
      se_round_down_protect: A `bool` of whether rounding down by more than 10%
        is allowed in the SE layer.
expand_se_in_filters: A `bool` of whether or not to expand in_filter in
squeeze and excitation layer.
depthwise_activation: A `str` name of the activation function for
depthwise only.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
      dilation_rate: An `int` that specifies the dilation rate to use for
        dilated convolution. Can be a single integer to specify the same value
        for all spatial dimensions.
      divisible_by: An `int` that ensures all inner dimensions are divisible by
        this number.
regularize_depthwise: A `bool` of whether or not apply regularization on
depthwise.
      use_depthwise: A `bool` of whether to use a depthwise convolution. If
        False, a fused (regular) convolution is used instead.
use_residual: A `bool` of whether to include residual connection between
input and output.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
output_intermediate_endpoints: A `bool` of whether or not output the
intermediate endpoints.
**kwargs: Additional keyword arguments to be passed.
"""
super(InvertedBottleneckBlock, self).__init__(**kwargs)
self._in_filters = in_filters
self._out_filters = out_filters
self._expand_ratio = expand_ratio
self._strides = strides
self._kernel_size = kernel_size
self._se_ratio = se_ratio
self._divisible_by = divisible_by
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._dilation_rate = dilation_rate
self._use_sync_bn = use_sync_bn
self._regularize_depthwise = regularize_depthwise
self._use_depthwise = use_depthwise
self._use_residual = use_residual
self._activation = activation
self._se_inner_activation = se_inner_activation
self._se_gating_activation = se_gating_activation
self._depthwise_activation = depthwise_activation
self._se_round_down_protect = se_round_down_protect
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._expand_se_in_filters = expand_se_in_filters
self._output_intermediate_endpoints = output_intermediate_endpoints
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
if not depthwise_activation:
self._depthwise_activation = activation
if regularize_depthwise:
self._depthsize_regularizer = kernel_regularizer
else:
self._depthsize_regularizer = None
def build(self, input_shape):
expand_filters = self._in_filters
if self._expand_ratio > 1:
# First 1x1 conv for channel expansion.
expand_filters = nn_layers.make_divisible(
self._in_filters * self._expand_ratio, self._divisible_by)
expand_kernel = 1 if self._use_depthwise else self._kernel_size
expand_stride = 1 if self._use_depthwise else self._strides
self._conv0 = tf.keras.layers.Conv2D(
filters=expand_filters,
kernel_size=expand_kernel,
strides=expand_stride,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._activation_layer = tf_utils.get_activation(
self._activation, use_keras_layer=True)
if self._use_depthwise:
# Depthwise conv.
self._conv1 = tf.keras.layers.DepthwiseConv2D(
kernel_size=(self._kernel_size, self._kernel_size),
strides=self._strides,
padding='same',
depth_multiplier=1,
dilation_rate=self._dilation_rate,
use_bias=False,
depthwise_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
depthwise_regularizer=self._depthsize_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._depthwise_activation_layer = tf_utils.get_activation(
self._depthwise_activation, use_keras_layer=True)
# Squeeze and excitation.
if self._se_ratio and self._se_ratio > 0 and self._se_ratio <= 1:
logging.info('Use Squeeze and excitation.')
in_filters = self._in_filters
if self._expand_se_in_filters:
in_filters = expand_filters
self._squeeze_excitation = nn_layers.SqueezeExcitation(
in_filters=in_filters,
out_filters=expand_filters,
se_ratio=self._se_ratio,
divisible_by=self._divisible_by,
round_down_protect=self._se_round_down_protect,
kernel_initializer=tf_utils.clone_initializer(
self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._se_inner_activation,
gating_activation=self._se_gating_activation)
else:
self._squeeze_excitation = None
# Last 1x1 conv.
self._conv2 = tf.keras.layers.Conv2D(
filters=self._out_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
self._add = tf.keras.layers.Add()
super(InvertedBottleneckBlock, self).build(input_shape)
def get_config(self):
config = {
'in_filters': self._in_filters,
'out_filters': self._out_filters,
'expand_ratio': self._expand_ratio,
'strides': self._strides,
'kernel_size': self._kernel_size,
'se_ratio': self._se_ratio,
'divisible_by': self._divisible_by,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'se_inner_activation': self._se_inner_activation,
'se_gating_activation': self._se_gating_activation,
'se_round_down_protect': self._se_round_down_protect,
'expand_se_in_filters': self._expand_se_in_filters,
'depthwise_activation': self._depthwise_activation,
'dilation_rate': self._dilation_rate,
'use_sync_bn': self._use_sync_bn,
'regularize_depthwise': self._regularize_depthwise,
'use_depthwise': self._use_depthwise,
'use_residual': self._use_residual,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'output_intermediate_endpoints': self._output_intermediate_endpoints
}
base_config = super(InvertedBottleneckBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
endpoints = {}
shortcut = inputs
if self._expand_ratio > 1:
x = self._conv0(inputs)
x = self._norm0(x)
x = self._activation_layer(x)
else:
x = inputs
if self._use_depthwise:
x = self._conv1(x)
x = self._norm1(x)
x = self._depthwise_activation_layer(x)
if self._output_intermediate_endpoints:
endpoints['depthwise'] = x
if self._squeeze_excitation:
x = self._squeeze_excitation(x)
x = self._conv2(x)
x = self._norm2(x)
if (self._use_residual and self._in_filters == self._out_filters and
self._strides == 1):
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
x = self._add([x, shortcut])
if self._output_intermediate_endpoints:
return x, endpoints
return x
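# Illustrative usage sketch (not part of the library API). An MBConv-style
# block that expands 32 input channels by `expand_ratio=6`, applies a
# stride-1 depthwise conv, and projects back to 32 channels. Because
# `in_filters == out_filters` and `strides == 1`, the residual is added.
def _example_inverted_bottleneck_block_usage():
  block = InvertedBottleneckBlock(
      in_filters=32, out_filters=32, expand_ratio=6, strides=1)
  features = tf.ones([2, 56, 56, 32])
  return block(features, training=False)  # Shape: [2, 56, 56, 32].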
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualInner(tf.keras.layers.Layer):
"""Creates a single inner block of a residual.
This corresponds to `F`/`G` functions in the RevNet paper:
Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse.
The Reversible Residual Network: Backpropagation Without Storing Activations.
(https://arxiv.org/pdf/1707.04585.pdf)
"""
def __init__(
self,
filters: int,
strides: int,
kernel_initializer: Union[str, Callable[
..., tf.keras.initializers.Initializer]] = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
activation: Union[str, Callable[..., tf.Tensor]] = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
batch_norm_first: bool = True,
**kwargs):
"""Initializes a ResidualInner.
Args:
filters: An `int` of output filter size.
strides: An `int` of stride size for convolution for the residual block.
kernel_initializer: A `str` or `tf.keras.initializers.Initializer`
instance for convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` for Conv2D.
activation: A `str` or `callable` instance of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
batch_norm_first: A `bool` of whether to apply activation and batch norm
before conv.
**kwargs: Additional keyword arguments to be passed.
"""
super(ResidualInner, self).__init__(**kwargs)
self.strides = strides
self.filters = filters
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._kernel_regularizer = kernel_regularizer
self._activation = tf.keras.activations.get(activation)
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._batch_norm_first = batch_norm_first
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape: tf.TensorShape):
if self._batch_norm_first:
self._batch_norm_0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._conv2d_1 = tf.keras.layers.Conv2D(
filters=self.filters,
kernel_size=3,
strides=self.strides,
use_bias=False,
padding='same',
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
self._batch_norm_1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._conv2d_2 = tf.keras.layers.Conv2D(
filters=self.filters,
kernel_size=3,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
super(ResidualInner, self).build(input_shape)
def get_config(self) -> Dict[str, Any]:
config = {
'filters': self.filters,
'strides': self.strides,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'batch_norm_first': self._batch_norm_first,
}
base_config = super(ResidualInner, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
x = inputs
if self._batch_norm_first:
x = self._batch_norm_0(x, training=training)
x = self._activation_fn(x)
x = self._conv2d_1(x)
x = self._batch_norm_1(x, training=training)
x = self._activation_fn(x)
x = self._conv2d_2(x)
return x
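# Illustrative usage sketch (not part of the library API). A `ResidualInner`
# is one `F`/`G` function of a reversible (RevNet) block: with
# `batch_norm_first=True` it applies BN -> activation before each of its two
# 3x3 convolutions. Shapes below are arbitrary.
def _example_residual_inner_usage():
  inner = ResidualInner(filters=32, strides=1)
  features = tf.ones([2, 28, 28, 32])
  return inner(features, training=False)  # Shape: [2, 28, 28, 32].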
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckResidualInner(tf.keras.layers.Layer):
"""Creates a single inner block of a bottleneck.
This corresponds to `F`/`G` functions in the RevNet paper:
Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse.
The Reversible Residual Network: Backpropagation Without Storing Activations.
(https://arxiv.org/pdf/1707.04585.pdf)
"""
def __init__(
self,
filters: int,
strides: int,
kernel_initializer: Union[str, Callable[
..., tf.keras.initializers.Initializer]] = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
activation: Union[str, Callable[..., tf.Tensor]] = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
batch_norm_first: bool = True,
**kwargs):
"""Initializes a BottleneckResidualInner.
Args:
      filters: An `int` number of filters for the first two convolutions. The
        last convolution, and thus the number of output channels from the
        bottleneck block, uses `4 * filters`.
strides: An `int` of stride size for convolution for the residual block.
kernel_initializer: A `str` or `tf.keras.initializers.Initializer`
instance for convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` for Conv2D.
activation: A `str` or `callable` instance of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
batch_norm_first: A `bool` of whether to apply activation and batch norm
before conv.
**kwargs: Additional keyword arguments to be passed.
"""
super(BottleneckResidualInner, self).__init__(**kwargs)
self.strides = strides
self.filters = filters
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._kernel_regularizer = kernel_regularizer
self._activation = tf.keras.activations.get(activation)
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._batch_norm_first = batch_norm_first
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape: tf.TensorShape):
if self._batch_norm_first:
self._batch_norm_0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._conv2d_1 = tf.keras.layers.Conv2D(
filters=self.filters,
kernel_size=1,
strides=self.strides,
use_bias=False,
padding='same',
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
self._batch_norm_1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._conv2d_2 = tf.keras.layers.Conv2D(
filters=self.filters,
kernel_size=3,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
self._batch_norm_2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._conv2d_3 = tf.keras.layers.Conv2D(
filters=self.filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
super(BottleneckResidualInner, self).build(input_shape)
def get_config(self) -> Dict[str, Any]:
config = {
'filters': self.filters,
'strides': self.strides,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'batch_norm_first': self._batch_norm_first,
}
base_config = super(BottleneckResidualInner, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
x = inputs
if self._batch_norm_first:
x = self._batch_norm_0(x, training=training)
x = self._activation_fn(x)
x = self._conv2d_1(x)
x = self._batch_norm_1(x, training=training)
x = self._activation_fn(x)
x = self._conv2d_2(x)
x = self._batch_norm_2(x, training=training)
x = self._activation_fn(x)
x = self._conv2d_3(x)
return x
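# Illustrative usage sketch (not part of the library API). The bottleneck
# variant of the RevNet inner function: 1x1 -> 3x3 -> 1x1 convolutions, with
# the final 1x1 producing `4 * filters` channels (here 4 * 16 = 64).
def _example_bottleneck_residual_inner_usage():
  inner = BottleneckResidualInner(filters=16, strides=1)
  features = tf.ones([2, 28, 28, 64])
  return inner(features, training=False)  # Shape: [2, 28, 28, 64].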
@tf.keras.utils.register_keras_serializable(package='Vision')
class ReversibleLayer(tf.keras.layers.Layer):
"""Creates a reversible layer.
Computes y1 = x1 + f(x2), y2 = x2 + g(y1), where f and g can be arbitrary
layers that are stateless, which in this case are `ResidualInner` layers.
"""
def __init__(self,
f: tf.keras.layers.Layer,
g: tf.keras.layers.Layer,
manual_grads: bool = True,
**kwargs):
"""Initializes a ReversibleLayer.
Args:
f: A `tf.keras.layers.Layer` instance of `f` inner block referred to in
paper. Each reversible layer consists of two inner functions. For
example, in RevNet the reversible residual consists of two f/g inner
(bottleneck) residual functions. Where the input to the reversible layer
is x, the input gets partitioned in the channel dimension and the
forward pass follows (eq8): x = [x1; x2], z1 = x1 + f(x2), y2 = x2 +
g(z1), y1 = stop_gradient(z1).
g: A `tf.keras.layers.Layer` instance of `g` inner block referred to in
paper. Detailed explanation same as above as `f` arg.
manual_grads: A `bool` [Testing Only] of whether to manually take
gradients as in Algorithm 1 or defer to autograd.
**kwargs: Additional keyword arguments to be passed.
"""
super(ReversibleLayer, self).__init__(**kwargs)
self._f = f
self._g = g
self._manual_grads = manual_grads
if tf.keras.backend.image_data_format() == 'channels_last':
self._axis = -1
else:
self._axis = 1
def get_config(self) -> Dict[str, Any]:
config = {
'f': self._f,
'g': self._g,
'manual_grads': self._manual_grads,
}
base_config = super(ReversibleLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _ckpt_non_trainable_vars(self):
self._f_non_trainable_vars = [
v.read_value() for v in self._f.non_trainable_variables
]
self._g_non_trainable_vars = [
v.read_value() for v in self._g.non_trainable_variables
]
def _load_ckpt_non_trainable_vars(self):
for v, v_chkpt in zip(self._f.non_trainable_variables,
self._f_non_trainable_vars):
v.assign(v_chkpt)
for v, v_chkpt in zip(self._g.non_trainable_variables,
self._g_non_trainable_vars):
v.assign(v_chkpt)
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
@tf.custom_gradient
def reversible(
x: tf.Tensor
) -> Tuple[tf.Tensor, Callable[[Any], Tuple[List[tf.Tensor],
List[tf.Tensor]]]]:
"""Implements Algorithm 1 in the RevNet paper.
Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse.
The Reversible Residual Network: Backpropagation Without Storing
Activations.
(https://arxiv.org/pdf/1707.04585.pdf)
Args:
        x: An input `tf.Tensor`.
Returns:
y: The output [y1; y2] in Algorithm 1.
grad_fn: A callable function that computes the gradients.
"""
with tf.GradientTape() as fwdtape:
fwdtape.watch(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=self._axis)
f_x2 = self._f(x2, training=training)
x1_down = _maybe_downsample(x1, f_x2.shape[self._axis], self._f.strides,
self._axis)
z1 = f_x2 + x1_down
g_z1 = self._g(z1, training=training)
x2_down = _maybe_downsample(x2, g_z1.shape[self._axis], self._f.strides,
self._axis)
y2 = x2_down + g_z1
# Equation 8: https://arxiv.org/pdf/1707.04585.pdf
# Decouple y1 and z1 so that their derivatives are different.
y1 = tf.identity(z1)
y = tf.concat([y1, y2], axis=self._axis)
irreversible = ((self._f.strides != 1 or self._g.strides != 1) or
(y.shape[self._axis] != inputs.shape[self._axis]))
# Checkpointing moving mean/variance for batch normalization layers
# as they shouldn't be updated during the custom gradient pass of f/g.
self._ckpt_non_trainable_vars()
def grad_fn(
dy: tf.Tensor,
variables: Optional[List[tf.Variable]] = None,
) -> Tuple[List[tf.Tensor], List[tf.Tensor]]:
"""Given dy calculate (dy/dx)|_{x_{input}} using f/g."""
if irreversible or not self._manual_grads:
grads_combined = fwdtape.gradient(
y, [x] + variables, output_gradients=dy)
dx = grads_combined[0]
grad_vars = grads_combined[1:]
else:
y1_nograd = tf.stop_gradient(y1)
y2_nograd = tf.stop_gradient(y2)
dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=self._axis)
# Index mapping from self.f/g.trainable_variables to grad_fn
# input `variables` kwarg so that we can reorder dwf + dwg
# variable gradient list to match `variables` order.
f_var_refs = [v.ref() for v in self._f.trainable_variables]
g_var_refs = [v.ref() for v in self._g.trainable_variables]
fg_var_refs = f_var_refs + g_var_refs
self_to_var_index = [fg_var_refs.index(v.ref()) for v in variables]
# Algorithm 1 in paper (line # documented in-line)
z1 = y1_nograd # line 2
with tf.GradientTape() as gtape:
gtape.watch(z1)
g_z1 = self._g(z1, training=training)
x2 = y2_nograd - g_z1 # line 3
with tf.GradientTape() as ftape:
ftape.watch(x2)
f_x2 = self._f(x2, training=training)
x1 = z1 - f_x2 # pylint: disable=unused-variable # line 4
# Compute gradients
g_grads_combined = gtape.gradient(
g_z1, [z1] + self._g.trainable_variables, output_gradients=dy2)
dz1 = dy1 + g_grads_combined[0] # line 5
dwg = g_grads_combined[1:] # line 9
f_grads_combined = ftape.gradient(
f_x2, [x2] + self._f.trainable_variables, output_gradients=dz1)
dx2 = dy2 + f_grads_combined[0] # line 6
dwf = f_grads_combined[1:] # line 8
dx1 = dz1 # line 7
# Pack the input and variable gradients.
dx = tf.concat([dx1, dx2], axis=self._axis)
grad_vars = dwf + dwg
# Reorder gradients (trainable_variables to variables kwarg order)
grad_vars = [grad_vars[i] for i in self_to_var_index]
# Restore batch normalization moving mean/variance for correctness.
self._load_ckpt_non_trainable_vars()
return dx, grad_vars # grad_fn end
return y, grad_fn # reversible end
activations = reversible(inputs)
return activations
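# Illustrative usage sketch (not part of the library API). Wires two stride-1
# `ResidualInner` functions into a reversible layer. The input is split in
# half along the channel axis, so each inner function sees (and must output)
# 32 of the 64 channels; the two halves are then re-concatenated.
def _example_reversible_layer_usage():
  f = ResidualInner(filters=32, strides=1)
  g = ResidualInner(filters=32, strides=1)
  layer = ReversibleLayer(f, g)
  features = tf.ones([2, 28, 28, 64])
  return layer(features, training=False)  # Shape: [2, 28, 28, 64].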
@tf.keras.utils.register_keras_serializable(package='Vision')
class DepthwiseSeparableConvBlock(tf.keras.layers.Layer):
"""Creates a depthwise separable convolution block with batch normalization.
"""
def __init__(
self,
filters: int,
kernel_size: int = 3,
strides: int = 1,
regularize_depthwise=False,
activation: Text = 'relu6',
kernel_initializer: Text = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
dilation_rate: int = 1,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
**kwargs):
"""Initializes a convolution block with batch normalization.
Args:
      filters: An `int` number of output filters of the pointwise (1x1)
        convolution.
kernel_size: An `int` that specifies the height and width of the 2D
convolution window.
strides: An `int` of block stride. If greater than 1, this block will
ultimately downsample the input.
      regularize_depthwise: A `bool`. If True, apply regularization on the
        depthwise convolution.
activation: A `str` name of the activation function.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
dilation_rate: An `int` or tuple/list of 2 `int`, specifying the dilation
rate to use for dilated convolution. Can be a single integer to specify
the same value for all spatial dimensions.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
super(DepthwiseSeparableConvBlock, self).__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._activation = activation
self._regularize_depthwise = regularize_depthwise
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._dilation_rate = dilation_rate
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
if regularize_depthwise:
self._depthsize_regularizer = kernel_regularizer
else:
self._depthsize_regularizer = None
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'regularize_depthwise': self._regularize_depthwise,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(DepthwiseSeparableConvBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self._dwconv0 = tf.keras.layers.DepthwiseConv2D(
kernel_size=self._kernel_size,
strides=self._strides,
padding='same',
depth_multiplier=1,
dilation_rate=self._dilation_rate,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._depthsize_regularizer,
use_bias=False)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._conv1 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
super(DepthwiseSeparableConvBlock, self).build(input_shape)
def call(self, inputs, training=None):
x = self._dwconv0(inputs)
x = self._norm0(x)
x = self._activation_fn(x)
x = self._conv1(x)
x = self._norm1(x)
return self._activation_fn(x)
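# Illustrative usage sketch (not part of the library API). A 3x3 depthwise
# convolution followed by a 1x1 pointwise convolution that projects to
# `filters` output channels, each followed by batch norm and the activation.
def _example_depthwise_separable_conv_block_usage():
  block = DepthwiseSeparableConvBlock(filters=64, kernel_size=3, strides=1)
  features = tf.ones([2, 56, 56, 32])
  return block(features, training=False)  # Shape: [2, 56, 56, 64].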
@tf.keras.utils.register_keras_serializable(package='Vision')
class TuckerConvBlock(tf.keras.layers.Layer):
"""An Tucker block (generalized bottleneck)."""
def __init__(self,
in_filters,
out_filters,
input_compression_ratio,
output_compression_ratio,
strides,
kernel_size=3,
stochastic_depth_drop_rate=None,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
divisible_by=1,
use_residual=True,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""Initializes an inverted bottleneck block with BN after convolutions.
Args:
in_filters: An `int` number of filters of the input tensor.
out_filters: An `int` number of filters of the output tensor.
      input_compression_ratio: A `float` of compression ratio for input
        filters.
      output_compression_ratio: A `float` of compression ratio for output
        filters.
strides: An `int` block stride. If greater than 1, this block will
ultimately downsample the input.
kernel_size: An `int` kernel_size of the depthwise conv layer.
      stochastic_depth_drop_rate: A `float` or None. If not None, drop rate for
the stochastic depth layer.
kernel_initializer: A `str` of kernel_initializer for convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2d.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: A `bool`. If True, use synchronized batch normalization.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
use_residual: A `bool` of whether to include residual connection between
input and output.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
super(TuckerConvBlock, self).__init__(**kwargs)
self._in_filters = in_filters
self._out_filters = out_filters
self._input_compression_ratio = input_compression_ratio
self._output_compression_ratio = output_compression_ratio
self._strides = strides
self._kernel_size = kernel_size
self._divisible_by = divisible_by
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._use_sync_bn = use_sync_bn
self._use_residual = use_residual
self._activation = activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
def build(self, input_shape):
input_compressed_filters = nn_layers.make_divisible(
value=self._in_filters * self._input_compression_ratio,
divisor=self._divisible_by,
round_down_protect=False)
self._conv0 = tf.keras.layers.Conv2D(
filters=input_compressed_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._activation_layer0 = tf_utils.get_activation(
self._activation, use_keras_layer=True)
output_compressed_filters = nn_layers.make_divisible(
value=self._out_filters * self._output_compression_ratio,
divisor=self._divisible_by,
round_down_protect=False)
self._conv1 = tf.keras.layers.Conv2D(
filters=output_compressed_filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
self._activation_layer1 = tf_utils.get_activation(
self._activation, use_keras_layer=True)
# Last 1x1 conv.
self._conv2 = tf.keras.layers.Conv2D(
filters=self._out_filters,
kernel_size=1,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn,
)
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = None
self._add = tf.keras.layers.Add()
super(TuckerConvBlock, self).build(input_shape)
def get_config(self):
config = {
'in_filters': self._in_filters,
'out_filters': self._out_filters,
'input_compression_ratio': self._input_compression_ratio,
'output_compression_ratio': self._output_compression_ratio,
'strides': self._strides,
'kernel_size': self._kernel_size,
'divisible_by': self._divisible_by,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'use_residual': self._use_residual,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(TuckerConvBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, training=None):
shortcut = inputs
x = self._conv0(inputs)
x = self._norm0(x)
x = self._activation_layer0(x)
x = self._conv1(x)
x = self._norm1(x)
x = self._activation_layer1(x)
x = self._conv2(x)
x = self._norm2(x)
if (self._use_residual and self._in_filters == self._out_filters and
self._strides == 1):
if self._stochastic_depth:
x = self._stochastic_depth(x, training=training)
x = self._add([x, shortcut])
return x
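# Illustrative usage sketch (not part of the library API). A Tucker block
# first compresses the 32 input channels by 0.25 (-> 8), runs the 3x3 conv at
# a compressed output width (0.25 * 64 = 16), then projects to 64 channels.
# Since `in_filters != out_filters`, no residual connection is added here.
def _example_tucker_conv_block_usage():
  block = TuckerConvBlock(
      in_filters=32,
      out_filters=64,
      input_compression_ratio=0.25,
      output_compression_ratio=0.25,
      strides=1)
  features = tf.ones([2, 28, 28, 32])
  return block(features, training=False)  # Shape: [2, 28, 28, 64].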
@tf.keras.utils.register_keras_serializable(package='Vision')
class LayerScale(tf.keras.layers.Layer):
"""LayerScale as introduced in CaiT: https://arxiv.org/abs/2103.17239.
Attributes:
init_values (float): value to initialize the diagonal matrix of
LayerScale.
"""
def __init__(self, init_values: float, **kwargs):
"""Initializes LayerScale."""
super().__init__(**kwargs)
self.gamma_init_value = init_values
def build(self, inputs_shape):
gamma_shape = (1, 1, inputs_shape[2])
self.gamma = self.add_weight(
name='layerscale_gamma',
shape=gamma_shape,
initializer=tf.keras.initializers.Constant(self.gamma_init_value),
trainable=True,
dtype=tf.float32,
)
def call(self, inputs, inputs_positions=None):
del inputs_positions
return tf.cast(self.gamma, inputs.dtype) * inputs
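# Illustrative usage sketch (not part of the library API). LayerScale
# multiplies a [batch, tokens, hidden] tensor by a learned per-channel gamma
# that is initialized to a small constant (1e-5 here), as in CaiT.
def _example_layer_scale_usage():
  layer = LayerScale(init_values=1e-5)
  tokens = tf.ones([2, 197, 768])
  return layer(tokens)  # Same shape, scaled by gamma.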
@tf.keras.utils.register_keras_serializable(package='Vision')
class TransformerEncoderBlock(nlp_modeling.layers.TransformerEncoderBlock):
"""TransformerEncoderBlock layer with stochastic depth and layerscale."""
def __init__(
self,
*args,
stochastic_depth_drop_rate=0.0,
layer_scale_init_value=0.0,
transformer_partition_dims=None,
max_attention_inference_parallelism=None,
**kwargs
):
"""Initializes TransformerEncoderBlock.
Args:
*args: positional arguments passed to super().__init__.
stochastic_depth_drop_rate: the drop rate for the stochastic depth layer.
      layer_scale_init_value: the initial value of the LayerScale gamma. If 0,
        LayerScale is not applied.
      transformer_partition_dims: transformer spatial partition dimensions.
max_attention_inference_parallelism: the number of examples to run in
parallel in the attention blocks during inference. Set this limit to
reduce the peak memory usage. If None, use vectorized operations to run
the whole batch in parallel.
**kwargs: keyword arguments passed to super().__init__.
"""
super().__init__(*args, **kwargs)
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._layer_scale_init_value = layer_scale_init_value
self._transformer_partition_dims = transformer_partition_dims
self._max_attention_inference_parallelism = (
max_attention_inference_parallelism
)
def build(self, input_shape):
super().build(input_shape)
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = lambda x, *args, **kwargs: tf.identity(x)
if self._layer_scale_init_value:
self._layer_scale_attn = LayerScale(
init_values=self._layer_scale_init_value, name='layer_scale_attn')
self._layer_scale_mlp = LayerScale(
init_values=self._layer_scale_init_value, name='layer_scale_mlp')
else:
self._layer_scale_attn = lambda x, *args, **kwargs: tf.identity(x)
self._layer_scale_mlp = lambda x, *args, **kwargs: tf.identity(x)
self._attention_layer = nn_layers.MultiHeadAttention(
num_heads=self._num_heads,
key_dim=self._key_dim,
value_dim=self._value_dim,
dropout=self._attention_dropout_rate,
use_bias=self._use_bias,
kernel_initializer=self._attention_initializer,
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
attention_axes=self._attention_axes,
output_shape=self._output_last_dim,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
max_inference_parallelism=self._max_attention_inference_parallelism,
partition_dims=self._transformer_partition_dims,
name='self_attention',
)
def get_config(self):
config = super().get_config()
config.update({
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'layer_scale_init_value': self._layer_scale_init_value,
'transformer_partition_dims': self._transformer_partition_dims,
'max_attention_inference_parallelism': (
self._max_attention_inference_parallelism
),
})
return config
def call(self, inputs, output_range=None, training=None):
"""Transformer self-attention encoder block call."""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError('Unexpected inputs to %s with length at %d' %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if output_range is None:
output_range = self._output_range
if output_range:
if self._norm_first:
source_tensor = input_tensor[:, 0:output_range, :]
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor[:, 0:output_range, :]
if attention_mask is not None:
attention_mask = attention_mask[:, 0:output_range, :]
else:
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is not None:
key_value = self._attention_layer_norm(key_value)
target_tensor = input_tensor
if key_value is None:
key_value = input_tensor
attention_output, attention_scores = self._attention_layer(
query=target_tensor,
value=key_value,
attention_mask=attention_mask,
return_attention_scores=True)
attention_output = self._attention_dropout(attention_output)
attention_output = self._layer_scale_attn(attention_output)
if self._norm_first:
# Important to not combine `self._norm_first` and
# `self._use_query_residual` into one if clause because else is only for
# `_norm_first == False`.
if self._use_query_residual:
attention_output = source_tensor + self._stochastic_depth(
attention_output, training=training)
source_attention_output = attention_output
attention_output = self._output_layer_norm(attention_output)
else:
if self._use_query_residual:
attention_output = target_tensor + self._stochastic_depth(
attention_output, training=training)
attention_output = self._attention_layer_norm(attention_output)
inner_output = self._intermediate_dense(attention_output)
inner_output = self._intermediate_activation_layer(inner_output)
inner_output = self._inner_dropout_layer(inner_output)
layer_output = self._output_dense(inner_output)
layer_output = self._output_dropout(layer_output)
# Layerscale after MLP.
layer_output = self._layer_scale_mlp(layer_output)
if self._norm_first:
layer_output = source_attention_output + self._stochastic_depth(
layer_output, training=training)
else:
# During mixed precision training, layer norm output is always fp32 for
# now. Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(
layer_output
+ self._stochastic_depth(attention_output, training=training))
if self._return_attention_scores:
return layer_output, attention_scores
else:
return layer_output
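# Illustrative usage sketch (not part of the library API). The
# `num_attention_heads`, `inner_dim`, and `inner_activation` arguments are
# assumed to be the usual parent (NLP) TransformerEncoderBlock constructor
# arguments and are forwarded via *args/**kwargs; the extra options enable
# stochastic depth and LayerScale on top of the standard block.
def _example_transformer_encoder_block_usage():
  block = TransformerEncoderBlock(
      num_attention_heads=8,
      inner_dim=3072,
      inner_activation='gelu',
      stochastic_depth_drop_rate=0.1,
      layer_scale_init_value=1e-5)
  tokens = tf.ones([2, 197, 768])
  return block(tokens, training=False)  # Shape: [2, 197, 768].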
@tf.keras.utils.register_keras_serializable(package='Vision')
class TransformerScaffold(nlp_modeling.layers.TransformerScaffold):
"""TransformerScaffold layer for vision applications."""
def __init__(
self,
*args,
stochastic_depth_drop_rate: float = 0.0,
return_attention_scores: bool = False,
ffn_has_residual_connection: bool = False,
max_attention_inference_parallelism: Optional[int] = None,
**kwargs
):
"""Initializes TransformerEncoderBlock.
Args:
*args: positional arguments passed to super().__init__.
stochastic_depth_drop_rate: the drop rate for the stochastic depth layer.
return_attention_scores: whether to return the attention output.
ffn_has_residual_connection: whether the feedforward network has internal
residual connection and layer norm. If False, the residual connection
and the layer norm op are called inside TransformerScaffold.
max_attention_inference_parallelism: the number of examples to run in
parallel in the attention blocks during inference. Set this limit to
reduce the peak memory usage. If None, use vectorized operations to run
the whole batch in parallel.
**kwargs: keyword arguments passed to super().__init__.
"""
super().__init__(*args, **kwargs)
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._return_attention_scores = return_attention_scores
self._ffn_has_residual_connection = ffn_has_residual_connection
self._max_attention_inference_parallelism = (
max_attention_inference_parallelism
)
def build(self, input_shape: Union[tf.TensorShape, List[int]]):
if self._stochastic_depth_drop_rate:
self._stochastic_depth = nn_layers.StochasticDepth(
self._stochastic_depth_drop_rate)
else:
self._stochastic_depth = lambda x, *args, **kwargs: tf.identity(x)
super().build(input_shape)
if self._max_attention_inference_parallelism is not None:
attention_layer_config = self._attention_layer.get_config()
self._attention_layer = self._attention_cls.from_config({
**attention_layer_config,
'max_inference_parallelism': (
self._max_attention_inference_parallelism
),
})
def get_config(self):
config = super().get_config()
config.update({
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'return_attention_scores': self._return_attention_scores,
'ffn_has_residual_connection': self._ffn_has_residual_connection,
'max_attention_inference_parallelism': (
self._max_attention_inference_parallelism
),
})
return config
def call(
self,
inputs: tf.Tensor,
training: Optional[bool] = None
) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]:
"""Transformer self-attention encoder block call."""
if isinstance(inputs, (list, tuple)):
if len(inputs) == 2:
input_tensor, attention_mask = inputs
key_value = None
elif len(inputs) == 3:
input_tensor, key_value, attention_mask = inputs
else:
raise ValueError('Unexpected inputs to %s with length at %d' %
(self.__class__, len(inputs)))
else:
input_tensor, key_value, attention_mask = (inputs, None, None)
if self._norm_first:
source_tensor = input_tensor
input_tensor = self._attention_layer_norm(input_tensor)
if key_value is None:
key_value = input_tensor
attention_output, attention_scores = self._attention_layer(
query=input_tensor,
value=key_value,
attention_mask=attention_mask,
training=training,
return_attention_scores=True)
attention_output = self._attention_dropout(
attention_output, training=training)
if self._norm_first:
source_attention_output = source_tensor + self._stochastic_depth(
attention_output, training=training)
attention_output = self._output_layer_norm(
source_attention_output)
else:
attention_output = self._attention_layer_norm(
input_tensor +
self._stochastic_depth(attention_output, training=training))
if self._feedforward_block is None:
intermediate_output = self._intermediate_dense(attention_output)
intermediate_output = self._intermediate_activation_layer(
intermediate_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output, training=training)
else:
layer_output = self._feedforward_block(
attention_output, training=training)
if self._norm_first:
if self._ffn_has_residual_connection:
raise ValueError(
            'In the case of `norm_first`, the residual connection should be '
            "done in the TransformerScaffold call function, not FFN's "
            'call function.')
output = source_attention_output + self._stochastic_depth(
layer_output, training=training)
else:
# During mixed precision training, layer norm output is always fp32 for
# now. Casts fp32 for the subsequent add.
layer_output = tf.cast(layer_output, tf.float32)
if self._ffn_has_residual_connection:
output = self._stochastic_depth(layer_output, training=training)
else:
output = self._output_layer_norm(
attention_output +
self._stochastic_depth(layer_output, training=training))
if self._return_attention_scores:
return output, attention_scores
else:
return output
| 72,010 | 37.263018 | 80 | py |
models | models-master/official/vision/modeling/backbones/spinenet_mobile.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of Mobile SpineNet Networks."""
import math
from typing import Any, List, Optional, Tuple
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
from official.vision.ops import spatial_transform_ops
layers = tf.keras.layers
FILTER_SIZE_MAP = {
0: 8,
1: 16,
2: 24,
3: 40,
4: 80,
5: 112,
6: 112,
7: 112,
}
# The fixed SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [
(2, 'mbconv', (0, 1), False),
(2, 'mbconv', (1, 2), False),
(4, 'mbconv', (1, 2), False),
(3, 'mbconv', (3, 4), False),
(4, 'mbconv', (3, 5), False),
(6, 'mbconv', (4, 6), False),
(4, 'mbconv', (4, 6), False),
(5, 'mbconv', (7, 8), False),
(7, 'mbconv', (7, 9), False),
(5, 'mbconv', (9, 10), False),
(5, 'mbconv', (9, 11), False),
(4, 'mbconv', (6, 11), True),
(3, 'mbconv', (5, 11), True),
(5, 'mbconv', (8, 13), True),
(7, 'mbconv', (6, 15), True),
(6, 'mbconv', (13, 15), True),
]
SCALING_MAP = {
'49': {
'endpoints_num_filters': 48,
'filter_size_scale': 1.0,
'block_repeats': 1,
},
'49S': {
'endpoints_num_filters': 40,
'filter_size_scale': 0.65,
'block_repeats': 1,
},
'49XS': {
'endpoints_num_filters': 24,
'filter_size_scale': 0.6,
'block_repeats': 1,
},
}
class BlockSpec(object):
"""A container class that specifies the block configuration for SpineNet."""
def __init__(self, level: int, block_fn: str, input_offsets: Tuple[int, int],
is_output: bool):
self.level = level
self.block_fn = block_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(
block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]:
"""Builds the list of BlockSpec objects for SpineNet."""
if not block_specs:
block_specs = SPINENET_BLOCK_SPECS
logging.info('Building SpineNet block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
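# Illustrative usage sketch (not part of the library API). A custom search
# space can be passed as (level, block_fn, input_offsets, is_output) tuples
# in the same format as `SPINENET_BLOCK_SPECS`; the two-entry list below is
# made up purely for demonstration.
def _example_build_block_specs_usage():
  custom_specs = [
      (2, 'mbconv', (0, 1), False),
      (4, 'mbconv', (1, 2), True),
  ]
  return build_block_specs(custom_specs)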
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpineNetMobile(tf.keras.Model):
"""Creates a Mobile SpineNet family model.
This implements:
[1] Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Golnaz Ghiasi, Mingxing Tan,
Yin Cui, Quoc V. Le, Xiaodan Song.
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization.
(https://arxiv.org/abs/1912.05027).
[2] Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Yin Cui, Mingxing Tan,
Quoc Le, Xiaodan Song.
Efficient Scale-Permuted Backbone with Learned Resource Distribution.
(https://arxiv.org/abs/2010.11426).
"""
def __init__(
self,
input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec(
shape=[None, None, None, 3]),
min_level: int = 3,
max_level: int = 7,
block_specs: Optional[List[BlockSpec]] = None,
endpoints_num_filters: int = 256,
se_ratio: float = 0.2,
block_repeats: int = 1,
filter_size_scale: float = 1.0,
expand_ratio: int = 6,
init_stochastic_depth_rate=0.0,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
use_keras_upsampling_2d: bool = False,
**kwargs):
"""Initializes a Mobile SpineNet model.
Args:
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
      min_level: An `int` of min level for output multiscale features.
      max_level: An `int` of max level for output multiscale features.
block_specs: The block specifications for the SpineNet model discovered by
NAS.
endpoints_num_filters: An `int` of feature dimension for the output
endpoints.
se_ratio: A `float` of Squeeze-and-Excitation ratio.
block_repeats: An `int` of number of blocks contained in the layer.
filter_size_scale: A `float` of multiplier for the filters (number of
channels) for all convolution ops. The value must be greater than zero.
Typical usage will be to set this value in (0, 1) to reduce the number
of parameters or computation cost of the model.
expand_ratio: An `integer` of expansion ratios for inverted bottleneck
blocks.
init_stochastic_depth_rate: A `float` of initial stochastic depth rate.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A small `float` added to variance to avoid dividing by zero.
use_keras_upsampling_2d: If True, use keras UpSampling2D layer.
**kwargs: Additional keyword arguments to be passed.
"""
self._input_specs = input_specs
self._min_level = min_level
self._max_level = max_level
self._block_specs = (
build_block_specs() if block_specs is None else block_specs
)
self._endpoints_num_filters = endpoints_num_filters
self._se_ratio = se_ratio
self._block_repeats = block_repeats
self._filter_size_scale = filter_size_scale
self._expand_ratio = expand_ratio
self._init_stochastic_depth_rate = init_stochastic_depth_rate
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._use_keras_upsampling_2d = use_keras_upsampling_2d
self._num_init_blocks = 2
self._norm = layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
# Build SpineNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net = self._build_stem(inputs=inputs)
input_width = input_specs.shape[2]
if input_width is None:
max_stride = max(map(lambda b: b.level, self._block_specs))
input_width = 2 ** max_stride
net = self._build_scale_permuted_network(net=net, input_width=input_width)
endpoints = self._build_endpoints(net=net)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super().__init__(inputs=inputs, outputs=endpoints)
def _block_group(self,
inputs: tf.Tensor,
in_filters: int,
out_filters: int,
strides: int,
expand_ratio: int = 6,
block_repeats: int = 1,
se_ratio: float = 0.2,
stochastic_depth_drop_rate: Optional[float] = None,
name: str = 'block_group'):
"""Creates one group of blocks for the SpineNet model."""
x = nn_blocks.InvertedBottleneckBlock(
in_filters=in_filters,
out_filters=out_filters,
strides=strides,
se_gating_activation='hard_sigmoid',
se_ratio=se_ratio,
expand_ratio=expand_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = nn_blocks.InvertedBottleneckBlock(
          in_filters=out_filters,  # Repeats take the previous output.
out_filters=out_filters,
strides=1,
se_ratio=se_ratio,
expand_ratio=expand_ratio,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
          norm_epsilon=self._norm_epsilon)(
              x)
return tf.keras.layers.Activation('linear', name=name)(x)
def _build_stem(self, inputs):
"""Builds SpineNet stem."""
x = layers.Conv2D(
filters=int(FILTER_SIZE_MAP[0] * self._filter_size_scale),
kernel_size=3,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x)
net = []
stem_strides = [1, 2]
# Build the initial level 2 blocks.
for i in range(self._num_init_blocks):
x = self._block_group(
inputs=x,
in_filters=int(FILTER_SIZE_MAP[i] * self._filter_size_scale),
out_filters=int(FILTER_SIZE_MAP[i + 1] * self._filter_size_scale),
expand_ratio=self._expand_ratio,
strides=stem_strides[i],
se_ratio=self._se_ratio,
block_repeats=self._block_repeats,
name='stem_block_{}'.format(i + 1))
net.append(x)
return net
def _build_scale_permuted_network(self,
net,
input_width,
weighted_fusion=False):
"""Builds scale-permuted network."""
net_sizes = [
int(math.ceil(input_width / 2)),
int(math.ceil(input_width / 2**2))
]
num_outgoing_connections = [0] * len(net)
endpoints = {}
for i, block_spec in enumerate(self._block_specs):
# Update block level if it is larger than max_level to avoid building
# blocks smaller than requested.
block_spec.level = min(block_spec.level, self._max_level)
# Find out specs for the target block.
target_width = int(math.ceil(input_width / 2**block_spec.level))
target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *
self._filter_size_scale)
# Resample then merge input0 and input1.
parents = []
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
x0 = self._resample_with_sepconv(
inputs=net[input0],
input_width=net_sizes[input0],
target_width=target_width,
target_num_filters=target_num_filters)
parents.append(x0)
num_outgoing_connections[input0] += 1
x1 = self._resample_with_sepconv(
inputs=net[input1],
input_width=net_sizes[input1],
target_width=target_width,
target_num_filters=target_num_filters)
parents.append(x1)
num_outgoing_connections[input1] += 1
# Merge 0 outdegree blocks to the output block.
if block_spec.is_output:
for j, (j_feat,
j_connections) in enumerate(zip(net, num_outgoing_connections)):
if j_connections == 0 and (j_feat.shape[2] == target_width and
j_feat.shape[3] == x0.shape[3]):
parents.append(j_feat)
num_outgoing_connections[j] += 1
# pylint: disable=g-direct-tensorflow-import
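      # Optional weighted fusion: every parent feature gets a learnable,
      # ReLU-clipped scalar weight and is scaled by weight / (sum + epsilon)
      # before the parents are summed.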
if weighted_fusion:
dtype = parents[0].dtype
parent_weights = [
tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format(
i, j)), dtype=dtype)) for j in range(len(parents))]
weights_sum = parent_weights[0]
for adder in parent_weights[1:]:
weights_sum = layers.Add()([weights_sum, adder])
parents = [
parents[i] * parent_weights[i] / (weights_sum + 0.0001)
for i in range(len(parents))
]
# Fuse all parent nodes then build a new block.
x = parents[0]
for adder in parents[1:]:
x = layers.Add()([x, adder])
x = tf_utils.get_activation(
self._activation, use_keras_layer=True)(x)
x = self._block_group(
inputs=x,
in_filters=target_num_filters,
out_filters=target_num_filters,
strides=1,
se_ratio=self._se_ratio,
expand_ratio=self._expand_ratio,
block_repeats=self._block_repeats,
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 1, len(self._block_specs)),
name='scale_permuted_block_{}'.format(i + 1))
net.append(x)
net_sizes.append(target_width)
num_outgoing_connections.append(0)
# Save output feats.
if block_spec.is_output:
if block_spec.level in endpoints:
raise ValueError('Duplicate feats found for output level {}.'.format(
block_spec.level))
if (block_spec.level < self._min_level or
block_spec.level > self._max_level):
          logging.warning(
              'SpineNet output level %s out of range [min_level, max_level] = '
              '[%s, %s] will not be used for further processing.',
              block_spec.level, self._min_level, self._max_level)
endpoints[str(block_spec.level)] = x
return endpoints
def _build_endpoints(self, net):
"""Matches filter size for endpoints before sharing conv layers."""
endpoints = {}
for level in range(self._min_level, self._max_level + 1):
x = layers.Conv2D(
filters=self._endpoints_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
net[str(level)])
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x)
endpoints[str(level)] = x
return endpoints
def _resample_with_sepconv(self, inputs, input_width, target_width,
target_num_filters):
"""Matches resolution and feature dimension."""
x = inputs
# Spatial resampling.
if input_width > target_width:
while input_width > target_width:
x = layers.DepthwiseConv2D(
kernel_size=3,
strides=2,
padding='SAME',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
x = tf_utils.get_activation(
self._activation, use_keras_layer=True)(x)
input_width /= 2
elif input_width < target_width:
scale = target_width // input_width
x = spatial_transform_ops.nearest_upsampling(
x, scale=scale, use_keras_layer=self._use_keras_upsampling_2d)
# Last 1x1 conv to match filter size.
x = layers.Conv2D(
filters=target_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
return x
def get_config(self):
config_dict = {
'min_level': self._min_level,
'max_level': self._max_level,
'endpoints_num_filters': self._endpoints_num_filters,
'se_ratio': self._se_ratio,
'expand_ratio': self._expand_ratio,
'block_repeats': self._block_repeats,
'filter_size_scale': self._filter_size_scale,
'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'use_keras_upsampling_2d': self._use_keras_upsampling_2d,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('spinenet_mobile')
def build_spinenet_mobile(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds Mobile SpineNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'spinenet_mobile', (f'Inconsistent backbone type '
f'{backbone_type}')
model_id = backbone_cfg.model_id
if model_id not in SCALING_MAP:
raise ValueError(
'Mobile SpineNet-{} is not a valid architecture.'.format(model_id))
scaling_params = SCALING_MAP[model_id]
return SpineNetMobile(
input_specs=input_specs,
min_level=backbone_cfg.min_level,
max_level=backbone_cfg.max_level,
endpoints_num_filters=scaling_params['endpoints_num_filters'],
block_repeats=scaling_params['block_repeats'],
filter_size_scale=scaling_params['filter_size_scale'],
se_ratio=backbone_cfg.se_ratio,
expand_ratio=backbone_cfg.expand_ratio,
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
kernel_regularizer=l2_regularizer,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
use_keras_upsampling_2d=backbone_cfg.use_keras_upsampling_2d)
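# Example (illustrative sketch, not part of the library API): constructing the
# backbone directly instead of through the config-driven builder above. The
# input size and scaling values below are assumptions for demonstration only.
#
#   backbone = SpineNetMobile(
#       input_specs=tf.keras.layers.InputSpec(shape=[None, 384, 384, 3]),
#       min_level=3,
#       max_level=7,
#       endpoints_num_filters=48,
#       block_repeats=1,
#       filter_size_scale=1.0)
#   features = backbone(tf.ones([1, 384, 384, 3]))
#   # `features` is a dict keyed by level strings; features['3'] is the
#   # stride-8 endpoint with `endpoints_num_filters` channels.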
| 20,774 | 36.910584 | 126 | py |
models | models-master/official/vision/modeling/backbones/spinenet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of SpineNet Networks."""
import math
from typing import Any, List, Optional, Tuple
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
from official.vision.ops import spatial_transform_ops
layers = tf.keras.layers
FILTER_SIZE_MAP = {
1: 32,
2: 64,
3: 128,
4: 256,
5: 256,
6: 256,
7: 256,
}
# The fixed SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [
(2, 'bottleneck', (0, 1), False),
(4, 'residual', (0, 1), False),
(3, 'bottleneck', (2, 3), False),
(4, 'bottleneck', (2, 4), False),
(6, 'residual', (3, 5), False),
(4, 'bottleneck', (3, 5), False),
(5, 'residual', (6, 7), False),
(7, 'residual', (6, 8), False),
(5, 'bottleneck', (8, 9), False),
(5, 'bottleneck', (8, 10), False),
(4, 'bottleneck', (5, 10), True),
(3, 'bottleneck', (4, 10), True),
(5, 'bottleneck', (7, 12), True),
(7, 'bottleneck', (5, 14), True),
(6, 'bottleneck', (12, 14), True),
(2, 'bottleneck', (2, 13), True),
]
SCALING_MAP = {
'49S': {
'endpoints_num_filters': 128,
'filter_size_scale': 0.65,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'49': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'96': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 2,
},
'143': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 1.0,
'block_repeats': 3,
},
# SpineNet-143 with 1.3x filter_size_scale.
'143L': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.3,
'resample_alpha': 1.0,
'block_repeats': 3,
},
'190': {
'endpoints_num_filters': 512,
'filter_size_scale': 1.3,
'resample_alpha': 1.0,
'block_repeats': 4,
},
}
class BlockSpec(object):
"""A container class that specifies the block configuration for SpineNet."""
def __init__(self, level: int, block_fn: str, input_offsets: Tuple[int, int],
is_output: bool):
self.level = level
self.block_fn = block_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(
block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]:
"""Builds the list of BlockSpec objects for SpineNet."""
if not block_specs:
block_specs = SPINENET_BLOCK_SPECS
logging.info('Building SpineNet block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
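# Example (illustrative): each tuple in SPINENET_BLOCK_SPECS unpacks into a
# BlockSpec, so build_block_specs()[0] describes a level-2 bottleneck block
# whose inputs are the two stem features at offsets 0 and 1.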
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpineNet(tf.keras.Model):
"""Creates a SpineNet family model.
This implements:
Xianzhi Du, Tsung-Yi Lin, Pengchong Jin, Golnaz Ghiasi, Mingxing Tan,
Yin Cui, Quoc V. Le, Xiaodan Song.
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization.
(https://arxiv.org/abs/1912.05027)
"""
def __init__(
self,
input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec(
shape=[None, None, None, 3]),
min_level: int = 3,
max_level: int = 7,
block_specs: Optional[List[BlockSpec]] = None,
endpoints_num_filters: int = 256,
resample_alpha: float = 0.5,
block_repeats: int = 1,
filter_size_scale: float = 1.0,
init_stochastic_depth_rate: float = 0.0,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
**kwargs):
"""Initializes a SpineNet model.
Args:
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
      min_level: An `int` of min level for output multiscale features.
      max_level: An `int` of max level for output multiscale features.
block_specs: A list of block specifications for the SpineNet model
discovered by NAS.
endpoints_num_filters: An `int` of feature dimension for the output
endpoints.
resample_alpha: A `float` of resampling factor in cross-scale connections.
block_repeats: An `int` of number of blocks contained in the layer.
filter_size_scale: A `float` of multiplier for the filters (number of
channels) for all convolution ops. The value must be greater than zero.
Typical usage will be to set this value in (0, 1) to reduce the number
of parameters or computation cost of the model.
init_stochastic_depth_rate: A `float` of initial stochastic depth rate.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
activation: A `str` name of the activation function.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A small `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
self._input_specs = input_specs
self._min_level = min_level
self._max_level = max_level
self._block_specs = (
build_block_specs() if block_specs is None else block_specs
)
self._endpoints_num_filters = endpoints_num_filters
self._resample_alpha = resample_alpha
self._block_repeats = block_repeats
self._filter_size_scale = filter_size_scale
self._init_stochastic_depth_rate = init_stochastic_depth_rate
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._init_block_fn = 'bottleneck'
self._num_init_blocks = 2
self._set_activation_fn(activation)
self._norm = layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
# Build SpineNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net = self._build_stem(inputs=inputs)
input_width = input_specs.shape[2]
if input_width is None:
max_stride = max(map(lambda b: b.level, self._block_specs))
input_width = 2 ** max_stride
net = self._build_scale_permuted_network(net=net, input_width=input_width)
endpoints = self._build_endpoints(net=net)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(SpineNet, self).__init__(inputs=inputs, outputs=endpoints)
def _set_activation_fn(self, activation):
if activation == 'relu':
self._activation_fn = tf.nn.relu
elif activation == 'swish':
self._activation_fn = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
def _block_group(self,
inputs: tf.Tensor,
filters: int,
strides: int,
block_fn_cand: str,
block_repeats: int = 1,
stochastic_depth_drop_rate: Optional[float] = None,
name: str = 'block_group'):
"""Creates one group of blocks for the SpineNet model."""
block_fn_candidates = {
'bottleneck': nn_blocks.BottleneckBlock,
'residual': nn_blocks.ResidualBlock,
}
block_fn = block_fn_candidates[block_fn_cand]
_, _, _, num_filters = inputs.get_shape().as_list()
if block_fn_cand == 'bottleneck':
use_projection = not (num_filters == (filters * 4) and strides == 1)
else:
use_projection = not (num_filters == filters and strides == 1)
x = block_fn(
filters=filters,
strides=strides,
use_projection=use_projection,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = block_fn(
filters=filters,
strides=1,
use_projection=False,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def _build_stem(self, inputs):
"""Builds SpineNet stem."""
x = layers.Conv2D(
filters=64,
kernel_size=7,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
net = []
# Build the initial level 2 blocks.
for i in range(self._num_init_blocks):
x = self._block_group(
inputs=x,
filters=int(FILTER_SIZE_MAP[2] * self._filter_size_scale),
strides=1,
block_fn_cand=self._init_block_fn,
block_repeats=self._block_repeats,
name='stem_block_{}'.format(i + 1))
net.append(x)
return net
def _build_scale_permuted_network(self,
net,
input_width,
weighted_fusion=False):
"""Builds scale-permuted network."""
net_sizes = [int(math.ceil(input_width / 2**2))] * len(net)
net_block_fns = [self._init_block_fn] * len(net)
num_outgoing_connections = [0] * len(net)
endpoints = {}
for i, block_spec in enumerate(self._block_specs):
# Find out specs for the target block.
target_width = int(math.ceil(input_width / 2**block_spec.level))
target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *
self._filter_size_scale)
target_block_fn = block_spec.block_fn
# Resample then merge input0 and input1.
parents = []
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
x0 = self._resample_with_alpha(
inputs=net[input0],
input_width=net_sizes[input0],
input_block_fn=net_block_fns[input0],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x0)
num_outgoing_connections[input0] += 1
x1 = self._resample_with_alpha(
inputs=net[input1],
input_width=net_sizes[input1],
input_block_fn=net_block_fns[input1],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x1)
num_outgoing_connections[input1] += 1
# Merge 0 outdegree blocks to the output block.
if block_spec.is_output:
for j, (j_feat,
j_connections) in enumerate(zip(net, num_outgoing_connections)):
if j_connections == 0 and (j_feat.shape[2] == target_width and
j_feat.shape[3] == x0.shape[3]):
parents.append(j_feat)
num_outgoing_connections[j] += 1
# pylint: disable=g-direct-tensorflow-import
if weighted_fusion:
dtype = parents[0].dtype
parent_weights = [
tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format(
i, j)), dtype=dtype)) for j in range(len(parents))]
weights_sum = tf.add_n(parent_weights)
parents = [
parents[i] * parent_weights[i] / (weights_sum + 0.0001)
for i in range(len(parents))
]
# Fuse all parent nodes then build a new block.
x = tf_utils.get_activation(self._activation_fn)(tf.add_n(parents))
x = self._block_group(
inputs=x,
filters=target_num_filters,
strides=1,
block_fn_cand=target_block_fn,
block_repeats=self._block_repeats,
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 1, len(self._block_specs)),
name='scale_permuted_block_{}'.format(i + 1))
net.append(x)
net_sizes.append(target_width)
net_block_fns.append(target_block_fn)
num_outgoing_connections.append(0)
# Save output feats.
if block_spec.is_output:
if block_spec.level in endpoints:
raise ValueError('Duplicate feats found for output level {}.'.format(
block_spec.level))
if (block_spec.level < self._min_level or
block_spec.level > self._max_level):
logging.warning(
'SpineNet output level %s out of range [min_level, max_level] = '
'[%s, %s] will not be used for further processing.',
block_spec.level, self._min_level, self._max_level)
endpoints[str(block_spec.level)] = x
return endpoints
def _build_endpoints(self, net):
"""Matches filter size for endpoints before sharing conv layers."""
endpoints = {}
for level in range(self._min_level, self._max_level + 1):
x = layers.Conv2D(
filters=self._endpoints_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
net[str(level)])
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
endpoints[str(level)] = x
return endpoints
def _resample_with_alpha(self,
inputs,
input_width,
input_block_fn,
target_width,
target_num_filters,
target_block_fn,
alpha=0.5):
"""Matches resolution and feature dimension."""
_, _, _, input_num_filters = inputs.get_shape().as_list()
if input_block_fn == 'bottleneck':
input_num_filters /= 4
new_num_filters = int(input_num_filters * alpha)
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
# Spatial resampling.
if input_width > target_width:
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=3,
strides=2,
padding='SAME',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
input_width /= 2
while input_width > target_width:
x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x)
input_width /= 2
elif input_width < target_width:
scale = target_width // input_width
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
# Last 1x1 conv to match filter size.
if target_block_fn == 'bottleneck':
target_num_filters *= 4
x = layers.Conv2D(
filters=target_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(
x)
return x
def get_config(self):
config_dict = {
'min_level': self._min_level,
'max_level': self._max_level,
'endpoints_num_filters': self._endpoints_num_filters,
'resample_alpha': self._resample_alpha,
'block_repeats': self._block_repeats,
'filter_size_scale': self._filter_size_scale,
'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('spinenet')
def build_spinenet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
"""Builds SpineNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'spinenet', (f'Inconsistent backbone type '
f'{backbone_type}')
model_id = str(backbone_cfg.model_id)
if model_id not in SCALING_MAP:
raise ValueError(
'SpineNet-{} is not a valid architecture.'.format(model_id))
scaling_params = SCALING_MAP[model_id]
return SpineNet(
input_specs=input_specs,
min_level=backbone_cfg.min_level,
max_level=backbone_cfg.max_level,
endpoints_num_filters=scaling_params['endpoints_num_filters'],
resample_alpha=scaling_params['resample_alpha'],
block_repeats=scaling_params['block_repeats'],
filter_size_scale=scaling_params['filter_size_scale'],
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
kernel_regularizer=l2_regularizer,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon)
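# Example (illustrative sketch): expanding the SCALING_MAP entry for
# SpineNet-49 by hand, mirroring what build_spinenet() does from a config.
# The 640x640 input resolution is an assumption for demonstration only.
#
#   params = SCALING_MAP['49']
#   backbone = SpineNet(
#       input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]),
#       min_level=3,
#       max_level=7,
#       endpoints_num_filters=params['endpoints_num_filters'],
#       resample_alpha=params['resample_alpha'],
#       block_repeats=params['block_repeats'],
#       filter_size_scale=params['filter_size_scale'])
#   endpoints = backbone(tf.ones([1, 640, 640, 3]))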
| 21,144 | 35.456897 | 80 | py |
models | models-master/official/vision/modeling/backbones/resnet_deeplab.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of Residual Networks with Deeplab modifications."""
from typing import Callable, Optional, Tuple, List
import numpy as np
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
layers = tf.keras.layers
# Specifications for different ResNet variants.
# Each entry specifies block configurations of the particular ResNet variant.
# Each element in the block configuration is in the following format:
# (block_fn, num_filters, block_repeats)
RESNET_SPECS = {
50: [
('bottleneck', 64, 3),
('bottleneck', 128, 4),
('bottleneck', 256, 6),
('bottleneck', 512, 3),
],
101: [
('bottleneck', 64, 3),
('bottleneck', 128, 4),
('bottleneck', 256, 23),
('bottleneck', 512, 3),
],
152: [
('bottleneck', 64, 3),
('bottleneck', 128, 8),
('bottleneck', 256, 36),
('bottleneck', 512, 3),
],
200: [
('bottleneck', 64, 3),
('bottleneck', 128, 24),
('bottleneck', 256, 36),
('bottleneck', 512, 3),
],
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class DilatedResNet(tf.keras.Model):
"""Creates a ResNet model with Deeplabv3 modifications.
This backbone is suitable for semantic segmentation. This implements
Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam.
Rethinking Atrous Convolution for Semantic Image Segmentation.
(https://arxiv.org/pdf/1706.05587)
"""
def __init__(
self,
model_id: int,
output_stride: int,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, 3]),
stem_type: str = 'v0',
resnetd_shortcut: bool = False,
replace_stem_max_pool: bool = False,
se_ratio: Optional[float] = None,
init_stochastic_depth_rate: float = 0.0,
multigrid: Optional[Tuple[int]] = None,
last_stage_repeats: int = 1,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a ResNet model with DeepLab modification.
Args:
model_id: An `int` specifies depth of ResNet backbone model.
output_stride: An `int` of output stride, ratio of input to output
resolution.
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
stem_type: A `str` of stem type. Can be `v0` or `v1`. `v1` replaces 7x7
conv by 3 3x3 convs.
resnetd_shortcut: A `bool` of whether to use ResNet-D shortcut in
downsampling blocks.
      replace_stem_max_pool: A `bool` of whether to replace the max pool in
        stem with a stride-2 conv.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
init_stochastic_depth_rate: A `float` of initial stochastic depth rate.
multigrid: A tuple of the same length as the number of blocks in the last
resnet stage.
      last_stage_repeats: An `int` that specifies how many times the last
        stage is repeated.
activation: A `str` name of the activation function.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._output_stride = output_stride
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._norm = layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._stem_type = stem_type
self._resnetd_shortcut = resnetd_shortcut
self._replace_stem_max_pool = replace_stem_max_pool
self._se_ratio = se_ratio
self._init_stochastic_depth_rate = init_stochastic_depth_rate
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Build ResNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
if stem_type == 'v0':
x = layers.Conv2D(
filters=64,
kernel_size=7,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(
x)
x = tf_utils.get_activation(activation)(x)
elif stem_type == 'v1':
x = layers.Conv2D(
filters=64,
kernel_size=3,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(
x)
x = tf_utils.get_activation(activation)(x)
x = layers.Conv2D(
filters=64,
kernel_size=3,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(
x)
x = tf_utils.get_activation(activation)(x)
x = layers.Conv2D(
filters=128,
kernel_size=3,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(
x)
x = tf_utils.get_activation(activation)(x)
else:
raise ValueError('Stem type {} not supported.'.format(stem_type))
if replace_stem_max_pool:
x = layers.Conv2D(
filters=64,
kernel_size=3,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(
x)
x = tf_utils.get_activation(activation, use_keras_layer=True)(x)
else:
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
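    # Stages up to `normal_resnet_stage` keep the regular strided ResNet
    # layout; the remaining stages use stride 1 with growing dilation rates so
    # the feature stride never exceeds `output_stride`.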
normal_resnet_stage = int(np.math.log2(self._output_stride)) - 2
endpoints = {}
for i in range(normal_resnet_stage + 1):
spec = RESNET_SPECS[model_id][i]
if spec[0] == 'bottleneck':
block_fn = nn_blocks.BottleneckBlock
else:
raise ValueError('Block fn `{}` is not supported.'.format(spec[0]))
x = self._block_group(
inputs=x,
filters=spec[1],
strides=(1 if i == 0 else 2),
dilation_rate=1,
block_fn=block_fn,
block_repeats=spec[2],
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 2, 4 + last_stage_repeats),
name='block_group_l{}'.format(i + 2))
endpoints[str(i + 2)] = x
dilation_rate = 2
for i in range(normal_resnet_stage + 1, 3 + last_stage_repeats):
spec = RESNET_SPECS[model_id][i] if i < 3 else RESNET_SPECS[model_id][-1]
if spec[0] == 'bottleneck':
block_fn = nn_blocks.BottleneckBlock
else:
raise ValueError('Block fn `{}` is not supported.'.format(spec[0]))
x = self._block_group(
inputs=x,
filters=spec[1],
strides=1,
dilation_rate=dilation_rate,
block_fn=block_fn,
block_repeats=spec[2],
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 2, 4 + last_stage_repeats),
multigrid=multigrid if i >= 3 else None,
name='block_group_l{}'.format(i + 2))
dilation_rate *= 2
endpoints[str(normal_resnet_stage + 2)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(DilatedResNet, self).__init__(
inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs: tf.Tensor,
filters: int,
strides: int,
dilation_rate: int,
block_fn: Callable[..., tf.keras.layers.Layer],
block_repeats: int = 1,
stochastic_depth_drop_rate: float = 0.0,
multigrid: Optional[List[int]] = None,
name: str = 'block_group'):
"""Creates one group of blocks for the ResNet model.
Deeplab applies strides at the last block.
Args:
inputs: A `tf.Tensor` of size `[batch, channels, height, width]`.
      filters: An `int` of number of filters for the first convolution of the
layer.
strides: An `int` of stride to use for the first convolution of the layer.
If greater than 1, this layer will downsample the input.
      dilation_rate: An `int` of dilated convolution rate.
block_fn: Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`.
block_repeats: An `int` of number of blocks contained in the layer.
stochastic_depth_drop_rate: A `float` of drop rate of the current block
group.
multigrid: A list of `int` or None. If specified, dilation rates for each
block is scaled up by its corresponding factor in the multigrid.
name: A `str` name for the block.
Returns:
The output `tf.Tensor` of the block layer.
"""
if multigrid is not None and len(multigrid) != block_repeats:
raise ValueError('multigrid has to match number of block_repeats')
if multigrid is None:
multigrid = [1] * block_repeats
    # TODO(arashwan): move striding to the end of the block.
x = block_fn(
filters=filters,
strides=strides,
dilation_rate=dilation_rate * multigrid[0],
use_projection=True,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
se_ratio=self._se_ratio,
resnetd_shortcut=self._resnetd_shortcut,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for i in range(1, block_repeats):
x = block_fn(
filters=filters,
strides=1,
dilation_rate=dilation_rate * multigrid[i],
use_projection=False,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
resnetd_shortcut=self._resnetd_shortcut,
se_ratio=self._se_ratio,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'output_stride': self._output_stride,
'stem_type': self._stem_type,
'resnetd_shortcut': self._resnetd_shortcut,
'replace_stem_max_pool': self._replace_stem_max_pool,
'se_ratio': self._se_ratio,
'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('dilated_resnet')
def build_dilated_resnet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds ResNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'dilated_resnet', (f'Inconsistent backbone type '
f'{backbone_type}')
return DilatedResNet(
model_id=backbone_cfg.model_id,
output_stride=backbone_cfg.output_stride,
input_specs=input_specs,
stem_type=backbone_cfg.stem_type,
resnetd_shortcut=backbone_cfg.resnetd_shortcut,
replace_stem_max_pool=backbone_cfg.replace_stem_max_pool,
se_ratio=backbone_cfg.se_ratio,
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
multigrid=backbone_cfg.multigrid,
last_stage_repeats=backbone_cfg.last_stage_repeats,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
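# Example (illustrative sketch): a DeepLab-style ResNet-50 with output stride
# 16 and a last-stage multigrid of (1, 2, 4); all values are assumptions for
# demonstration only.
#
#   backbone = DilatedResNet(model_id=50, output_stride=16, multigrid=(1, 2, 4))
#   features = backbone(tf.ones([1, 512, 512, 3]))
#   # The deepest endpoint stays at 512 / 16 = 32x32 spatial resolution.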
| 15,830 | 36.603325 | 139 | py |
models | models-master/official/vision/modeling/backbones/efficientnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of EfficientNet Networks."""
import math
from typing import Any, List, Tuple
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
layers = tf.keras.layers
# The fixed EfficientNet-B0 architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_fn, block_repeats, kernel_size, strides, expand_ratio, in_filters,
# out_filters, is_output)
EN_B0_BLOCK_SPECS = [
('mbconv', 1, 3, 1, 1, 32, 16, False),
('mbconv', 2, 3, 2, 6, 16, 24, True),
('mbconv', 2, 5, 2, 6, 24, 40, True),
('mbconv', 3, 3, 2, 6, 40, 80, False),
('mbconv', 3, 5, 1, 6, 80, 112, True),
('mbconv', 4, 5, 2, 6, 112, 192, False),
('mbconv', 1, 3, 1, 6, 192, 320, True),
]
SCALING_MAP = {
'b0': dict(width_scale=1.0, depth_scale=1.0),
'b1': dict(width_scale=1.0, depth_scale=1.1),
'b2': dict(width_scale=1.1, depth_scale=1.2),
'b3': dict(width_scale=1.2, depth_scale=1.4),
'b4': dict(width_scale=1.4, depth_scale=1.8),
'b5': dict(width_scale=1.6, depth_scale=2.2),
'b6': dict(width_scale=1.8, depth_scale=2.6),
'b7': dict(width_scale=2.0, depth_scale=3.1),
}
class BlockSpec():
"""A container class that specifies the block configuration for MnasNet."""
def __init__(self, block_fn: str, block_repeats: int, kernel_size: int,
strides: int, expand_ratio: float, in_filters: int,
out_filters: int, is_output: bool, width_scale: float,
depth_scale: float):
self.block_fn = block_fn
self.block_repeats = round_repeats(block_repeats, depth_scale)
self.kernel_size = kernel_size
self.strides = strides
self.expand_ratio = expand_ratio
self.in_filters = nn_layers.round_filters(in_filters, width_scale)
self.out_filters = nn_layers.round_filters(out_filters, width_scale)
self.is_output = is_output
def round_repeats(repeats: int, multiplier: float, skip: bool = False) -> int:
"""Returns rounded number of filters based on depth multiplier."""
if skip or not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
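# Example (illustrative): with the B3 depth multiplier of 1.4, a stage with 3
# repeats grows to round_repeats(3, 1.4) == ceil(4.2) == 5 repeated blocks.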
def block_spec_decoder(specs: List[Tuple[Any, ...]], width_scale: float,
depth_scale: float) -> List[BlockSpec]:
"""Decodes and returns specs for a block."""
decoded_specs = []
for s in specs:
s = s + (
width_scale,
depth_scale,
)
decoded_specs.append(BlockSpec(*s))
return decoded_specs
@tf.keras.utils.register_keras_serializable(package='Vision')
class EfficientNet(tf.keras.Model):
"""Creates an EfficientNet family model.
This implements the EfficientNet model from:
Mingxing Tan, Quoc V. Le.
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
(https://arxiv.org/pdf/1905.11946)
"""
def __init__(self,
model_id: str,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, 3]),
se_ratio: float = 0.0,
stochastic_depth_drop_rate: float = 0.0,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: tf.keras.regularizers.Regularizer = None,
bias_regularizer: tf.keras.regularizers.Regularizer = None,
activation: str = 'relu',
se_inner_activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001, # pytype: disable=annotation-type-mismatch # typed-keras
**kwargs):
"""Initializes an EfficientNet model.
Args:
model_id: A `str` of model ID of EfficientNet.
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
se_ratio: A `float` of squeeze and excitation ratio for inverted
bottleneck blocks.
stochastic_depth_drop_rate: A `float` of drop rate for drop connect layer.
kernel_initializer: A `str` for kernel initializer of convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
      activation: A `str` name of the activation function.
      se_inner_activation: A `str` name of the activation function used in
        the Squeeze-and-Excitation layer.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._se_ratio = se_ratio
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._use_sync_bn = use_sync_bn
self._activation = activation
self._se_inner_activation = se_inner_activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._norm = layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Build EfficientNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
width_scale = SCALING_MAP[model_id]['width_scale']
depth_scale = SCALING_MAP[model_id]['depth_scale']
# Build stem.
x = layers.Conv2D(
filters=nn_layers.round_filters(32, width_scale),
kernel_size=3,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(
x)
x = tf_utils.get_activation(activation)(x)
# Build intermediate blocks.
endpoints = {}
endpoint_level = 2
decoded_specs = block_spec_decoder(EN_B0_BLOCK_SPECS, width_scale,
depth_scale)
for i, specs in enumerate(decoded_specs):
x = self._block_group(
inputs=x, specs=specs, name='block_group_{}'.format(i))
if specs.is_output:
endpoints[str(endpoint_level)] = x
endpoint_level += 1
# Build output specs for downstream tasks.
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
# Build the final conv for classification.
x = layers.Conv2D(
filters=nn_layers.round_filters(1280, width_scale),
kernel_size=1,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=bn_axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(
x)
endpoints[str(endpoint_level)] = tf_utils.get_activation(activation)(x)
super(EfficientNet, self).__init__(
inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs: tf.Tensor,
specs: BlockSpec,
name: str = 'block_group'):
"""Creates one group of blocks for the EfficientNet model.
Args:
inputs: A `tf.Tensor` of size `[batch, channels, height, width]`.
specs: The specifications for one inverted bottleneck block group.
name: A `str` name for the block.
Returns:
The output `tf.Tensor` of the block layer.
"""
if specs.block_fn == 'mbconv':
block_fn = nn_blocks.InvertedBottleneckBlock
else:
raise ValueError('Block func {} not supported.'.format(specs.block_fn))
x = block_fn(
in_filters=specs.in_filters,
out_filters=specs.out_filters,
expand_ratio=specs.expand_ratio,
strides=specs.strides,
kernel_size=specs.kernel_size,
se_ratio=self._se_ratio,
stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
se_inner_activation=self._se_inner_activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, specs.block_repeats):
x = block_fn(
in_filters=specs.out_filters, # Set 'in_filters' to 'out_filters'.
out_filters=specs.out_filters,
expand_ratio=specs.expand_ratio,
strides=1, # Fix strides to 1.
kernel_size=specs.kernel_size,
se_ratio=self._se_ratio,
stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
se_inner_activation=self._se_inner_activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'se_ratio': self._se_ratio,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('efficientnet')
def build_efficientnet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
se_inner_activation: str = 'relu') -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds EfficientNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'efficientnet', (f'Inconsistent backbone type '
f'{backbone_type}')
return EfficientNet(
model_id=backbone_cfg.model_id,
input_specs=input_specs,
stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate,
se_ratio=backbone_cfg.se_ratio,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer,
se_inner_activation=se_inner_activation)
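# Example (illustrative sketch): constructing EfficientNet-B0 directly; the
# 224x224 input size is an assumption for demonstration only.
#
#   backbone = EfficientNet(model_id='b0', se_ratio=0.25)
#   features = backbone(tf.ones([1, 224, 224, 3]))
#   # features['2'] .. features['5'] are the stride-4 to stride-32 outputs;
#   # features['6'] is the final 1x1-conv feature map with 1280 channels.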
| 12,428 | 36.663636 | 116 | py |
models | models-master/official/vision/modeling/backbones/resnet_3d_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for resnet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import resnet_3d
class ResNet3DTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 50, 4, 'v0', False, 0.0),
(128, 50, 4, 'v1', False, 0.2),
(256, 50, 4, 'v1', True, 0.2),
)
def test_network_creation(self, input_size, model_id, endpoint_filter_scale,
stem_type, se_ratio, init_stochastic_depth_rate):
"""Test creation of ResNet3D family models."""
tf.keras.backend.set_image_data_format('channels_last')
temporal_strides = [1, 1, 1, 1]
temporal_kernel_sizes = [(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
(1, 3, 1)]
use_self_gating = [True, False, True, False]
network = resnet_3d.ResNet3D(
model_id=model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
use_self_gating=use_self_gating,
stem_type=stem_type,
se_ratio=se_ratio,
init_stochastic_depth_rate=init_stochastic_depth_rate)
inputs = tf.keras.Input(shape=(8, input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
self.assertAllEqual([
1, 2, input_size / 2**2, input_size / 2**2, 64 * endpoint_filter_scale
], endpoints['2'].shape.as_list())
self.assertAllEqual([
1, 2, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale
], endpoints['3'].shape.as_list())
self.assertAllEqual([
1, 2, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale
], endpoints['4'].shape.as_list())
self.assertAllEqual([
1, 2, input_size / 2**5, input_size / 2**5, 512 * endpoint_filter_scale
], endpoints['5'].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=50,
temporal_strides=[1, 1, 1, 1],
temporal_kernel_sizes=[(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
(1, 3, 1)],
stem_type='v0',
stem_conv_temporal_kernel_size=5,
stem_conv_temporal_stride=2,
stem_pool_temporal_stride=2,
se_ratio=0.0,
use_self_gating=None,
init_stochastic_depth_rate=0.0,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = resnet_3d.ResNet3D(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = resnet_3d.ResNet3D.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,789 | 35.796117 | 79 | py |
models | models-master/official/vision/modeling/backbones/mobiledet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mobiledet."""
import itertools
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import mobiledet
class MobileDetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
'MobileDetCPU',
'MobileDetDSP',
'MobileDetEdgeTPU',
'MobileDetGPU',
)
def test_serialize_deserialize(self, model_id):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=model_id,
filter_size_scale=1.0,
use_sync_bn=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
norm_momentum=0.99,
norm_epsilon=0.001,
min_depth=8,
divisible_by=8,
regularize_depthwise=False,
)
network = mobiledet.MobileDet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = mobiledet.MobileDet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
@parameterized.parameters(
itertools.product(
[1, 3],
[
'MobileDetCPU',
'MobileDetDSP',
'MobileDetEdgeTPU',
'MobileDetGPU',
],
))
def test_input_specs(self, input_dim, model_id):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = mobiledet.MobileDet(model_id=model_id, input_specs=input_specs)
inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)
_ = network(inputs)
@parameterized.parameters(
itertools.product(
[
'MobileDetCPU',
'MobileDetDSP',
'MobileDetEdgeTPU',
'MobileDetGPU',
],
[32, 224],
))
def test_mobiledet_creation(self, model_id, input_size):
"""Test creation of MobileDet family models."""
tf.keras.backend.set_image_data_format('channels_last')
mobiledet_layers = {
# The number of filters of layers having outputs been collected
# for filter_size_scale = 1.0
'MobileDetCPU': [8, 16, 32, 72, 144],
'MobileDetDSP': [24, 32, 64, 144, 240],
'MobileDetEdgeTPU': [16, 16, 40, 96, 384],
'MobileDetGPU': [16, 32, 64, 128, 384],
}
network = mobiledet.MobileDet(model_id=model_id,
filter_size_scale=1.0)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
for idx, num_filter in enumerate(mobiledet_layers[model_id]):
self.assertAllEqual(
[1, input_size / 2 ** (idx+1), input_size / 2 ** (idx+1), num_filter],
endpoints[str(idx+1)].shape.as_list())
| 3,794 | 32 | 80 | py |
models | models-master/official/vision/modeling/backbones/efficientnet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EfficientNet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import efficientnet
class EfficientNetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(32, 224)
def test_network_creation(self, input_size):
"""Test creation of EfficientNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = efficientnet.EfficientNet(model_id='b0')
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
self.assertAllEqual([1, input_size / 2**2, input_size / 2**2, 24],
endpoints['2'].shape.as_list())
self.assertAllEqual([1, input_size / 2**3, input_size / 2**3, 40],
endpoints['3'].shape.as_list())
self.assertAllEqual([1, input_size / 2**4, input_size / 2**4, 112],
endpoints['4'].shape.as_list())
self.assertAllEqual([1, input_size / 2**5, input_size / 2**5, 320],
endpoints['5'].shape.as_list())
@parameterized.parameters('b0', 'b3', 'b6')
def test_network_scaling(self, model_id):
"""Test compound scaling."""
efficientnet_params = {
'b0': 4049564,
'b3': 10783528,
'b6': 40960136,
}
tf.keras.backend.set_image_data_format('channels_last')
input_size = 32
network = efficientnet.EfficientNet(model_id=model_id, se_ratio=0.25)
self.assertEqual(network.count_params(), efficientnet_params[model_id])
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
_ = network(inputs)
@parameterized.parameters(1, 3)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = efficientnet.EfficientNet(model_id='b0', input_specs=input_specs)
inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id='b0',
se_ratio=0.25,
stochastic_depth_drop_rate=None,
use_sync_bn=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
)
network = efficientnet.EfficientNet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = efficientnet.EfficientNet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,752 | 35.086538 | 80 | py |
models | models-master/official/vision/modeling/backbones/resnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of ResNet and ResNet-RS models."""
from typing import Callable, Optional
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
layers = tf.keras.layers
# Specifications for different ResNet variants.
# Each entry specifies block configurations of the particular ResNet variant.
# Each element in the block configuration is in the following format:
# (block_fn, num_filters, block_repeats)
RESNET_SPECS = {
10: [
('residual', 64, 1),
('residual', 128, 1),
('residual', 256, 1),
('residual', 512, 1),
],
18: [
('residual', 64, 2),
('residual', 128, 2),
('residual', 256, 2),
('residual', 512, 2),
],
26: [
('residual', 64, 3),
('residual', 128, 3),
('residual', 256, 3),
('residual', 512, 3),
],
34: [
('residual', 64, 3),
('residual', 128, 4),
('residual', 256, 6),
('residual', 512, 3),
],
50: [
('bottleneck', 64, 3),
('bottleneck', 128, 4),
('bottleneck', 256, 6),
('bottleneck', 512, 3),
],
101: [
('bottleneck', 64, 3),
('bottleneck', 128, 4),
('bottleneck', 256, 23),
('bottleneck', 512, 3),
],
152: [
('bottleneck', 64, 3),
('bottleneck', 128, 8),
('bottleneck', 256, 36),
('bottleneck', 512, 3),
],
200: [
('bottleneck', 64, 3),
('bottleneck', 128, 24),
('bottleneck', 256, 36),
('bottleneck', 512, 3),
],
270: [
('bottleneck', 64, 4),
('bottleneck', 128, 29),
('bottleneck', 256, 53),
('bottleneck', 512, 4),
],
350: [
('bottleneck', 64, 4),
('bottleneck', 128, 36),
('bottleneck', 256, 72),
('bottleneck', 512, 4),
],
420: [
('bottleneck', 64, 4),
('bottleneck', 128, 44),
('bottleneck', 256, 87),
('bottleneck', 512, 4),
],
}
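# As a worked example of reading these specs: the 50-layer entry stacks
# 3 + 4 + 6 + 3 = 16 bottleneck blocks across its four block groups. Each
# bottleneck block contains 3 convolutions, so together with the stem
# convolution and the classifier layer of the original paper this gives
# 1 + 16 * 3 + 1 = 50 weighted layers, matching `model_id=50`. The second
# entry in each tuple is the filter count of the first convolution in that
# block group; bottleneck blocks expand it 4x at their output (e.g. 512 ->
# 2048 features at the last level of ResNet-50).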
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResNet(tf.keras.Model):
"""Creates ResNet and ResNet-RS family models.
This implements the Deep Residual Network from:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition.
(https://arxiv.org/pdf/1512.03385) and
Irwan Bello, William Fedus, Xianzhi Du, Ekin D. Cubuk, Aravind Srinivas,
Tsung-Yi Lin, Jonathon Shlens, Barret Zoph.
Revisiting ResNets: Improved Training and Scaling Strategies.
(https://arxiv.org/abs/2103.07579).
"""
def __init__(
self,
model_id: int,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, 3]),
depth_multiplier: float = 1.0,
stem_type: str = 'v0',
resnetd_shortcut: bool = False,
replace_stem_max_pool: bool = False,
se_ratio: Optional[float] = None,
init_stochastic_depth_rate: float = 0.0,
scale_stem: bool = True,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bn_trainable: bool = True,
**kwargs):
"""Initializes a ResNet model.
Args:
model_id: An `int` of the depth of ResNet backbone model.
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
      depth_multiplier: A `float` of the depth multiplier to uniformly scale up
all layers in channel size. This argument is also referred to as
`width_multiplier` in (https://arxiv.org/abs/2103.07579).
stem_type: A `str` of stem type of ResNet. Default to `v0`. If set to
`v1`, use ResNet-D type stem (https://arxiv.org/abs/1812.01187).
resnetd_shortcut: A `bool` of whether to use ResNet-D shortcut in
downsampling blocks.
replace_stem_max_pool: A `bool` of whether to replace the max pool in stem
        with a stride-2 conv.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
init_stochastic_depth_rate: A `float` of initial stochastic depth rate.
scale_stem: A `bool` of whether to scale stem layers.
activation: A `str` name of the activation function.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A small `float` added to variance to avoid dividing by zero.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
bn_trainable: A `bool` that indicates whether batch norm layers should be
trainable. Default to True.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._depth_multiplier = depth_multiplier
self._stem_type = stem_type
self._resnetd_shortcut = resnetd_shortcut
self._replace_stem_max_pool = replace_stem_max_pool
self._se_ratio = se_ratio
self._init_stochastic_depth_rate = init_stochastic_depth_rate
self._scale_stem = scale_stem
self._use_sync_bn = use_sync_bn
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._norm = layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._bn_trainable = bn_trainable
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
# Build ResNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
x = self._stem(inputs)
endpoints = {}
for i, spec in enumerate(RESNET_SPECS[model_id]):
if spec[0] == 'residual':
block_fn = nn_blocks.ResidualBlock
elif spec[0] == 'bottleneck':
block_fn = nn_blocks.BottleneckBlock
else:
raise ValueError('Block fn `{}` is not supported.'.format(spec[0]))
x = self._block_group(
inputs=x,
filters=int(spec[1] * self._depth_multiplier),
strides=(1 if i == 0 else 2),
block_fn=block_fn,
block_repeats=spec[2],
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 2, 5),
name='block_group_l{}'.format(i + 2))
endpoints[str(i + 2)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(ResNet, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _stem(self, inputs):
stem_depth_multiplier = self._depth_multiplier if self._scale_stem else 1.0
if self._stem_type == 'v0':
x = layers.Conv2D(
filters=int(64 * stem_depth_multiplier),
kernel_size=7,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
)(inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)(x)
x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x)
elif self._stem_type == 'v1':
x = layers.Conv2D(
filters=int(32 * stem_depth_multiplier),
kernel_size=3,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
)(inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)(x)
x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x)
x = layers.Conv2D(
filters=int(32 * stem_depth_multiplier),
kernel_size=3,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
)(x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)(x)
x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x)
x = layers.Conv2D(
filters=int(64 * stem_depth_multiplier),
kernel_size=3,
strides=1,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
)(x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)(x)
x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x)
else:
raise ValueError('Stem type {} not supported.'.format(self._stem_type))
if self._replace_stem_max_pool:
x = layers.Conv2D(
filters=int(64 * self._depth_multiplier),
kernel_size=3,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
)(x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
trainable=self._bn_trainable,
synchronized=self._use_sync_bn,
)(x)
x = tf_utils.get_activation(self._activation, use_keras_layer=True)(x)
else:
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
return x
def _block_group(self,
inputs: tf.Tensor,
filters: int,
strides: int,
block_fn: Callable[..., tf.keras.layers.Layer],
block_repeats: int = 1,
stochastic_depth_drop_rate: float = 0.0,
name: str = 'block_group'):
"""Creates one group of blocks for the ResNet model.
Args:
inputs: A `tf.Tensor` of size `[batch, channels, height, width]`.
filters: An `int` number of filters for the first convolution of the
layer.
strides: An `int` stride to use for the first convolution of the layer.
If greater than 1, this layer will downsample the input.
block_fn: The type of block group. Either `nn_blocks.ResidualBlock` or
`nn_blocks.BottleneckBlock`.
block_repeats: An `int` number of blocks contained in the layer.
stochastic_depth_drop_rate: A `float` of drop rate of the current block
group.
name: A `str` name for the block.
Returns:
The output `tf.Tensor` of the block layer.
"""
x = block_fn(
filters=filters,
strides=strides,
use_projection=True,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
se_ratio=self._se_ratio,
resnetd_shortcut=self._resnetd_shortcut,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
bn_trainable=self._bn_trainable)(
inputs)
for _ in range(1, block_repeats):
x = block_fn(
filters=filters,
strides=1,
use_projection=False,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
se_ratio=self._se_ratio,
resnetd_shortcut=self._resnetd_shortcut,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
bn_trainable=self._bn_trainable)(
x)
return tf.keras.layers.Activation('linear', name=name)(x)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'depth_multiplier': self._depth_multiplier,
'stem_type': self._stem_type,
'resnetd_shortcut': self._resnetd_shortcut,
'replace_stem_max_pool': self._replace_stem_max_pool,
'activation': self._activation,
'se_ratio': self._se_ratio,
'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
'scale_stem': self._scale_stem,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'bn_trainable': self._bn_trainable
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('resnet')
def build_resnet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds ResNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'resnet', (f'Inconsistent backbone type '
f'{backbone_type}')
return ResNet(
model_id=backbone_cfg.model_id,
input_specs=input_specs,
depth_multiplier=backbone_cfg.depth_multiplier,
stem_type=backbone_cfg.stem_type,
resnetd_shortcut=backbone_cfg.resnetd_shortcut,
replace_stem_max_pool=backbone_cfg.replace_stem_max_pool,
se_ratio=backbone_cfg.se_ratio,
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
scale_stem=backbone_cfg.scale_stem,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer,
bn_trainable=backbone_cfg.bn_trainable)
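# A minimal usage sketch (illustrative, not part of the library API): build a
# ResNet-50 backbone directly and inspect its multi-scale outputs. The input
# resolution below is an assumption for the example.
if __name__ == '__main__':
  backbone = ResNet(model_id=50)
  images = tf.keras.Input(shape=(224, 224, 3), batch_size=1)
  features = backbone(images)
  # Endpoints are keyed by level: '2' is stride 4, ..., '5' is stride 32.
  # For 224x224 inputs, features['5'] has shape (1, 7, 7, 2048).
  for level, feature in features.items():
    print(level, feature.shape)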
| 16,374 | 35.797753 | 139 | py |
models | models-master/official/vision/modeling/backbones/resnet_deeplab_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for resnet_deeplab models."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.vision.modeling.backbones import resnet_deeplab
class ResNetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 50, 4, 8),
(128, 101, 4, 8),
(128, 152, 4, 8),
(128, 200, 4, 8),
(128, 50, 4, 16),
(128, 101, 4, 16),
(128, 152, 4, 16),
(128, 200, 4, 16),
)
def test_network_creation(self, input_size, model_id,
endpoint_filter_scale, output_stride):
"""Test creation of ResNet models."""
tf.keras.backend.set_image_data_format('channels_last')
network = resnet_deeplab.DilatedResNet(model_id=model_id,
output_stride=output_stride)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
print(endpoints)
self.assertAllEqual([
1, input_size / output_stride, input_size / output_stride,
512 * endpoint_filter_scale
], endpoints[str(int(np.math.log2(output_stride)))].shape.as_list())
@parameterized.parameters(
('v0', None, 0.0, False, False),
('v1', None, 0.0, False, False),
('v1', 0.25, 0.0, False, False),
('v1', 0.25, 0.2, False, False),
('v1', 0.25, 0.0, True, False),
('v1', 0.25, 0.2, False, True),
('v1', None, 0.2, True, True),
)
def test_network_features(self, stem_type, se_ratio,
init_stochastic_depth_rate, resnetd_shortcut,
replace_stem_max_pool):
"""Test additional features of ResNet models."""
input_size = 128
model_id = 50
endpoint_filter_scale = 4
output_stride = 8
tf.keras.backend.set_image_data_format('channels_last')
network = resnet_deeplab.DilatedResNet(
model_id=model_id,
output_stride=output_stride,
stem_type=stem_type,
resnetd_shortcut=resnetd_shortcut,
replace_stem_max_pool=replace_stem_max_pool,
se_ratio=se_ratio,
init_stochastic_depth_rate=init_stochastic_depth_rate)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
print(endpoints)
self.assertAllEqual([
1, input_size / output_stride, input_size / output_stride,
512 * endpoint_filter_scale
], endpoints[str(int(np.math.log2(output_stride)))].shape.as_list())
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
use_sync_bn=[False, True],
))
def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(64, 128, 128, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
network = resnet_deeplab.DilatedResNet(
model_id=50, output_stride=8, use_sync_bn=use_sync_bn)
_ = network(inputs)
@parameterized.parameters(1, 3, 4)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = resnet_deeplab.DilatedResNet(
model_id=50, output_stride=8, input_specs=input_specs)
inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=50,
output_stride=8,
stem_type='v0',
se_ratio=0.25,
init_stochastic_depth_rate=0.2,
resnetd_shortcut=False,
replace_stem_max_pool=False,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = resnet_deeplab.DilatedResNet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = resnet_deeplab.DilatedResNet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 5,510 | 34.326923 | 80 | py |
models | models-master/official/vision/modeling/backbones/factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory functions."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.vision.configs import backbones as backbones_cfg
from official.vision.configs import backbones_3d as backbones_3d_cfg
from official.vision.configs import common as common_cfg
from official.vision.modeling import backbones
from official.vision.modeling.backbones import factory
class FactoryTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(model_id=[18, 34, 50, 101, 152],))
def test_resnet_creation(self, model_id):
"""Test creation of ResNet models."""
network = backbones.ResNet(
model_id=model_id, se_ratio=0.0, norm_momentum=0.99, norm_epsilon=1e-5)
backbone_config = backbones_cfg.Backbone(
type='resnet',
resnet=backbones_cfg.ResNet(model_id=model_id, se_ratio=0.0))
norm_activation_config = common_cfg.NormActivation(
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)
factory_network = factory.build_backbone(
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
backbone_config=backbone_config,
norm_activation_config=norm_activation_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
@combinations.generate(
combinations.combine(
model_id=['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7'],
se_ratio=[0.0, 0.25],
))
def test_efficientnet_creation(self, model_id, se_ratio):
"""Test creation of EfficientNet models."""
network = backbones.EfficientNet(
model_id=model_id,
se_ratio=se_ratio,
norm_momentum=0.99,
norm_epsilon=1e-5)
backbone_config = backbones_cfg.Backbone(
type='efficientnet',
efficientnet=backbones_cfg.EfficientNet(
model_id=model_id, se_ratio=se_ratio))
norm_activation_config = common_cfg.NormActivation(
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)
factory_network = factory.build_backbone(
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
backbone_config=backbone_config,
norm_activation_config=norm_activation_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
@combinations.generate(
combinations.combine(
model_id=['MobileNetV1', 'MobileNetV2',
'MobileNetV3Large', 'MobileNetV3Small',
'MobileNetV3EdgeTPU'],
filter_size_scale=[1.0, 0.75],
))
def test_mobilenet_creation(self, model_id, filter_size_scale):
"""Test creation of Mobilenet models."""
network = backbones.MobileNet(
model_id=model_id,
filter_size_scale=filter_size_scale,
norm_momentum=0.99,
norm_epsilon=1e-5)
backbone_config = backbones_cfg.Backbone(
type='mobilenet',
mobilenet=backbones_cfg.MobileNet(
model_id=model_id, filter_size_scale=filter_size_scale))
norm_activation_config = common_cfg.NormActivation(
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)
factory_network = factory.build_backbone(
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
backbone_config=backbone_config,
norm_activation_config=norm_activation_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
@combinations.generate(combinations.combine(model_id=['49'],))
def test_spinenet_creation(self, model_id):
"""Test creation of SpineNet models."""
input_size = 128
min_level = 3
max_level = 7
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3])
network = backbones.SpineNet(
input_specs=input_specs,
min_level=min_level,
max_level=max_level,
norm_momentum=0.99,
norm_epsilon=1e-5)
backbone_config = backbones_cfg.Backbone(
type='spinenet',
spinenet=backbones_cfg.SpineNet(model_id=model_id))
norm_activation_config = common_cfg.NormActivation(
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)
factory_network = factory.build_backbone(
input_specs=tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3]),
backbone_config=backbone_config,
norm_activation_config=norm_activation_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
@combinations.generate(
combinations.combine(model_id=[38, 56, 104],))
def test_revnet_creation(self, model_id):
"""Test creation of RevNet models."""
network = backbones.RevNet(
model_id=model_id, norm_momentum=0.99, norm_epsilon=1e-5)
backbone_config = backbones_cfg.Backbone(
type='revnet',
revnet=backbones_cfg.RevNet(model_id=model_id))
norm_activation_config = common_cfg.NormActivation(
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)
factory_network = factory.build_backbone(
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
backbone_config=backbone_config,
norm_activation_config=norm_activation_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
@combinations.generate(combinations.combine(model_type=['resnet_3d'],))
def test_resnet_3d_creation(self, model_type):
"""Test creation of ResNet 3D models."""
backbone_cfg = backbones_3d_cfg.Backbone3D(type=model_type).get()
temporal_strides = []
temporal_kernel_sizes = []
for block_spec in backbone_cfg.block_specs:
temporal_strides.append(block_spec.temporal_strides)
temporal_kernel_sizes.append(block_spec.temporal_kernel_sizes)
_ = backbones.ResNet3D(
model_id=backbone_cfg.model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
norm_momentum=0.99,
norm_epsilon=1e-5)
@combinations.generate(
combinations.combine(
model_id=[
'MobileDetCPU',
'MobileDetDSP',
'MobileDetEdgeTPU',
'MobileDetGPU'],
filter_size_scale=[1.0, 0.75],
))
def test_mobiledet_creation(self, model_id, filter_size_scale):
"""Test creation of Mobiledet models."""
network = backbones.MobileDet(
model_id=model_id,
filter_size_scale=filter_size_scale,
norm_momentum=0.99,
norm_epsilon=1e-5)
backbone_config = backbones_cfg.Backbone(
type='mobiledet',
mobiledet=backbones_cfg.MobileDet(
model_id=model_id, filter_size_scale=filter_size_scale))
norm_activation_config = common_cfg.NormActivation(
norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)
factory_network = factory.build_backbone(
input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
backbone_config=backbone_config,
norm_activation_config=norm_activation_config)
network_config = network.get_config()
factory_network_config = factory_network.get_config()
self.assertEqual(network_config, factory_network_config)
if __name__ == '__main__':
tf.test.main()
| 8,383 | 35.77193 | 79 | py |
models | models-master/official/vision/modeling/backbones/vit_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for VIT."""
import math
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import vit
class VisionTransformerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(224, 85798656),
(256, 85844736),
)
def test_network_creation(self, input_size, params_count):
"""Test creation of VisionTransformer family models."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(
shape=[2, input_size, input_size, 3])
network = vit.VisionTransformer(input_specs=input_specs)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
_ = network(inputs)
self.assertEqual(network.count_params(), params_count)
@parameterized.product(
patch_size=[6, 4],
output_2d_feature_maps=[True, False],
pooler=['none', 'gap', 'token'],
)
  def test_network_with_different_configs(
self, patch_size, output_2d_feature_maps, pooler):
tf.keras.backend.set_image_data_format('channels_last')
input_size = 24
expected_feat_level = str(round(math.log2(patch_size)))
num_patch_rows = input_size // patch_size
input_specs = tf.keras.layers.InputSpec(
shape=[2, input_size, input_size, 3])
network = vit.VisionTransformer(
input_specs=input_specs,
patch_size=patch_size,
pooler=pooler,
hidden_size=8,
mlp_dim=8,
num_layers=1,
num_heads=2,
representation_size=16,
output_2d_feature_maps=output_2d_feature_maps)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
output = network(inputs)
if pooler == 'none':
self.assertEqual(
output['encoded_tokens'].shape, [1, num_patch_rows**2, 16])
else:
self.assertEqual(output['pre_logits'].shape, [1, 1, 1, 16])
if output_2d_feature_maps:
self.assertIn(expected_feat_level, output)
self.assertIn(expected_feat_level, network.output_specs)
self.assertEqual(
network.output_specs[expected_feat_level][1:],
[num_patch_rows, num_patch_rows, 8])
else:
self.assertNotIn(expected_feat_level, output)
def test_posembedding_interpolation(self):
tf.keras.backend.set_image_data_format('channels_last')
input_size = 256
input_specs = tf.keras.layers.InputSpec(
shape=[2, input_size, input_size, 3])
network = vit.VisionTransformer(
input_specs=input_specs,
patch_size=16,
pooler='gap',
pos_embed_shape=(14, 14)) # (224 // 16)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
output = network(inputs)['pre_logits']
self.assertEqual(output.shape, [1, 1, 1, 768])
if __name__ == '__main__':
tf.test.main()
| 3,453 | 33.19802 | 76 | py |
models | models-master/official/vision/modeling/backbones/vit.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisionTransformer models."""
import math
from typing import Optional, Tuple
from absl import logging
import tensorflow as tf
from official.modeling import activations
from official.vision.modeling.backbones import factory
from official.vision.modeling.backbones.vit_specs import VIT_SPECS
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
layers = tf.keras.layers
class AddPositionEmbs(layers.Layer):
"""Adds (optionally learned) positional embeddings to the inputs."""
def __init__(self,
posemb_init: Optional[tf.keras.initializers.Initializer] = None,
posemb_origin_shape: Optional[Tuple[int, int]] = None,
posemb_target_shape: Optional[Tuple[int, int]] = None,
**kwargs):
"""Constructs Positional Embedding module.
    The logic of this module is: the length of the learnable positional
    embeddings is determined by the input shape, or by posemb_origin_shape if
    provided, at construction time. If posemb_target_shape is provided and
    differs from that length, the embeddings are interpolated during the
    forward call.
Args:
posemb_init: The positional embedding initializer.
posemb_origin_shape: The intended positional embedding shape.
posemb_target_shape: The potential target shape positional embedding may
be interpolated to.
**kwargs: other args.
"""
super().__init__(**kwargs)
self.posemb_init = posemb_init
self.posemb_origin_shape = posemb_origin_shape
self.posemb_target_shape = posemb_target_shape
def build(self, inputs_shape):
if self.posemb_origin_shape is not None:
pos_emb_length = self.posemb_origin_shape[0] * self.posemb_origin_shape[1]
else:
pos_emb_length = inputs_shape[1]
pos_emb_shape = (1, pos_emb_length, inputs_shape[2])
self.pos_embedding = self.add_weight(
'pos_embedding', pos_emb_shape, initializer=self.posemb_init)
def _interpolate(self, pos_embedding: tf.Tensor, from_shape: Tuple[int, int],
to_shape: Tuple[int, int]) -> tf.Tensor:
"""Interpolates the positional embeddings."""
    logging.info('Interpolating positional embedding from length: %s to %s',
from_shape, to_shape)
grid_emb = tf.reshape(pos_embedding, [1] + list(from_shape) + [-1])
# NOTE: Using BILINEAR interpolation by default.
grid_emb = tf.image.resize(grid_emb, to_shape)
return tf.reshape(grid_emb, [1, to_shape[0] * to_shape[1], -1])
def call(self, inputs, inputs_positions=None):
del inputs_positions
pos_embedding = self.pos_embedding
# inputs.shape is (batch_size, seq_len, emb_dim).
if inputs.shape[1] != pos_embedding.shape[1]:
pos_embedding = self._interpolate(
pos_embedding,
from_shape=self.posemb_origin_shape,
to_shape=self.posemb_target_shape)
pos_embedding = tf.cast(pos_embedding, inputs.dtype)
return inputs + pos_embedding
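# Illustrative note: with a ViT-B/16 checkpoint trained at 224x224 inputs, the
# stored embedding grid is 14x14 (224 // 16), i.e. 196 positions. Feeding
# 256x256 images yields a 16x16 patch grid, so the layer reshapes the 196
# embeddings to 14x14, bilinearly resizes them to 16x16, and flattens them
# back to 256 positions before adding them to the inputs.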
class TokenLayer(layers.Layer):
"""A simple layer to wrap token parameters."""
def build(self, inputs_shape):
self.cls = self.add_weight(
'cls', (1, 1, inputs_shape[-1]), initializer='zeros')
def call(self, inputs):
cls = tf.cast(self.cls, inputs.dtype)
cls = cls + tf.zeros_like(inputs[:, 0:1]) # A hacky way to tile.
x = tf.concat([cls, inputs], axis=1)
return x
class Encoder(layers.Layer):
"""Transformer Encoder."""
def __init__(self,
num_layers,
mlp_dim,
num_heads,
dropout_rate=0.1,
attention_dropout_rate=0.1,
kernel_regularizer=None,
inputs_positions=None,
init_stochastic_depth_rate=0.0,
kernel_initializer='glorot_uniform',
add_pos_embed=True,
pos_embed_origin_shape=None,
pos_embed_target_shape=None,
layer_scale_init_value=0.0,
transformer_partition_dims=None,
**kwargs):
super().__init__(**kwargs)
self._num_layers = num_layers
self._mlp_dim = mlp_dim
self._num_heads = num_heads
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._kernel_regularizer = kernel_regularizer
self._inputs_positions = inputs_positions
self._init_stochastic_depth_rate = init_stochastic_depth_rate
self._kernel_initializer = kernel_initializer
self._add_pos_embed = add_pos_embed
self._pos_embed_origin_shape = pos_embed_origin_shape
self._pos_embed_target_shape = pos_embed_target_shape
self._layer_scale_init_value = layer_scale_init_value
self._transformer_partition_dims = transformer_partition_dims
def build(self, input_shape):
if self._add_pos_embed:
self._pos_embed = AddPositionEmbs(
posemb_init=tf.keras.initializers.RandomNormal(stddev=0.02),
posemb_origin_shape=self._pos_embed_origin_shape,
posemb_target_shape=self._pos_embed_target_shape,
name='posembed_input')
self._dropout = layers.Dropout(rate=self._dropout_rate)
self._encoder_layers = []
# Set layer norm epsilons to 1e-6 to be consistent with JAX implementation.
# https://flax.readthedocs.io/en/latest/_autosummary/flax.deprecated.nn.LayerNorm.html
for i in range(self._num_layers):
encoder_layer = nn_blocks.TransformerEncoderBlock(
inner_activation=activations.gelu,
num_attention_heads=self._num_heads,
inner_dim=self._mlp_dim,
output_dropout=self._dropout_rate,
attention_dropout=self._attention_dropout_rate,
kernel_regularizer=self._kernel_regularizer,
kernel_initializer=self._kernel_initializer,
norm_first=True,
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 1, self._num_layers),
norm_epsilon=1e-6,
layer_scale_init_value=self._layer_scale_init_value,
transformer_partition_dims=self._transformer_partition_dims)
self._encoder_layers.append(encoder_layer)
self._norm = layers.LayerNormalization(epsilon=1e-6)
super().build(input_shape)
def call(self, inputs, training=None):
x = inputs
if self._add_pos_embed:
x = self._pos_embed(x, inputs_positions=self._inputs_positions)
x = self._dropout(x, training=training)
for encoder_layer in self._encoder_layers:
x = encoder_layer(x, training=training)
x = self._norm(x)
return x
def get_config(self):
config = super().get_config()
updates = {
'num_layers': self._num_layers,
'mlp_dim': self._mlp_dim,
'num_heads': self._num_heads,
'dropout_rate': self._dropout_rate,
'attention_dropout_rate': self._attention_dropout_rate,
'kernel_regularizer': self._kernel_regularizer,
'inputs_positions': self._inputs_positions,
'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
'kernel_initializer': self._kernel_initializer,
'add_pos_embed': self._add_pos_embed,
'pos_embed_origin_shape': self._pos_embed_origin_shape,
'pos_embed_target_shape': self._pos_embed_target_shape,
'layer_scale_init_value': self._layer_scale_init_value,
'transformer_partition_dims': self._transformer_partition_dims,
}
config.update(updates)
return config
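# A minimal sketch of using the Encoder on its own (all values below are
# illustrative assumptions): it maps a [batch, seq_len, hidden] token sequence
# to an output of the same shape, optionally adding positional embeddings
# first, e.g.
#   encoder = Encoder(num_layers=2, mlp_dim=64, num_heads=2)
#   tokens = tf.ones([8, 196, 32])
#   encoded = encoder(tokens, training=False)  # -> shape [8, 196, 32]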
class VisionTransformer(tf.keras.Model):
"""Class to build VisionTransformer family model."""
def __init__(
self,
mlp_dim=3072,
num_heads=12,
num_layers=12,
attention_dropout_rate=0.0,
dropout_rate=0.1,
init_stochastic_depth_rate=0.0,
input_specs=layers.InputSpec(shape=[None, None, None, 3]),
patch_size=16,
hidden_size=768,
representation_size=0,
pooler='token',
kernel_regularizer=None,
original_init: bool = True,
output_encoded_tokens: bool = True,
output_2d_feature_maps: bool = False,
pos_embed_shape: Optional[Tuple[int, int]] = None,
layer_scale_init_value: float = 0.0,
transformer_partition_dims: Optional[Tuple[int, int, int, int]] = None,
):
"""VisionTransformer initialization function."""
self._mlp_dim = mlp_dim
self._num_heads = num_heads
self._num_layers = num_layers
self._hidden_size = hidden_size
self._patch_size = patch_size
inputs = tf.keras.Input(shape=input_specs.shape[1:])
x = layers.Conv2D(
filters=hidden_size,
kernel_size=patch_size,
strides=patch_size,
padding='valid',
kernel_regularizer=kernel_regularizer,
kernel_initializer='lecun_normal' if original_init else 'he_uniform')(
inputs)
if tf.keras.backend.image_data_format() == 'channels_last':
rows_axis, cols_axis = (1, 2)
else:
rows_axis, cols_axis = (2, 3)
      # The reshape below assumes the data_format is 'channels_last', so
      # transpose to that. Once the data is flattened by the reshape, the
      # data_format is irrelevant, so no need to update
      # tf.keras.backend.image_data_format.
      x = tf.transpose(x, perm=[0, 2, 3, 1])
pos_embed_target_shape = (x.shape[rows_axis], x.shape[cols_axis])
feat_h = input_specs.shape[rows_axis] // patch_size
feat_w = input_specs.shape[cols_axis] // patch_size
seq_len = feat_h * feat_w
x = tf.reshape(x, [-1, seq_len, hidden_size])
# If we want to add a class token, add it here.
if pooler == 'token':
x = TokenLayer(name='cls')(x)
x = Encoder(
num_layers=num_layers,
mlp_dim=mlp_dim,
num_heads=num_heads,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
kernel_regularizer=kernel_regularizer,
kernel_initializer='glorot_uniform' if original_init else dict(
class_name='TruncatedNormal', config=dict(stddev=.02)),
init_stochastic_depth_rate=init_stochastic_depth_rate,
pos_embed_origin_shape=pos_embed_shape,
pos_embed_target_shape=pos_embed_target_shape,
layer_scale_init_value=layer_scale_init_value)(
x)
if pooler == 'token':
output_feature = x[:, 1:]
x = x[:, 0]
elif pooler == 'gap':
output_feature = x
x = tf.reduce_mean(x, axis=1)
elif pooler == 'none':
output_feature = x
x = tf.identity(x, name='encoded_tokens')
else:
raise ValueError(f'unrecognized pooler type: {pooler}')
endpoints = {}
if output_2d_feature_maps:
# Use the closest feature level.
feat_level = round(math.log2(patch_size))
logging.info(
'VisionTransformer patch size %d and feature level: %d',
patch_size,
feat_level,
)
endpoints[str(feat_level)] = tf.reshape(
output_feature, [-1, feat_h, feat_w, x.shape.as_list()[-1]])
    # Don't include `pre_logits` or `encoded_tokens` to support decoders.
self._output_specs = {k: v.shape for k, v in endpoints.items()}
if representation_size:
x = layers.Dense(
representation_size,
kernel_regularizer=kernel_regularizer,
name='pre_logits',
kernel_initializer='lecun_normal' if original_init else 'he_uniform',
)(x)
x = tf.nn.tanh(x)
else:
x = tf.identity(x, name='pre_logits')
if pooler == 'none':
if output_encoded_tokens:
endpoints['encoded_tokens'] = x
else:
endpoints['pre_logits'] = tf.reshape(
x, [-1, 1, 1, representation_size or hidden_size])
super().__init__(inputs=inputs, outputs=endpoints)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('vit')
def build_vit(input_specs,
backbone_config,
norm_activation_config,
l2_regularizer=None):
"""Build ViT model."""
del norm_activation_config
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'vit', (f'Inconsistent backbone type '
f'{backbone_type}')
backbone_cfg.override(VIT_SPECS[backbone_cfg.model_name])
logging.info(
(
'ViT specs: mlp_dim=%d, num_heads=%d, num_layers=%d,'
'patch_size=%d, hidden_size=%d, representation_size=%d.'
),
backbone_cfg.transformer.mlp_dim,
backbone_cfg.transformer.num_heads,
backbone_cfg.transformer.num_layers,
backbone_cfg.patch_size,
backbone_cfg.hidden_size,
backbone_cfg.representation_size,
)
return VisionTransformer(
mlp_dim=backbone_cfg.transformer.mlp_dim,
num_heads=backbone_cfg.transformer.num_heads,
num_layers=backbone_cfg.transformer.num_layers,
attention_dropout_rate=backbone_cfg.transformer.attention_dropout_rate,
dropout_rate=backbone_cfg.transformer.dropout_rate,
init_stochastic_depth_rate=backbone_cfg.init_stochastic_depth_rate,
input_specs=input_specs,
patch_size=backbone_cfg.patch_size,
hidden_size=backbone_cfg.hidden_size,
representation_size=backbone_cfg.representation_size,
pooler=backbone_cfg.pooler,
kernel_regularizer=l2_regularizer,
original_init=backbone_cfg.original_init,
output_encoded_tokens=backbone_cfg.output_encoded_tokens,
output_2d_feature_maps=backbone_cfg.output_2d_feature_maps,
layer_scale_init_value=backbone_cfg.layer_scale_init_value,
pos_embed_shape=backbone_cfg.pos_embed_shape,
transformer_partition_dims=backbone_cfg.transformer_partition_dims)
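# A minimal usage sketch (illustrative, not part of the library API): a ViT
# with 16-pixel patches on 224x224 inputs produces a 14x14 patch grid, i.e.
# 196 tokens plus one class token when pooler='token'.
if __name__ == '__main__':
  model = VisionTransformer(
      mlp_dim=3072,
      num_heads=12,
      num_layers=12,
      input_specs=layers.InputSpec(shape=[None, 224, 224, 3]),
      patch_size=16,
      hidden_size=768,
      pooler='token')
  images = tf.keras.Input(shape=(224, 224, 3), batch_size=1)
  outputs = model(images)
  print(outputs['pre_logits'].shape)  # (1, 1, 1, 768)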
| 14,428 | 36.971053 | 90 | py |
models | models-master/official/vision/modeling/backbones/revnet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of RevNet."""
from typing import Any, Callable, Dict, Optional
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
# Specifications for different RevNet variants.
# Each entry specifies block configurations of the particular RevNet variant.
# Each element in the block configuration is in the following format:
# (block_fn, num_filters, block_repeats)
REVNET_SPECS = {
38: [
('residual', 32, 3),
('residual', 64, 3),
('residual', 112, 3),
],
56: [
('bottleneck', 128, 2),
('bottleneck', 256, 2),
('bottleneck', 512, 3),
('bottleneck', 832, 2),
],
104: [
('bottleneck', 128, 2),
('bottleneck', 256, 2),
('bottleneck', 512, 11),
('bottleneck', 832, 2),
],
}
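# Note that every filter count above is even: each reversible block splits its
# input in half along the channel dimension and routes the halves through the
# `f` and `g` inner blocks, so an odd filter count cannot be split (see the
# ValueError raised in `RevNet.__init__`).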
@tf.keras.utils.register_keras_serializable(package='Vision')
class RevNet(tf.keras.Model):
"""Creates a Reversible ResNet (RevNet) family model.
This implements:
Aidan N. Gomez, Mengye Ren, Raquel Urtasun, Roger B. Grosse.
The Reversible Residual Network: Backpropagation Without Storing
Activations.
(https://arxiv.org/pdf/1707.04585.pdf)
"""
def __init__(
self,
model_id: int,
input_specs: tf.keras.layers.InputSpec = tf.keras.layers.InputSpec(
shape=[None, None, None, 3]),
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a RevNet model.
Args:
model_id: An `int` of depth/id of ResNet backbone model.
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
activation: A `str` name of the activation function.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._norm = tf.keras.layers.BatchNormalization
axis = -1 if tf.keras.backend.image_data_format() == 'channels_last' else 1
# Build RevNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
x = tf.keras.layers.Conv2D(
filters=REVNET_SPECS[model_id][0][1],
kernel_size=7, strides=2, use_bias=False, padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer)(inputs)
x = self._norm(
axis=axis,
momentum=norm_momentum,
epsilon=norm_epsilon,
synchronized=use_sync_bn)(x)
x = tf_utils.get_activation(activation)(x)
x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
endpoints = {}
for i, spec in enumerate(REVNET_SPECS[model_id]):
if spec[0] == 'residual':
inner_block_fn = nn_blocks.ResidualInner
elif spec[0] == 'bottleneck':
inner_block_fn = nn_blocks.BottleneckResidualInner
else:
raise ValueError('Block fn `{}` is not supported.'.format(spec[0]))
if spec[1] % 2 != 0:
raise ValueError('Number of output filters must be even to ensure '
'splitting in channel dimension for reversible blocks')
x = self._block_group(
inputs=x,
filters=spec[1],
strides=(1 if i == 0 else 2),
inner_block_fn=inner_block_fn,
block_repeats=spec[2],
batch_norm_first=(i != 0), # Only skip on first block
name='revblock_group_{}'.format(i + 2))
endpoints[str(i + 2)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(RevNet, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs: tf.Tensor,
filters: int,
strides: int,
inner_block_fn: Callable[..., tf.keras.layers.Layer],
block_repeats: int,
batch_norm_first: bool,
name: str = 'revblock_group') -> tf.Tensor:
"""Creates one reversible block for RevNet model.
Args:
inputs: A `tf.Tensor` of size `[batch, channels, height, width]`.
filters: An `int` number of filters for the first convolution of the
layer.
strides: An `int` stride to use for the first convolution of the layer. If
greater than 1, this block group will downsample the input.
inner_block_fn: Either `nn_blocks.ResidualInner` or
`nn_blocks.BottleneckResidualInner`.
block_repeats: An `int` number of blocks contained in this block group.
batch_norm_first: A `bool` that specifies whether to apply
BatchNormalization and activation layer before feeding into convolution
layers.
name: A `str` name for the block.
Returns:
The output `tf.Tensor` of the block layer.
"""
x = inputs
for i in range(block_repeats):
is_first_block = i == 0
# Only first residual layer in block gets downsampled
curr_strides = strides if is_first_block else 1
f = inner_block_fn(
filters=filters // 2,
strides=curr_strides,
batch_norm_first=batch_norm_first and is_first_block,
kernel_regularizer=self._kernel_regularizer)
g = inner_block_fn(
filters=filters // 2,
strides=1,
batch_norm_first=batch_norm_first and is_first_block,
kernel_regularizer=self._kernel_regularizer)
x = nn_blocks.ReversibleLayer(f, g)(x)
return tf.identity(x, name=name)
def get_config(self) -> Dict[str, Any]:
config_dict = {
'model_id': self._model_id,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
}
return config_dict
@classmethod
def from_config(cls,
config: Dict[str, Any],
custom_objects: Optional[Any] = None) -> tf.keras.Model:
return cls(**config)
@property
def output_specs(self) -> Dict[int, tf.TensorShape]:
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs # pytype: disable=bad-return-type # trace-all-classes
@factory.register_backbone_builder('revnet')
def build_revnet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds RevNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'revnet', (f'Inconsistent backbone type '
f'{backbone_type}')
return RevNet(
model_id=backbone_cfg.model_id,
input_specs=input_specs,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
| 8,787 | 36.716738 | 139 | py |
models | models-master/official/vision/modeling/backbones/factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbone registers and factory method.
One can register a new backbone model with the following two steps:
1. Import the factory and register the builder in the backbone file.
2. Import the backbone class and add a build function in __init__.py.
```
# my_backbone.py
from modeling.backbones import factory
class MyBackbone():
...
@factory.register_backbone_builder('my_backbone')
def build_my_backbone():
return MyBackbone()
# backbones/__init__.py adds import
from modeling.backbones.my_backbone import MyBackbone
```
If the MyBackbone class should only be used by a specific binary, do not import
the backbone module in backbones/__init__.py; instead, import it in the place
that uses it.
"""
from typing import Sequence, Union
# Import libraries
import tensorflow as tf
from official.core import registry
from official.modeling import hyperparams
_REGISTERED_BACKBONE_CLS = {}
def register_backbone_builder(key: str):
"""Decorates a builder of backbone class.
The builder should be a Callable (a class or a function).
This decorator supports registration of backbone builder as follows:
```
class MyBackbone(tf.keras.Model):
pass
@register_backbone_builder('mybackbone')
def builder(input_specs, config, l2_reg):
return MyBackbone(...)
# Builds a MyBackbone object.
  my_backbone = build_backbone(input_specs, config, l2_reg)
```
Args:
key: A `str` of key to look up the builder.
Returns:
    A callable that can be used as a decorator to register the decorated
    backbone builder under the given key.
"""
return registry.register(_REGISTERED_BACKBONE_CLS, key)
def build_backbone(input_specs: Union[tf.keras.layers.InputSpec,
Sequence[tf.keras.layers.InputSpec]],
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: tf.keras.regularizers.Regularizer = None,
**kwargs) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Builds backbone from a config.
Args:
input_specs: A (sequence of) `tf.keras.layers.InputSpec` of input.
backbone_config: A `OneOfConfig` of backbone config.
norm_activation_config: A config for normalization/activation layer.
l2_regularizer: A `tf.keras.regularizers.Regularizer` object. Default to
None.
**kwargs: Additional keyword args to be passed to backbone builder.
Returns:
A `tf.keras.Model` instance of the backbone.
"""
backbone_builder = registry.lookup(_REGISTERED_BACKBONE_CLS,
backbone_config.type)
return backbone_builder(
input_specs=input_specs,
backbone_config=backbone_config,
norm_activation_config=norm_activation_config,
l2_regularizer=l2_regularizer,
**kwargs)
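# A minimal usage sketch (illustrative, not part of the library API): building
# a registered backbone from config objects, mirroring how the factory is
# exercised in factory_test.py. Importing the backbones package registers the
# concrete builders before the lookup.
if __name__ == '__main__':
  from official.vision.configs import backbones as backbones_cfg
  from official.vision.configs import common as common_cfg
  from official.vision.modeling import backbones as _backbones  # registers builders
  from official.vision.modeling.backbones import factory as backbone_factory

  resnet_backbone = backbone_factory.build_backbone(
      input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
      backbone_config=backbones_cfg.Backbone(
          type='resnet', resnet=backbones_cfg.ResNet(model_id=50)),
      norm_activation_config=common_cfg.NormActivation(use_sync_bn=False))
  print(resnet_backbone.output_specs)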
| 3,494 | 29.929204 | 106 | py |
models | models-master/official/vision/modeling/backbones/resnet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for resnet."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.vision.modeling.backbones import resnet
class ResNetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 10, 1),
(128, 18, 1),
(128, 26, 1),
(128, 34, 1),
(128, 50, 4),
(128, 101, 4),
(128, 152, 4),
)
def test_network_creation(self, input_size, model_id,
endpoint_filter_scale):
"""Test creation of ResNet family models."""
resnet_params = {
10: 4915904,
18: 11190464,
26: 17465024,
34: 21306048,
50: 23561152,
101: 42605504,
152: 58295232,
}
tf.keras.backend.set_image_data_format('channels_last')
network = resnet.ResNet(model_id=model_id)
self.assertEqual(network.count_params(), resnet_params[model_id])
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
self.assertAllEqual(
[1, input_size / 2**2, input_size / 2**2, 64 * endpoint_filter_scale],
endpoints['2'].shape.as_list())
self.assertAllEqual(
[1, input_size / 2**3, input_size / 2**3, 128 * endpoint_filter_scale],
endpoints['3'].shape.as_list())
self.assertAllEqual(
[1, input_size / 2**4, input_size / 2**4, 256 * endpoint_filter_scale],
endpoints['4'].shape.as_list())
self.assertAllEqual(
[1, input_size / 2**5, input_size / 2**5, 512 * endpoint_filter_scale],
endpoints['5'].shape.as_list())
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
use_sync_bn=[False, True],
))
def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):
"""Test for sync bn on TPU and GPU devices."""
inputs = np.random.rand(64, 128, 128, 3)
tf.keras.backend.set_image_data_format('channels_last')
with strategy.scope():
network = resnet.ResNet(model_id=50, use_sync_bn=use_sync_bn)
_ = network(inputs)
@parameterized.parameters(
(128, 34, 1, 'v0', None, 0.0, 1.0, False, False),
(128, 34, 1, 'v1', 0.25, 0.2, 1.25, True, True),
(128, 50, 4, 'v0', None, 0.0, 1.5, False, False),
(128, 50, 4, 'v1', 0.25, 0.2, 2.0, True, True),
)
def test_resnet_rs(self, input_size, model_id, endpoint_filter_scale,
stem_type, se_ratio, init_stochastic_depth_rate,
depth_multiplier, resnetd_shortcut, replace_stem_max_pool):
"""Test creation of ResNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = resnet.ResNet(
model_id=model_id,
depth_multiplier=depth_multiplier,
stem_type=stem_type,
resnetd_shortcut=resnetd_shortcut,
replace_stem_max_pool=replace_stem_max_pool,
se_ratio=se_ratio,
init_stochastic_depth_rate=init_stochastic_depth_rate)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
_ = network(inputs)
@parameterized.parameters(1, 3, 4)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = resnet.ResNet(model_id=50, input_specs=input_specs)
inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=50,
depth_multiplier=1.0,
stem_type='v0',
se_ratio=None,
resnetd_shortcut=False,
replace_stem_max_pool=False,
init_stochastic_depth_rate=0.0,
scale_stem=True,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
bn_trainable=True)
network = resnet.ResNet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = resnet.ResNet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 5,545 | 34.101266 | 80 | py |
models | models-master/official/vision/modeling/backbones/spinenet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SpineNet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import spinenet
class SpineNetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 0.65, 1, 0.5, 128, 4, 6),
(256, 1.0, 1, 0.5, 256, 3, 6),
(384, 1.0, 2, 0.5, 256, 4, 7),
(512, 1.0, 3, 1.0, 256, 3, 7),
(640, 1.3, 4, 1.0, 384, 3, 7),
)
def test_network_creation(self, input_size, filter_size_scale, block_repeats,
resample_alpha, endpoints_num_filters, min_level,
max_level):
"""Test creation of SpineNet models."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3])
model = spinenet.SpineNet(
input_specs=input_specs,
min_level=min_level,
max_level=max_level,
endpoints_num_filters=endpoints_num_filters,
resample_alpha=resample_alpha,
block_repeats=block_repeats,
filter_size_scale=filter_size_scale,
init_stochastic_depth_rate=0.2,
)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = model(inputs)
for l in range(min_level, max_level + 1):
self.assertIn(str(l), endpoints.keys())
self.assertAllEqual(
[1, input_size / 2**l, input_size / 2**l, endpoints_num_filters],
endpoints[str(l)].shape.as_list())
@parameterized.parameters(
((128, 128), (128, 128)),
((128, 128), (256, 256)),
((640, 640), (896, 1664)),
)
def test_load_from_different_input_specs(self, input_size_1, input_size_2):
"""Test loading checkpoints with different input size."""
def build_spinenet(input_size):
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], 3])
model = spinenet.SpineNet(
input_specs=input_specs,
min_level=3,
max_level=7,
endpoints_num_filters=384,
resample_alpha=1.0,
block_repeats=2,
filter_size_scale=0.5)
return model
model_1 = build_spinenet(input_size_1)
model_2 = build_spinenet(input_size_2)
ckpt_1 = tf.train.Checkpoint(backbone=model_1)
ckpt_2 = tf.train.Checkpoint(backbone=model_2)
ckpt_path = self.get_temp_dir() + '/ckpt'
ckpt_1.write(ckpt_path)
ckpt_2.restore(ckpt_path).expect_partial()
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
min_level=3,
max_level=7,
endpoints_num_filters=256,
resample_alpha=0.5,
block_repeats=1,
filter_size_scale=1.0,
init_stochastic_depth_rate=0.2,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
)
network = spinenet.SpineNet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = spinenet.SpineNet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
@parameterized.parameters(
('relu', tf.nn.relu),
('swish', tf.nn.swish)
)
def test_activation(self, activation, activation_fn):
model = spinenet.SpineNet(activation=activation)
self.assertEqual(model._activation_fn, activation_fn)
  def test_invalid_activation_raises_valueerror(self):
with self.assertRaises(ValueError):
spinenet.SpineNet(activation='invalid_activation_name')
if __name__ == '__main__':
tf.test.main()
| 4,724 | 32.75 | 79 | py |
models | models-master/official/vision/modeling/backbones/mobilenet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MobileNet."""
import itertools
import math
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import mobilenet
class MobileNetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
'MobileNetV1',
'MobileNetV2',
'MobileNetV3Large',
'MobileNetV3Small',
'MobileNetV3EdgeTPU',
'MobileNetMultiAVG',
'MobileNetMultiMAX',
'MobileNetMultiAVGSeg',
'MobileNetMultiMAXSeg',
'MobileNetV3SmallReducedFilters',
)
def test_serialize_deserialize(self, model_id):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=model_id,
filter_size_scale=1.0,
stochastic_depth_drop_rate=None,
use_sync_bn=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
norm_momentum=0.99,
norm_epsilon=0.001,
output_stride=None,
min_depth=8,
divisible_by=8,
regularize_depthwise=False,
finegrain_classification_mode=True
)
network = mobilenet.MobileNet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = mobilenet.MobileNet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
@parameterized.parameters(
itertools.product(
[1, 3],
[
'MobileNetV1',
'MobileNetV2',
'MobileNetV3Large',
'MobileNetV3Small',
'MobileNetV3EdgeTPU',
'MobileNetMultiAVG',
'MobileNetMultiMAX',
'MobileNetMultiAVGSeg',
'MobileNetMultiMAXSeg',
'MobileNetV3SmallReducedFilters',
],
))
def test_input_specs(self, input_dim, model_id):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = mobilenet.MobileNet(model_id=model_id, input_specs=input_specs)
inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)
_ = network(inputs)
@parameterized.parameters(
itertools.product(
[
'MobileNetV1',
'MobileNetV2',
'MobileNetV3Large',
'MobileNetV3Small',
'MobileNetV3EdgeTPU',
'MobileNetMultiAVG',
'MobileNetMultiMAX',
'MobileNetMultiAVGSeg',
'MobileNetV3SmallReducedFilters',
],
[32, 224],
))
def test_mobilenet_creation(self, model_id,
input_size):
"""Test creation of MobileNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
mobilenet_layers = {
        # The number of filters of the layers whose outputs are collected,
        # for filter_size_scale = 1.0.
'MobileNetV1': [128, 256, 512, 1024],
'MobileNetV2': [24, 32, 96, 320],
'MobileNetV3Small': [16, 24, 48, 96],
'MobileNetV3Large': [24, 40, 112, 160],
'MobileNetV3EdgeTPU': [32, 48, 96, 192],
'MobileNetMultiMAX': [32, 64, 128, 160],
'MobileNetMultiAVG': [32, 64, 160, 192],
'MobileNetMultiAVGSeg': [32, 64, 160, 96],
'MobileNetMultiMAXSeg': [32, 64, 128, 96],
'MobileNetV3SmallReducedFilters': [16, 24, 48, 48],
}
network = mobilenet.MobileNet(model_id=model_id,
filter_size_scale=1.0)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
for idx, num_filter in enumerate(mobilenet_layers[model_id]):
self.assertAllEqual(
[1, input_size / 2 ** (idx+2), input_size / 2 ** (idx+2), num_filter],
endpoints[str(idx+2)].shape.as_list())
@parameterized.parameters(
itertools.product(
[
'MobileNetV1',
'MobileNetV2',
'MobileNetV3Large',
'MobileNetV3Small',
'MobileNetV3EdgeTPU',
'MobileNetMultiAVG',
'MobileNetMultiMAX',
'MobileNetMultiAVGSeg',
'MobileNetMultiMAXSeg',
'MobileNetV3SmallReducedFilters',
],
[32, 224],
))
def test_mobilenet_intermediate_layers(self, model_id, input_size):
tf.keras.backend.set_image_data_format('channels_last')
# Tests the mobilenet intermediate depthwise layers.
mobilenet_depthwise_layers = {
        # The number of filters of the depthwise layers whose outputs are
        # collected, for filter_size_scale = 1.0. Only tests the mobilenet
        # models with inverted bottleneck blocks that use depthwise convs,
        # which excludes MobileNetV1.
'MobileNetV1': [],
'MobileNetV2': [144, 192, 576, 960],
'MobileNetV3Small': [16, 88, 144, 576],
'MobileNetV3Large': [72, 120, 672, 960],
'MobileNetV3EdgeTPU': [None, None, 384, 1280],
'MobileNetMultiMAX': [96, 128, 384, 640],
'MobileNetMultiAVG': [64, 192, 640, 768],
'MobileNetMultiAVGSeg': [64, 192, 640, 384],
'MobileNetMultiMAXSeg': [96, 128, 384, 320],
'MobileNetV3SmallReducedFilters': [16, 88, 144, 288],
}
network = mobilenet.MobileNet(model_id=model_id,
filter_size_scale=1.0,
output_intermediate_endpoints=True)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
for idx, num_filter in enumerate(mobilenet_depthwise_layers[model_id]):
# Not using depthwise conv in this layer.
if num_filter is None:
continue
self.assertAllEqual(
[1, input_size / 2**(idx + 2), input_size / 2**(idx + 2), num_filter],
endpoints[str(idx + 2) + '/depthwise'].shape.as_list())
@parameterized.parameters(
itertools.product(
[
'MobileNetV1',
'MobileNetV2',
'MobileNetV3Large',
'MobileNetV3Small',
'MobileNetV3EdgeTPU',
'MobileNetMultiAVG',
            'MobileNetMultiMAX',
'MobileNetMultiAVGSeg',
'MobileNetMultiMAXSeg',
'MobileNetV3SmallReducedFilters',
],
[1.0, 0.75],
))
def test_mobilenet_scaling(self, model_id,
filter_size_scale):
"""Test for creation of a MobileNet classifier."""
mobilenet_params = {
('MobileNetV1', 1.0): 3228864,
('MobileNetV1', 0.75): 1832976,
('MobileNetV2', 1.0): 2257984,
('MobileNetV2', 0.75): 1382064,
('MobileNetV3Large', 1.0): 4226432,
('MobileNetV3Large', 0.75): 2731616,
('MobileNetV3Small', 1.0): 1529968,
('MobileNetV3Small', 0.75): 1026552,
('MobileNetV3EdgeTPU', 1.0): 2849312,
('MobileNetV3EdgeTPU', 0.75): 1737288,
('MobileNetMultiAVG', 1.0): 3704416,
('MobileNetMultiAVG', 0.75): 2349704,
('MobileNetMultiMAX', 1.0): 3174560,
('MobileNetMultiMAX', 0.75): 2045816,
('MobileNetMultiAVGSeg', 1.0): 2239840,
('MobileNetMultiAVGSeg', 0.75): 1395272,
('MobileNetMultiMAXSeg', 1.0): 1929088,
('MobileNetMultiMAXSeg', 0.75): 1216544,
('MobileNetV3SmallReducedFilters', 1.0): 694880,
('MobileNetV3SmallReducedFilters', 0.75): 505960,
}
input_size = 224
network = mobilenet.MobileNet(model_id=model_id,
filter_size_scale=filter_size_scale)
self.assertEqual(network.count_params(),
mobilenet_params[(model_id, filter_size_scale)])
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
_ = network(inputs)
@parameterized.parameters(
itertools.product(
[
'MobileNetV1',
'MobileNetV2',
'MobileNetV3Large',
'MobileNetV3Small',
'MobileNetV3EdgeTPU',
'MobileNetMultiAVG',
'MobileNetMultiMAX',
'MobileNetMultiAVGSeg',
'MobileNetMultiMAXSeg',
'MobileNetV3SmallReducedFilters',
],
[8, 16, 32],
))
def test_mobilenet_output_stride(self, model_id, output_stride):
"""Test for creation of a MobileNet with different output strides."""
tf.keras.backend.set_image_data_format('channels_last')
mobilenet_layers = {
        # The number of filters of the layers whose outputs are collected,
        # for filter_size_scale = 1.0.
'MobileNetV1': 1024,
'MobileNetV2': 320,
'MobileNetV3Small': 96,
'MobileNetV3Large': 160,
'MobileNetV3EdgeTPU': 192,
'MobileNetMultiMAX': 160,
'MobileNetMultiAVG': 192,
'MobileNetMultiAVGSeg': 448,
'MobileNetMultiMAXSeg': 448,
'MobileNetV3SmallReducedFilters': 48,
}
network = mobilenet.MobileNet(
model_id=model_id, filter_size_scale=1.0, output_stride=output_stride)
level = int(math.log2(output_stride))
input_size = 224
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
num_filter = mobilenet_layers[model_id]
self.assertAllEqual(
[1, input_size / output_stride, input_size / output_stride, num_filter],
endpoints[str(level)].shape.as_list())
if __name__ == '__main__':
tf.test.main()
| 10,596 | 34.441472 | 80 | py |
models | models-master/official/vision/modeling/backbones/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backbones package definition."""
from official.vision.modeling.backbones.efficientnet import EfficientNet
from official.vision.modeling.backbones.mobiledet import MobileDet
from official.vision.modeling.backbones.mobilenet import MobileNet
from official.vision.modeling.backbones.resnet import ResNet
from official.vision.modeling.backbones.resnet_3d import ResNet3D
from official.vision.modeling.backbones.resnet_deeplab import DilatedResNet
from official.vision.modeling.backbones.revnet import RevNet
from official.vision.modeling.backbones.spinenet import SpineNet
from official.vision.modeling.backbones.spinenet_mobile import SpineNetMobile
from official.vision.modeling.backbones.vit import VisionTransformer
| 1,329 | 48.259259 | 77 | py |
models | models-master/official/vision/modeling/backbones/resnet_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of 3D Residual Networks."""
from typing import Callable, List, Tuple, Optional
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks_3d
from official.vision.modeling.layers import nn_layers
layers = tf.keras.layers
RESNET_SPECS = {
50: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 4),
('bottleneck3d', 256, 6),
('bottleneck3d', 512, 3),
],
101: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 4),
('bottleneck3d', 256, 23),
('bottleneck3d', 512, 3),
],
152: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 8),
('bottleneck3d', 256, 36),
('bottleneck3d', 512, 3),
],
200: [
('bottleneck3d', 64, 3),
('bottleneck3d', 128, 24),
('bottleneck3d', 256, 36),
('bottleneck3d', 512, 3),
],
270: [
('bottleneck3d', 64, 4),
('bottleneck3d', 128, 29),
('bottleneck3d', 256, 53),
('bottleneck3d', 512, 4),
],
300: [
('bottleneck3d', 64, 4),
('bottleneck3d', 128, 36),
('bottleneck3d', 256, 54),
('bottleneck3d', 512, 4),
],
350: [
('bottleneck3d', 64, 4),
('bottleneck3d', 128, 36),
('bottleneck3d', 256, 72),
('bottleneck3d', 512, 4),
],
}
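# Each entry above is a (block_fn, filters, block_repeats) tuple describing one
# block group; the four groups produce endpoints '2' through '5' in
# ResNet3D._build_model below.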
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResNet3D(tf.keras.Model):
"""Creates a 3D ResNet family model."""
def __init__(
self,
model_id: int,
temporal_strides: List[int],
temporal_kernel_sizes: List[Tuple[int]],
use_self_gating: Optional[List[int]] = None,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
stem_type: str = 'v0',
stem_conv_temporal_kernel_size: int = 5,
stem_conv_temporal_stride: int = 2,
stem_pool_temporal_stride: int = 2,
init_stochastic_depth_rate: float = 0.0,
activation: str = 'relu',
se_ratio: Optional[float] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a 3D ResNet model.
Args:
model_id: An `int` of depth of ResNet backbone model.
temporal_strides: A list of integers that specifies the temporal strides
for all 3d blocks.
temporal_kernel_sizes: A list of tuples that specifies the temporal kernel
sizes for all 3d blocks in different block groups.
use_self_gating: A list of booleans to specify applying self-gating module
or not in each block group. If None, self-gating is not applied.
input_specs: A `tf.keras.layers.InputSpec` of the input tensor.
stem_type: A `str` of stem type of ResNet. Default to `v0`. If set to
`v1`, use ResNet-D type stem (https://arxiv.org/abs/1812.01187).
stem_conv_temporal_kernel_size: An `int` of temporal kernel size for the
first conv layer.
stem_conv_temporal_stride: An `int` of temporal stride for the first conv
layer.
stem_pool_temporal_stride: An `int` of temporal stride for the first pool
layer.
init_stochastic_depth_rate: A `float` of initial stochastic depth rate.
activation: A `str` of name of the activation function.
se_ratio: A `float` or None. Ratio of the Squeeze-and-Excitation layer.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A str for kernel initializer of convolutional layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
**kwargs: Additional keyword arguments to be passed.
"""
self._model_id = model_id
self._temporal_strides = temporal_strides
self._temporal_kernel_sizes = temporal_kernel_sizes
self._input_specs = input_specs
self._stem_type = stem_type
self._stem_conv_temporal_kernel_size = stem_conv_temporal_kernel_size
self._stem_conv_temporal_stride = stem_conv_temporal_stride
self._stem_pool_temporal_stride = stem_pool_temporal_stride
self._use_self_gating = use_self_gating
self._se_ratio = se_ratio
self._init_stochastic_depth_rate = init_stochastic_depth_rate
self._use_sync_bn = use_sync_bn
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._norm = layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
# Build ResNet3D backbone.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
endpoints = self._build_model(inputs)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(ResNet3D, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _build_model(self, inputs):
"""Builds model architecture.
Args:
      inputs: The Keras input tensor.
Returns:
endpoints: A dictionary of backbone endpoint features.
"""
# Build stem.
x = self._build_stem(inputs, stem_type=self._stem_type)
temporal_kernel_size = 1 if self._stem_pool_temporal_stride == 1 else 3
x = layers.MaxPool3D(
pool_size=[temporal_kernel_size, 3, 3],
strides=[self._stem_pool_temporal_stride, 2, 2],
padding='same')(x)
# Build intermediate blocks and endpoints.
resnet_specs = RESNET_SPECS[self._model_id]
if len(self._temporal_strides) != len(resnet_specs) or len(
self._temporal_kernel_sizes) != len(resnet_specs):
raise ValueError(
          'Number of blocks in temporal specs should match resnet_specs.')
endpoints = {}
for i, resnet_spec in enumerate(resnet_specs):
if resnet_spec[0] == 'bottleneck3d':
block_fn = nn_blocks_3d.BottleneckBlock3D
else:
raise ValueError('Block fn `{}` is not supported.'.format(
resnet_spec[0]))
use_self_gating = (
self._use_self_gating[i] if self._use_self_gating else False)
x = self._block_group(
inputs=x,
filters=resnet_spec[1],
temporal_kernel_sizes=self._temporal_kernel_sizes[i],
temporal_strides=self._temporal_strides[i],
spatial_strides=(1 if i == 0 else 2),
block_fn=block_fn,
block_repeats=resnet_spec[2],
stochastic_depth_drop_rate=nn_layers.get_stochastic_depth_rate(
self._init_stochastic_depth_rate, i + 2, 5),
use_self_gating=use_self_gating,
name='block_group_l{}'.format(i + 2))
endpoints[str(i + 2)] = x
return endpoints
def _build_stem(self, inputs, stem_type):
"""Builds stem layer."""
# Build stem.
if stem_type == 'v0':
x = layers.Conv3D(
filters=64,
kernel_size=[self._stem_conv_temporal_kernel_size, 7, 7],
strides=[self._stem_conv_temporal_stride, 2, 2],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(x)
x = tf_utils.get_activation(self._activation)(x)
elif stem_type == 'v1':
x = layers.Conv3D(
filters=32,
kernel_size=[self._stem_conv_temporal_kernel_size, 3, 3],
strides=[self._stem_conv_temporal_stride, 2, 2],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(x)
x = tf_utils.get_activation(self._activation)(x)
x = layers.Conv3D(
filters=32,
kernel_size=[1, 3, 3],
strides=[1, 1, 1],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(x)
x = tf_utils.get_activation(self._activation)(x)
x = layers.Conv3D(
filters=64,
kernel_size=[1, 3, 3],
strides=[1, 1, 1],
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)(x)
x = tf_utils.get_activation(self._activation)(x)
else:
raise ValueError(f'Stem type {stem_type} not supported.')
return x
def _block_group(self,
inputs: tf.Tensor,
filters: int,
temporal_kernel_sizes: Tuple[int],
temporal_strides: int,
spatial_strides: int,
block_fn: Callable[
...,
tf.keras.layers.Layer] = nn_blocks_3d.BottleneckBlock3D,
block_repeats: int = 1,
stochastic_depth_drop_rate: float = 0.0,
use_self_gating: bool = False,
name: str = 'block_group'):
"""Creates one group of blocks for the ResNet3D model.
Args:
      inputs: A `tf.Tensor` of size `[batch, time, height, width, channels]`.
filters: An `int` of number of filters for the first convolution of the
layer.
temporal_kernel_sizes: A tuple that specifies the temporal kernel sizes
for each block in the current group.
temporal_strides: An `int` of temporal strides for the first convolution
in this group.
spatial_strides: An `int` stride to use for the first convolution of the
layer. If greater than 1, this layer will downsample the input.
block_fn: Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`.
block_repeats: An `int` of number of blocks contained in the layer.
stochastic_depth_drop_rate: A `float` of drop rate of the current block
group.
use_self_gating: A `bool` that specifies whether to apply self-gating
module or not.
name: A `str` name for the block.
Returns:
The output `tf.Tensor` of the block layer.
"""
if len(temporal_kernel_sizes) != block_repeats:
raise ValueError(
          'Number of elements in `temporal_kernel_sizes` must equal `block_repeats`.'
)
# Only apply self-gating module in the last block.
use_self_gating_list = [False] * (block_repeats - 1) + [use_self_gating]
x = block_fn(
filters=filters,
temporal_kernel_size=temporal_kernel_sizes[0],
temporal_strides=temporal_strides,
spatial_strides=spatial_strides,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
use_self_gating=use_self_gating_list[0],
se_ratio=self._se_ratio,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for i in range(1, block_repeats):
x = block_fn(
filters=filters,
temporal_kernel_size=temporal_kernel_sizes[i],
temporal_strides=1,
spatial_strides=1,
stochastic_depth_drop_rate=stochastic_depth_drop_rate,
use_self_gating=use_self_gating_list[i],
se_ratio=self._se_ratio,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'temporal_strides': self._temporal_strides,
'temporal_kernel_sizes': self._temporal_kernel_sizes,
'stem_type': self._stem_type,
'stem_conv_temporal_kernel_size': self._stem_conv_temporal_kernel_size,
'stem_conv_temporal_stride': self._stem_conv_temporal_stride,
'stem_pool_temporal_stride': self._stem_pool_temporal_stride,
'use_self_gating': self._use_self_gating,
'se_ratio': self._se_ratio,
'init_stochastic_depth_rate': self._init_stochastic_depth_rate,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('resnet_3d')
def build_resnet3d(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds ResNet 3d backbone from a config."""
backbone_cfg = backbone_config.get()
# Flatten configs before passing to the backbone.
temporal_strides = []
temporal_kernel_sizes = []
use_self_gating = []
for block_spec in backbone_cfg.block_specs:
temporal_strides.append(block_spec.temporal_strides)
temporal_kernel_sizes.append(block_spec.temporal_kernel_sizes)
use_self_gating.append(block_spec.use_self_gating)
return ResNet3D(
model_id=backbone_cfg.model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
use_self_gating=use_self_gating,
input_specs=input_specs,
stem_type=backbone_cfg.stem_type,
stem_conv_temporal_kernel_size=backbone_cfg
.stem_conv_temporal_kernel_size,
stem_conv_temporal_stride=backbone_cfg.stem_conv_temporal_stride,
stem_pool_temporal_stride=backbone_cfg.stem_pool_temporal_stride,
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
se_ratio=backbone_cfg.se_ratio,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
@factory.register_backbone_builder('resnet_3d_rs')
def build_resnet3d_rs(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds ResNet-3D-RS backbone from a config."""
backbone_cfg = backbone_config.get()
# Flatten configs before passing to the backbone.
temporal_strides = []
temporal_kernel_sizes = []
use_self_gating = []
for i, block_spec in enumerate(backbone_cfg.block_specs):
temporal_strides.append(block_spec.temporal_strides)
use_self_gating.append(block_spec.use_self_gating)
block_repeats_i = RESNET_SPECS[backbone_cfg.model_id][i][-1]
temporal_kernel_sizes.append(list(block_spec.temporal_kernel_sizes) *
block_repeats_i)
return ResNet3D(
model_id=backbone_cfg.model_id,
temporal_strides=temporal_strides,
temporal_kernel_sizes=temporal_kernel_sizes,
use_self_gating=use_self_gating,
input_specs=input_specs,
stem_type=backbone_cfg.stem_type,
stem_conv_temporal_kernel_size=backbone_cfg
.stem_conv_temporal_kernel_size,
stem_conv_temporal_stride=backbone_cfg.stem_conv_temporal_stride,
stem_pool_temporal_stride=backbone_cfg.stem_pool_temporal_stride,
init_stochastic_depth_rate=backbone_cfg.stochastic_depth_drop_rate,
se_ratio=backbone_cfg.se_ratio,
activation=norm_activation_config.activation,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
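# A hedged construction sketch (not part of the library API); the strides,
# kernel sizes, and input shape below are illustrative only. Each entry of
# `temporal_kernel_sizes` must have as many elements as the block repeats of
# the matching group in RESNET_SPECS (3, 4, 6, 3 for model_id=50).
def _example_resnet3d_50() -> tf.keras.Model:
  """Builds an illustrative ResNet3D-50 backbone directly."""
  return ResNet3D(
      model_id=50,
      temporal_strides=[1, 1, 1, 1],
      temporal_kernel_sizes=[(3, 3, 3), (3, 1, 3, 1), (3, 1, 3, 1, 3, 1),
                             (1, 3, 1)],
      use_self_gating=[True, True, True, True],
      input_specs=tf.keras.layers.InputSpec(shape=[None, 8, 224, 224, 3]))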
| 18,647 | 37.931106 | 88 | py |
models | models-master/official/vision/modeling/backbones/mobilenet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of MobileNet Networks."""
import dataclasses
from typing import Optional, Dict, Any, Tuple
# Import libraries
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.vision.modeling.backbones import factory
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
layers = tf.keras.layers
# pylint: disable=pointless-string-statement
@tf.keras.utils.register_keras_serializable(package='Vision')
class Conv2DBNBlock(tf.keras.layers.Layer):
"""A convolution block with batch normalization."""
def __init__(
self,
filters: int,
kernel_size: int = 3,
strides: int = 1,
use_bias: bool = False,
use_explicit_padding: bool = False,
activation: str = 'relu6',
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
use_normalization: bool = True,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
**kwargs):
"""A convolution block with batch normalization.
Args:
      filters: An `int` number of filters for the convolution.
kernel_size: An `int` specifying the height and width of the 2D
convolution window.
strides: An `int` of block stride. If greater than 1, this block will
ultimately downsample the input.
use_bias: If True, use bias in the convolution layer.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
activation: A `str` name of the activation function.
kernel_initializer: A `str` for kernel initializer of convolutional
layers.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default to None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
Default to None.
use_normalization: If True, use batch normalization.
use_sync_bn: If True, use synchronized batch normalization.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
**kwargs: Additional keyword arguments to be passed.
"""
super(Conv2DBNBlock, self).__init__(**kwargs)
self._filters = filters
self._kernel_size = kernel_size
self._strides = strides
self._activation = activation
self._use_bias = use_bias
self._use_explicit_padding = use_explicit_padding
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_normalization = use_normalization
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._norm = tf.keras.layers.BatchNormalization
if use_explicit_padding and kernel_size > 1:
self._padding = 'valid'
else:
self._padding = 'same'
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'kernel_size': self._kernel_size,
'use_bias': self._use_bias,
'use_explicit_padding': self._use_explicit_padding,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'use_normalization': self._use_normalization,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(Conv2DBNBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
if self._use_explicit_padding and self._kernel_size > 1:
padding_size = nn_layers.get_padding_for_kernel_size(self._kernel_size)
self._pad = tf.keras.layers.ZeroPadding2D(padding_size)
self._conv0 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding=self._padding,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
if self._use_normalization:
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon,
synchronized=self._use_sync_bn)
self._activation_layer = tf_utils.get_activation(
self._activation, use_keras_layer=True)
super(Conv2DBNBlock, self).build(input_shape)
def call(self, inputs, training=None):
if self._use_explicit_padding and self._kernel_size > 1:
inputs = self._pad(inputs)
x = self._conv0(inputs)
if self._use_normalization:
x = self._norm0(x)
return self._activation_layer(x)
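# A minimal usage sketch of Conv2DBNBlock (illustrative values only): a
# stride-2 3x3 conv + batch norm + relu6, as used for a MobileNet stem.
def _example_conv2d_bn_block() -> tf.Tensor:
  """Applies a stem-style Conv2DBNBlock to a dummy image batch."""
  block = Conv2DBNBlock(filters=32, kernel_size=3, strides=2,
                        activation='relu6')
  # 'same' padding with stride 2 halves the spatial dims: [1, 112, 112, 32].
  return block(tf.ones([1, 224, 224, 3]))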
"""
Architecture: https://arxiv.org/abs/1704.04861.
"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications" Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko,
Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam
"""
MNV1_BLOCK_SPECS = {
'spec_name': 'MobileNetV1',
'block_spec_schema': ['block_fn', 'kernel_size', 'strides',
'filters', 'is_output'],
'block_specs': [
('convbn', 3, 2, 32, False),
('depsepconv', 3, 1, 64, False),
('depsepconv', 3, 2, 128, False),
('depsepconv', 3, 1, 128, True),
('depsepconv', 3, 2, 256, False),
('depsepconv', 3, 1, 256, True),
('depsepconv', 3, 2, 512, False),
('depsepconv', 3, 1, 512, False),
('depsepconv', 3, 1, 512, False),
('depsepconv', 3, 1, 512, False),
('depsepconv', 3, 1, 512, False),
('depsepconv', 3, 1, 512, True),
('depsepconv', 3, 2, 1024, False),
('depsepconv', 3, 1, 1024, True),
]
}
"""
Architecture: https://arxiv.org/abs/1801.04381
"MobileNetV2: Inverted Residuals and Linear Bottlenecks"
Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen
"""
MNV2_BLOCK_SPECS = {
'spec_name': 'MobileNetV2',
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'expand_ratio', 'is_output'],
'block_specs': [
('convbn', 3, 2, 32, None, False),
('invertedbottleneck', 3, 1, 16, 1., False),
('invertedbottleneck', 3, 2, 24, 6., False),
('invertedbottleneck', 3, 1, 24, 6., True),
('invertedbottleneck', 3, 2, 32, 6., False),
('invertedbottleneck', 3, 1, 32, 6., False),
('invertedbottleneck', 3, 1, 32, 6., True),
('invertedbottleneck', 3, 2, 64, 6., False),
('invertedbottleneck', 3, 1, 64, 6., False),
('invertedbottleneck', 3, 1, 64, 6., False),
('invertedbottleneck', 3, 1, 64, 6., False),
('invertedbottleneck', 3, 1, 96, 6., False),
('invertedbottleneck', 3, 1, 96, 6., False),
('invertedbottleneck', 3, 1, 96, 6., True),
('invertedbottleneck', 3, 2, 160, 6., False),
('invertedbottleneck', 3, 1, 160, 6., False),
('invertedbottleneck', 3, 1, 160, 6., False),
('invertedbottleneck', 3, 1, 320, 6., True),
('convbn', 1, 1, 1280, None, False),
]
}
"""
Architecture: https://arxiv.org/abs/1905.02244
"Searching for MobileNetV3"
Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,
Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam
"""
MNV3Large_BLOCK_SPECS = {
'spec_name': 'MobileNetV3Large',
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'activation', 'se_ratio', 'expand_ratio',
'use_normalization', 'use_bias', 'is_output'],
'block_specs': [
('convbn', 3, 2, 16,
'hard_swish', None, None, True, False, False),
('invertedbottleneck', 3, 1, 16,
'relu', None, 1., None, False, False),
('invertedbottleneck', 3, 2, 24,
'relu', None, 4., None, False, False),
('invertedbottleneck', 3, 1, 24,
'relu', None, 3., None, False, True),
('invertedbottleneck', 5, 2, 40,
'relu', 0.25, 3., None, False, False),
('invertedbottleneck', 5, 1, 40,
'relu', 0.25, 3., None, False, False),
('invertedbottleneck', 5, 1, 40,
'relu', 0.25, 3., None, False, True),
('invertedbottleneck', 3, 2, 80,
'hard_swish', None, 6., None, False, False),
('invertedbottleneck', 3, 1, 80,
'hard_swish', None, 2.5, None, False, False),
('invertedbottleneck', 3, 1, 80,
'hard_swish', None, 2.3, None, False, False),
('invertedbottleneck', 3, 1, 80,
'hard_swish', None, 2.3, None, False, False),
('invertedbottleneck', 3, 1, 112,
'hard_swish', 0.25, 6., None, False, False),
('invertedbottleneck', 3, 1, 112,
'hard_swish', 0.25, 6., None, False, True),
('invertedbottleneck', 5, 2, 160,
'hard_swish', 0.25, 6., None, False, False),
('invertedbottleneck', 5, 1, 160,
'hard_swish', 0.25, 6., None, False, False),
('invertedbottleneck', 5, 1, 160,
'hard_swish', 0.25, 6., None, False, True),
('convbn', 1, 1, 960,
'hard_swish', None, None, True, False, False),
('gpooling', None, None, None,
None, None, None, None, None, False),
('convbn', 1, 1, 1280,
'hard_swish', None, None, False, True, False),
]
}
MNV3Small_BLOCK_SPECS = {
'spec_name': 'MobileNetV3Small',
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'activation', 'se_ratio', 'expand_ratio',
'use_normalization', 'use_bias', 'is_output'],
'block_specs': [
('convbn', 3, 2, 16,
'hard_swish', None, None, True, False, False),
('invertedbottleneck', 3, 2, 16,
'relu', 0.25, 1, None, False, True),
('invertedbottleneck', 3, 2, 24,
'relu', None, 72. / 16, None, False, False),
('invertedbottleneck', 3, 1, 24,
'relu', None, 88. / 24, None, False, True),
('invertedbottleneck', 5, 2, 40,
'hard_swish', 0.25, 4., None, False, False),
('invertedbottleneck', 5, 1, 40,
'hard_swish', 0.25, 6., None, False, False),
('invertedbottleneck', 5, 1, 40,
'hard_swish', 0.25, 6., None, False, False),
('invertedbottleneck', 5, 1, 48,
'hard_swish', 0.25, 3., None, False, False),
('invertedbottleneck', 5, 1, 48,
'hard_swish', 0.25, 3., None, False, True),
('invertedbottleneck', 5, 2, 96,
'hard_swish', 0.25, 6., None, False, False),
('invertedbottleneck', 5, 1, 96,
'hard_swish', 0.25, 6., None, False, False),
('invertedbottleneck', 5, 1, 96,
'hard_swish', 0.25, 6., None, False, True),
('convbn', 1, 1, 576,
'hard_swish', None, None, True, False, False),
('gpooling', None, None, None,
None, None, None, None, None, False),
('convbn', 1, 1, 1024,
'hard_swish', None, None, False, True, False),
]
}
"""
The EdgeTPU version is taken from
github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v3.py
"""
MNV3EdgeTPU_BLOCK_SPECS = {
'spec_name': 'MobileNetV3EdgeTPU',
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'activation', 'se_ratio', 'expand_ratio',
'use_residual', 'use_depthwise', 'is_output'],
'block_specs': [
('convbn', 3, 2, 32, 'relu', None, None, None, None, False),
('invertedbottleneck', 3, 1, 16, 'relu', None, 1., True, False, False),
('invertedbottleneck', 3, 2, 32, 'relu', None, 8., True, False, False),
('invertedbottleneck', 3, 1, 32, 'relu', None, 4., True, False, False),
('invertedbottleneck', 3, 1, 32, 'relu', None, 4., True, False, False),
('invertedbottleneck', 3, 1, 32, 'relu', None, 4., True, False, True),
('invertedbottleneck', 3, 2, 48, 'relu', None, 8., True, False, False),
('invertedbottleneck', 3, 1, 48, 'relu', None, 4., True, False, False),
('invertedbottleneck', 3, 1, 48, 'relu', None, 4., True, False, False),
('invertedbottleneck', 3, 1, 48, 'relu', None, 4., True, False, True),
('invertedbottleneck', 3, 2, 96, 'relu', None, 8., True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu', None, 8., False, True, False),
('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu', None, 4., True, True, True),
('invertedbottleneck', 5, 2, 160, 'relu', None, 8., True, True, False),
('invertedbottleneck', 5, 1, 160, 'relu', None, 4., True, True, False),
('invertedbottleneck', 5, 1, 160, 'relu', None, 4., True, True, False),
('invertedbottleneck', 5, 1, 160, 'relu', None, 4., True, True, False),
('invertedbottleneck', 3, 1, 192, 'relu', None, 8., True, True, True),
('convbn', 1, 1, 1280, 'relu', None, None, None, None, False),
]
}
"""
Architecture: https://arxiv.org/pdf/2008.08178.pdf
"Discovering Multi-Hardware Mobile Models via Architecture Search"
Grace Chu, Okan Arikan, Gabriel Bender, Weijun Wang,
Achille Brighton, Pieter-Jan Kindermans, Hanxiao Liu,
Berkin Akin, Suyog Gupta, and Andrew Howard
"""
MNMultiMAX_BLOCK_SPECS = {
'spec_name': 'MobileNetMultiMAX',
'block_spec_schema': [
'block_fn', 'kernel_size', 'strides', 'filters', 'activation',
'expand_ratio', 'use_normalization', 'use_bias', 'is_output'
],
'block_specs': [
('convbn', 3, 2, 32, 'relu', None, True, False, False),
('invertedbottleneck', 3, 2, 32, 'relu', 3., None, False, True),
('invertedbottleneck', 5, 2, 64, 'relu', 6., None, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 2., None, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 2., None, False, True),
('invertedbottleneck', 5, 2, 128, 'relu', 6., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 4., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 6., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, True),
('invertedbottleneck', 3, 2, 160, 'relu', 6., None, False, False),
('invertedbottleneck', 5, 1, 160, 'relu', 4., None, False, False),
('invertedbottleneck', 3, 1, 160, 'relu', 5., None, False, False),
('invertedbottleneck', 5, 1, 160, 'relu', 4., None, False, True),
('convbn', 1, 1, 960, 'relu', None, True, False, False),
('gpooling', None, None, None, None, None, None, None, False),
# Remove bias and add batch norm for the last layer to support QAT
# and achieve slightly better accuracy.
('convbn', 1, 1, 1280, 'relu', None, True, False, False),
]
}
MNMultiAVG_BLOCK_SPECS = {
'spec_name': 'MobileNetMultiAVG',
'block_spec_schema': [
'block_fn', 'kernel_size', 'strides', 'filters', 'activation',
'expand_ratio', 'use_normalization', 'use_bias', 'is_output'
],
'block_specs': [
('convbn', 3, 2, 32, 'relu', None, True, False, False),
('invertedbottleneck', 3, 2, 32, 'relu', 3., None, False, False),
('invertedbottleneck', 3, 1, 32, 'relu', 2., None, False, True),
('invertedbottleneck', 5, 2, 64, 'relu', 5., None, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 3., None, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 2., None, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 3., None, False, True),
('invertedbottleneck', 5, 2, 128, 'relu', 6., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., None, False, False),
('invertedbottleneck', 3, 1, 160, 'relu', 6., None, False, False),
('invertedbottleneck', 3, 1, 160, 'relu', 4., None, False, True),
('invertedbottleneck', 3, 2, 192, 'relu', 6., None, False, False),
('invertedbottleneck', 5, 1, 192, 'relu', 4., None, False, False),
('invertedbottleneck', 5, 1, 192, 'relu', 4., None, False, False),
('invertedbottleneck', 5, 1, 192, 'relu', 4., None, False, True),
('convbn', 1, 1, 960, 'relu', None, True, False, False),
('gpooling', None, None, None, None, None, None, None, False),
# Remove bias and add batch norm for the last layer to support QAT
# and achieve slightly better accuracy.
('convbn', 1, 1, 1280, 'relu', None, True, False, False),
]
}
# Similar to MobileNetMultiAVG and used for segmentation task.
# Reduced the filters by a factor of 2 in the last block.
MNMultiAVG_SEG_BLOCK_SPECS = {
'spec_name':
'MobileNetMultiAVGSeg',
'block_spec_schema': [
'block_fn', 'kernel_size', 'strides', 'filters', 'activation',
'expand_ratio', 'use_normalization', 'use_bias', 'is_output'
],
'block_specs': [
('convbn', 3, 2, 32, 'relu', None, True, False, False),
('invertedbottleneck', 3, 2, 32, 'relu', 3., True, False, False),
('invertedbottleneck', 3, 1, 32, 'relu', 2., True, False, True),
('invertedbottleneck', 5, 2, 64, 'relu', 5., True, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 3., True, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 2., True, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 3., True, False, True),
('invertedbottleneck', 5, 2, 128, 'relu', 6., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False),
('invertedbottleneck', 3, 1, 160, 'relu', 6., True, False, False),
('invertedbottleneck', 3, 1, 160, 'relu', 4., True, False, True),
('invertedbottleneck', 3, 2, 192, 'relu', 6., True, False, False),
('invertedbottleneck', 5, 1, 96, 'relu', 2., True, False, False),
('invertedbottleneck', 5, 1, 96, 'relu', 4., True, False, False),
('invertedbottleneck', 5, 1, 96, 'relu', 4., True, False, True),
('convbn', 1, 1, 448, 'relu', None, True, False, True),
('gpooling', None, None, None, None, None, None, None, False),
# Remove bias and add batch norm for the last layer to support QAT
# and achieve slightly better accuracy.
('convbn', 1, 1, 1280, 'relu', None, True, False, False),
]
}
# Similar to MobileNetMultiMax and used for segmentation task.
# Reduced the filters by a factor of 2 in the last block.
MNMultiMAX_SEG_BLOCK_SPECS = {
'spec_name':
'MobileNetMultiMAXSeg',
'block_spec_schema': [
'block_fn', 'kernel_size', 'strides', 'filters', 'activation',
'expand_ratio', 'use_normalization', 'use_bias', 'is_output'
],
'block_specs': [
('convbn', 3, 2, 32, 'relu', None, True, False, False),
('invertedbottleneck', 3, 2, 32, 'relu', 3., True, False, True),
('invertedbottleneck', 5, 2, 64, 'relu', 6., True, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 2., True, False, False),
('invertedbottleneck', 3, 1, 64, 'relu', 2., True, False, True),
('invertedbottleneck', 5, 2, 128, 'relu', 6., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 4., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 6., True, False, False),
('invertedbottleneck', 3, 1, 128, 'relu', 3., True, False, True),
('invertedbottleneck', 3, 2, 160, 'relu', 6., True, False, False),
('invertedbottleneck', 5, 1, 96, 'relu', 2., True, False, False),
('invertedbottleneck', 3, 1, 96, 'relu', 4., True, False, False),
('invertedbottleneck', 5, 1, 96, 'relu', 320.0 / 96, True, False, True),
('convbn', 1, 1, 448, 'relu', None, True, False, True),
('gpooling', None, None, None, None, None, None, None, False),
# Remove bias and add batch norm for the last layer to support QAT
# and achieve slightly better accuracy.
('convbn', 1, 1, 1280, 'relu', None, True, False, False),
]
}
# A smaller MNV3Small, with reduced filters for the last few layers
MNV3SmallReducedFilters = {
'spec_name':
        'MobileNetV3SmallReducedFilters',
'block_spec_schema': [
'block_fn', 'kernel_size', 'strides', 'filters', 'activation',
'se_ratio', 'expand_ratio', 'use_normalization', 'use_bias', 'is_output'
],
'block_specs': [
('convbn', 3, 2, 16, 'hard_swish', None, None, True, False, False),
('invertedbottleneck', 3, 2, 16, 'relu', 0.25, 1, None, False, True),
('invertedbottleneck', 3, 2, 24, 'relu', None, 72. / 16, None, False,
False),
('invertedbottleneck', 3, 1, 24, 'relu', None, 88. / 24, None, False,
True),
('invertedbottleneck', 5, 2, 40, 'hard_swish', 0.25, 4, None, False,
False),
('invertedbottleneck', 5, 1, 40, 'hard_swish', 0.25, 6, None, False,
False),
('invertedbottleneck', 5, 1, 40, 'hard_swish', 0.25, 6, None, False,
False),
('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 3, None, False,
False),
('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 3, None, False,
True),
# Layers below are different from MobileNetV3Small and have
# half as many filters
('invertedbottleneck', 5, 2, 48, 'hard_swish', 0.25, 3, None, False,
False),
('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 6, None, False,
False),
('invertedbottleneck', 5, 1, 48, 'hard_swish', 0.25, 6, None, False,
True),
('convbn', 1, 1, 288, 'hard_swish', None, None, True, False, False),
('gpooling', None, None, None, None, None, None, None, None, False),
('convbn', 1, 1, 1024, 'hard_swish', None, None, False, True, False),
]
}
SUPPORTED_SPECS_MAP = {
'MobileNetV1': MNV1_BLOCK_SPECS,
'MobileNetV2': MNV2_BLOCK_SPECS,
'MobileNetV3Large': MNV3Large_BLOCK_SPECS,
'MobileNetV3Small': MNV3Small_BLOCK_SPECS,
'MobileNetV3EdgeTPU': MNV3EdgeTPU_BLOCK_SPECS,
'MobileNetMultiMAX': MNMultiMAX_BLOCK_SPECS,
'MobileNetMultiAVG': MNMultiAVG_BLOCK_SPECS,
'MobileNetMultiAVGSeg': MNMultiAVG_SEG_BLOCK_SPECS,
'MobileNetMultiMAXSeg': MNMultiMAX_SEG_BLOCK_SPECS,
'MobileNetV3SmallReducedFilters': MNV3SmallReducedFilters,
}
@dataclasses.dataclass
class BlockSpec(hyperparams.Config):
"""A container class that specifies the block configuration for MobileNet."""
block_fn: str = 'convbn'
kernel_size: int = 3
strides: int = 1
filters: int = 32
use_bias: bool = False
use_normalization: bool = True
activation: str = 'relu6'
# Used for block type InvertedResConv.
expand_ratio: Optional[float] = 6.
# Used for block type InvertedResConv with SE.
se_ratio: Optional[float] = None
use_depthwise: bool = True
use_residual: bool = True
is_output: bool = True
def block_spec_decoder(
specs: Dict[Any, Any],
filter_size_scale: float,
# Set to 1 for mobilenetv1.
divisible_by: int = 8,
finegrain_classification_mode: bool = True):
"""Decodes specs for a block.
Args:
specs: A `dict` specification of block specs of a mobilenet version.
filter_size_scale: A `float` multiplier for the filter size for all
convolution ops. The value must be greater than zero. Typical usage will
be to set this value in (0, 1) to reduce the number of parameters or
computation cost of the model.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
finegrain_classification_mode: If True, the model will keep the last layer
large even for small multipliers, following
https://arxiv.org/abs/1801.04381.
Returns:
A list of `BlockSpec` that defines structure of the base network.
"""
spec_name = specs['spec_name']
block_spec_schema = specs['block_spec_schema']
block_specs = specs['block_specs']
if not block_specs:
raise ValueError(
'The block spec cannot be empty for {} !'.format(spec_name))
if len(block_specs[0]) != len(block_spec_schema):
raise ValueError('The block spec values {} do not match with '
'the schema {}'.format(block_specs[0], block_spec_schema))
decoded_specs = []
for s in block_specs:
kw_s = dict(zip(block_spec_schema, s))
decoded_specs.append(BlockSpec(**kw_s))
# This adjustment applies to V2 and V3
if (spec_name != 'MobileNetV1'
and finegrain_classification_mode
and filter_size_scale < 1.0):
decoded_specs[-1].filters /= filter_size_scale # pytype: disable=annotation-type-mismatch
for ds in decoded_specs:
if ds.filters:
ds.filters = nn_layers.round_filters(filters=ds.filters,
multiplier=filter_size_scale,
divisor=divisible_by,
min_depth=8)
return decoded_specs
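# Illustrative example (not part of the original module): decoding the
# MobileNetV2 specs with a 0.5 width multiplier. Filter counts are rounded to
# multiples of `divisible_by`, and the final 1280-filter layer is kept large
# because `finegrain_classification_mode` defaults to True.
#
#   decoded = block_spec_decoder(
#       specs=SUPPORTED_SPECS_MAP['MobileNetV2'],
#       filter_size_scale=0.5,
#       divisible_by=8)
#   assert all(isinstance(s, BlockSpec) for s in decoded)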
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileNet(tf.keras.Model):
"""Creates a MobileNet family model."""
def __init__(
self,
model_id: str = 'MobileNetV2',
filter_size_scale: float = 1.0,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, 3]),
      # The following arguments are for hyper-parameter tuning.
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      # The following arguments should be kept the same most of the time.
output_stride: Optional[int] = None,
min_depth: int = 8,
      # divisible_by is not used in MobileNetV1.
divisible_by: int = 8,
stochastic_depth_drop_rate: float = 0.0,
regularize_depthwise: bool = False,
use_sync_bn: bool = False,
      # finegrain_classification_mode is not used in MobileNetV1.
finegrain_classification_mode: bool = True,
output_intermediate_endpoints: bool = False,
**kwargs):
"""Initializes a MobileNet model.
Args:
model_id: A `str` of MobileNet version. The supported values are
`MobileNetV1`, `MobileNetV2`, `MobileNetV3Large`, `MobileNetV3Small`,
`MobileNetV3EdgeTPU`, `MobileNetMultiMAX` and `MobileNetMultiAVG`.
filter_size_scale: A `float` of multiplier for the filters (number of
channels) for all convolution ops. The value must be greater than zero.
Typical usage will be to set this value in (0, 1) to reduce the number
of parameters or computation cost of the model.
input_specs: A `tf.keras.layers.InputSpec` of specs of the input tensor.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A `str` for kernel initializer of convolutional
layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
        Defaults to None.
output_stride: An `int` that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous
convolution if necessary to prevent the network from reducing the
spatial resolution of activation maps. Allowed values are 8 (accurate
fully convolutional mode), 16 (fast fully convolutional mode), 32
(classification mode).
min_depth: An `int` of minimum depth (number of channels) for all
convolution ops. Enforced when filter_size_scale < 1, and not an active
constraint when filter_size_scale >= 1.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
stochastic_depth_drop_rate: A `float` of drop rate for drop connect layer.
      regularize_depthwise: If True, apply regularization on depthwise
        convolutions.
use_sync_bn: If True, use synchronized batch normalization.
finegrain_classification_mode: If True, the model will keep the last layer
large even for small multipliers, following
https://arxiv.org/abs/1801.04381.
      output_intermediate_endpoints: A `bool` of whether or not to output the
        intermediate endpoints.
**kwargs: Additional keyword arguments to be passed.
"""
if model_id not in SUPPORTED_SPECS_MAP:
raise ValueError('The MobileNet version {} '
'is not supported'.format(model_id))
if filter_size_scale <= 0:
raise ValueError('filter_size_scale is not greater than zero.')
if output_stride is not None:
if model_id == 'MobileNetV1':
if output_stride not in [8, 16, 32]:
raise ValueError('Only allowed output_stride values are 8, 16, 32.')
else:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
self._model_id = model_id
self._input_specs = input_specs
self._filter_size_scale = filter_size_scale
self._min_depth = min_depth
self._output_stride = output_stride
self._divisible_by = divisible_by
self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
self._regularize_depthwise = regularize_depthwise
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._finegrain_classification_mode = finegrain_classification_mode
self._output_intermediate_endpoints = output_intermediate_endpoints
inputs = tf.keras.Input(shape=input_specs.shape[1:])
block_specs = SUPPORTED_SPECS_MAP.get(model_id)
self._decoded_specs = block_spec_decoder(
specs=block_specs,
filter_size_scale=self._filter_size_scale,
divisible_by=self._get_divisible_by(),
finegrain_classification_mode=self._finegrain_classification_mode)
x, endpoints, next_endpoint_level = self._mobilenet_base(inputs=inputs)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
# Don't include the final layer in `self._output_specs` to support decoders.
endpoints[str(next_endpoint_level)] = x
super(MobileNet, self).__init__(
inputs=inputs, outputs=endpoints, **kwargs)
def _get_divisible_by(self):
if self._model_id == 'MobileNetV1':
return 1
else:
return self._divisible_by
def _mobilenet_base(self,
inputs: tf.Tensor
) -> Tuple[tf.Tensor, Dict[str, tf.Tensor], int]:
"""Builds the base MobileNet architecture.
Args:
inputs: A `tf.Tensor` of shape `[batch_size, height, width, channels]`.
Returns:
A tuple of output Tensor and dictionary that collects endpoints.
"""
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
endpoints = {}
endpoint_level = 2
for i, block_def in enumerate(self._decoded_specs):
block_name = 'block_group_{}_{}'.format(block_def.block_fn, i)
# A small catch for gpooling block with None strides
if not block_def.strides:
block_def.strides = 1
if (self._output_stride is not None and
current_stride == self._output_stride):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= block_def.strides
else:
layer_stride = block_def.strides
layer_rate = 1
current_stride *= block_def.strides
intermediate_endpoints = {}
if block_def.block_fn == 'convbn':
net = Conv2DBNBlock(
filters=block_def.filters,
kernel_size=block_def.kernel_size,
strides=block_def.strides,
activation=block_def.activation,
use_bias=block_def.use_bias,
use_normalization=block_def.use_normalization,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon
)(net)
elif block_def.block_fn == 'depsepconv':
net = nn_blocks.DepthwiseSeparableConvBlock(
filters=block_def.filters,
kernel_size=block_def.kernel_size,
strides=layer_stride,
activation=block_def.activation,
dilation_rate=layer_rate,
regularize_depthwise=self._regularize_depthwise,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
)(net)
elif block_def.block_fn == 'invertedbottleneck':
use_rate = rate
if layer_rate > 1 and block_def.kernel_size != 1:
# We will apply atrous rate in the following cases:
# 1) When kernel_size is not in params, the operation then uses
# default kernel size 3x3.
# 2) When kernel_size is in params, and if the kernel_size is not
# equal to (1, 1) (there is no need to apply atrous convolution to
# any 1x1 convolution).
use_rate = layer_rate
in_filters = net.shape.as_list()[-1]
block = nn_blocks.InvertedBottleneckBlock(
in_filters=in_filters,
out_filters=block_def.filters,
kernel_size=block_def.kernel_size,
strides=layer_stride,
expand_ratio=block_def.expand_ratio,
se_ratio=block_def.se_ratio,
expand_se_in_filters=True,
se_gating_activation='hard_sigmoid',
activation=block_def.activation,
use_depthwise=block_def.use_depthwise,
use_residual=block_def.use_residual,
dilation_rate=use_rate,
regularize_depthwise=self._regularize_depthwise,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,
divisible_by=self._get_divisible_by(),
output_intermediate_endpoints=self._output_intermediate_endpoints,
)
if self._output_intermediate_endpoints:
net, intermediate_endpoints = block(net)
else:
net = block(net)
elif block_def.block_fn == 'gpooling':
net = layers.GlobalAveragePooling2D(keepdims=True)(net)
else:
raise ValueError('Unknown block type {} for layer {}'.format(
block_def.block_fn, i))
net = tf.keras.layers.Activation('linear', name=block_name)(net)
if block_def.is_output:
endpoints[str(endpoint_level)] = net
for key, tensor in intermediate_endpoints.items():
endpoints[str(endpoint_level) + '/' + key] = tensor
if current_stride != self._output_stride:
endpoint_level += 1
if str(endpoint_level) in endpoints:
endpoint_level += 1
return net, endpoints, endpoint_level
def get_config(self):
config_dict = {
'model_id': self._model_id,
'filter_size_scale': self._filter_size_scale,
'min_depth': self._min_depth,
'output_stride': self._output_stride,
'divisible_by': self._divisible_by,
'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
'regularize_depthwise': self._regularize_depthwise,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'finegrain_classification_mode': self._finegrain_classification_mode,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('mobilenet')
def build_mobilenet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds MobileNet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'mobilenet', (f'Inconsistent backbone type '
f'{backbone_type}')
return MobileNet(
model_id=backbone_cfg.model_id,
filter_size_scale=backbone_cfg.filter_size_scale,
input_specs=input_specs,
stochastic_depth_drop_rate=backbone_cfg.stochastic_depth_drop_rate,
output_stride=backbone_cfg.output_stride,
output_intermediate_endpoints=backbone_cfg.output_intermediate_endpoints,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
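# Illustrative usage (not part of the original module): constructing a
# MobileNetV2 backbone directly and inspecting its multi-scale endpoints. The
# endpoint keys map to feature levels, where level `l` has stride 2**l.
#
#   backbone = MobileNet(model_id='MobileNetV2', filter_size_scale=1.0)
#   endpoints = backbone(tf.ones([1, 224, 224, 3]))
#   print(backbone.output_specs)  # e.g. shapes for levels '2' through '5'.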
| 40,965 | 42.860814 | 94 | py |
models | models-master/official/vision/modeling/backbones/revnet_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RevNet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import revnet
class RevNetTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 56, 4),
(128, 104, 4),
)
def test_network_creation(self, input_size, model_id,
endpoint_filter_scale):
"""Test creation of RevNet family models."""
tf.keras.backend.set_image_data_format('channels_last')
network = revnet.RevNet(model_id=model_id)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = network(inputs)
network.summary()
self.assertAllEqual(
[1, input_size / 2**2, input_size / 2**2, 128 * endpoint_filter_scale],
endpoints['2'].shape.as_list())
self.assertAllEqual(
[1, input_size / 2**3, input_size / 2**3, 256 * endpoint_filter_scale],
endpoints['3'].shape.as_list())
self.assertAllEqual(
[1, input_size / 2**4, input_size / 2**4, 512 * endpoint_filter_scale],
endpoints['4'].shape.as_list())
self.assertAllEqual(
[1, input_size / 2**5, input_size / 2**5, 832 * endpoint_filter_scale],
endpoints['5'].shape.as_list())
@parameterized.parameters(1, 3, 4)
def test_input_specs(self, input_dim):
"""Test different input feature dimensions."""
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])
network = revnet.RevNet(model_id=56, input_specs=input_specs)
inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)
_ = network(inputs)
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
model_id=56,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
)
network = revnet.RevNet(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = revnet.RevNet.from_config(network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,215 | 33.956522 | 80 | py |
models | models-master/official/vision/modeling/backbones/spinenet_mobile_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SpineNet."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.modeling.backbones import spinenet_mobile
class SpineNetMobileTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(128, 0.6, 1, 0.0, 24),
(128, 0.65, 1, 0.2, 40),
(256, 1.0, 1, 0.2, 48),
)
def test_network_creation(self, input_size, filter_size_scale, block_repeats,
se_ratio, endpoints_num_filters):
"""Test creation of SpineNet models."""
min_level = 3
max_level = 7
tf.keras.backend.set_image_data_format('channels_last')
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size, input_size, 3])
model = spinenet_mobile.SpineNetMobile(
input_specs=input_specs,
min_level=min_level,
max_level=max_level,
endpoints_num_filters=endpoints_num_filters,
resample_alpha=se_ratio,
block_repeats=block_repeats,
filter_size_scale=filter_size_scale,
init_stochastic_depth_rate=0.2,
)
inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)
endpoints = model(inputs)
for l in range(min_level, max_level + 1):
self.assertIn(str(l), endpoints.keys())
self.assertAllEqual(
[1, input_size / 2**l, input_size / 2**l, endpoints_num_filters],
endpoints[str(l)].shape.as_list())
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
min_level=3,
max_level=7,
endpoints_num_filters=256,
se_ratio=0.2,
expand_ratio=6,
block_repeats=1,
filter_size_scale=1.0,
init_stochastic_depth_rate=0.2,
use_sync_bn=False,
activation='relu',
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
use_keras_upsampling_2d=False,
)
network = spinenet_mobile.SpineNetMobile(**kwargs)
expected_config = dict(kwargs)
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = spinenet_mobile.SpineNetMobile.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == '__main__':
tf.test.main()
| 3,948 | 34.258929 | 80 | py |
models | models-master/official/vision/modeling/backbones/mobiledet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of MobileDet Networks."""
import dataclasses
from typing import Any, Dict, Optional, Tuple, List
import tensorflow as tf
from official.modeling import hyperparams
from official.vision.modeling.backbones import factory
from official.vision.modeling.backbones import mobilenet
from official.vision.modeling.layers import nn_blocks
from official.vision.modeling.layers import nn_layers
layers = tf.keras.layers
# pylint: disable=pointless-string-statement
"""
Architecture: https://arxiv.org/abs/2004.14525.
"MobileDets: Searching for Object Detection Architectures for
Mobile Accelerators" Yunyang Xiong, Hanxiao Liu, Suyog Gupta, Berkin Akin,
Gabriel Bender, Yongzhe Wang, Pieter-Jan Kindermans, Mingxing Tan, Vikas Singh,
Bo Chen
Note that the `round_down_protect` flag should be set to False when scaling
the network.
"""
MD_CPU_BLOCK_SPECS = {
'spec_name': 'MobileDetCPU',
# [expand_ratio] is set to 1 and [use_residual] is set to false
# for inverted_bottleneck_no_expansion
# [se_ratio] is set to 0.25 for all inverted_bottleneck layers
# [activation] is set to 'hard_swish' for all applicable layers
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'activation', 'se_ratio', 'expand_ratio',
'use_residual', 'is_output'],
'block_specs': [
('convbn', 3, 2, 16, 'hard_swish', None, None, None, False),
# inverted_bottleneck_no_expansion
('invertedbottleneck', 3, 1, 8, 'hard_swish', 0.25, 1., False, True),
('invertedbottleneck', 3, 2, 16, 'hard_swish', 0.25, 4., False, True),
('invertedbottleneck', 3, 2, 32, 'hard_swish', 0.25, 8., False, False),
('invertedbottleneck', 3, 1, 32, 'hard_swish', 0.25, 4., True, False),
('invertedbottleneck', 3, 1, 32, 'hard_swish', 0.25, 4., True, False),
('invertedbottleneck', 3, 1, 32, 'hard_swish', 0.25, 4., True, True),
('invertedbottleneck', 5, 2, 72, 'hard_swish', 0.25, 8., False, False),
('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, False),
('invertedbottleneck', 5, 1, 72, 'hard_swish', 0.25, 4., True, False),
('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 4., True, False),
('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., False, False),
('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, False),
('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, False),
('invertedbottleneck', 3, 1, 72, 'hard_swish', 0.25, 8., True, True),
('invertedbottleneck', 5, 2, 104, 'hard_swish', 0.25, 8., False, False),
('invertedbottleneck', 5, 1, 104, 'hard_swish', 0.25, 4., True, False),
('invertedbottleneck', 5, 1, 104, 'hard_swish', 0.25, 4., True, False),
('invertedbottleneck', 3, 1, 104, 'hard_swish', 0.25, 4., True, False),
('invertedbottleneck', 3, 1, 144, 'hard_swish', 0.25, 8., False, True),
]
}
MD_DSP_BLOCK_SPECS = {
'spec_name': 'MobileDetDSP',
# [expand_ratio] is set to 1 and [use_residual] is set to false
# for inverted_bottleneck_no_expansion
# [use_depthwise] is set to False for fused_conv
# [se_ratio] is set to None for all inverted_bottleneck layers
# [activation] is set to 'relu6' for all applicable layers
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'activation', 'se_ratio', 'expand_ratio',
'input_compression_ratio', 'output_compression_ratio',
'use_depthwise', 'use_residual', 'is_output'],
'block_specs': [
('convbn', 3, 2, 32, 'relu6',
None, None, None, None, None, None, False),
# inverted_bottleneck_no_expansion
('invertedbottleneck', 3, 1, 24, 'relu6',
None, 1., None, None, True, False, True),
('invertedbottleneck', 3, 2, 32, 'relu6',
None, 4., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 32, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 32, 'relu6',
None, 4., None, None, True, True, False),
('tucker', 3, 1, 32, 'relu6',
None, None, 0.25, 0.75, None, True, True),
('invertedbottleneck', 3, 2, 64, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 64, 'relu6',
None, 4., None, None, True, True, False),
('invertedbottleneck', 3, 1, 64, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 64, 'relu6',
None, 4., None, None, False, True, True), # fused_conv
('invertedbottleneck', 3, 2, 120, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 120, 'relu6',
None, 4., None, None, True, True, False),
('invertedbottleneck', 3, 1, 120, 'relu6',
None, 8, None, None, True, True, False),
('invertedbottleneck', 3, 1, 120, 'relu6',
None, 8., None, None, True, True, False),
('invertedbottleneck', 3, 1, 144, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 144, 'relu6',
None, 8., None, None, True, True, False),
('invertedbottleneck', 3, 1, 144, 'relu6',
None, 8, None, None, True, True, False),
('invertedbottleneck', 3, 1, 144, 'relu6',
None, 8., None, None, True, True, True),
('invertedbottleneck', 3, 2, 160, 'relu6',
None, 4, None, None, True, False, False),
('invertedbottleneck', 3, 1, 160, 'relu6',
None, 4, None, None, True, True, False),
('invertedbottleneck', 3, 1, 160, 'relu6',
None, 4., None, None, False, False, False), # fused_conv
('tucker', 3, 1, 160, 'relu6',
None, None, 0.75, 0.75, None, True, False),
('invertedbottleneck', 3, 1, 240, 'relu6',
None, 8, None, None, True, False, True),
]
}
MD_EdgeTPU_BLOCK_SPECS = {
'spec_name': 'MobileDetEdgeTPU',
# [use_depthwise] is set to False for fused_conv
# [se_ratio] is set to None for all inverted_bottleneck layers
# [activation] is set to 'relu6' for all applicable layers
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'activation', 'se_ratio', 'expand_ratio',
'input_compression_ratio', 'output_compression_ratio',
'use_depthwise', 'use_residual', 'is_output'],
'block_specs': [
('convbn', 3, 2, 32, 'relu6',
None, None, None, None, None, None, False),
('tucker', 3, 1, 16, 'relu6',
None, None, 0.25, 0.75, None, False, True),
('invertedbottleneck', 3, 2, 16, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 16, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 16, 'relu6',
None, 8., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 16, 'relu6',
None, 4., None, None, False, True, True), # fused_conv
('invertedbottleneck', 5, 2, 40, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 40, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 40, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 40, 'relu6',
None, 4., None, None, False, True, True), # fused_conv
('invertedbottleneck', 3, 2, 72, 'relu6',
None, 8, None, None, True, False, False),
('invertedbottleneck', 3, 1, 72, 'relu6',
None, 8, None, None, True, True, False),
('invertedbottleneck', 3, 1, 72, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 72, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 5, 1, 96, 'relu6',
None, 8, None, None, True, False, False),
('invertedbottleneck', 5, 1, 96, 'relu6',
None, 8, None, None, True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu6',
None, 8, None, None, True, True, False),
('invertedbottleneck', 3, 1, 96, 'relu6',
None, 8, None, None, True, True, True),
('invertedbottleneck', 5, 2, 120, 'relu6',
None, 8, None, None, True, False, False),
('invertedbottleneck', 3, 1, 120, 'relu6',
None, 8, None, None, True, True, False),
('invertedbottleneck', 5, 1, 120, 'relu6',
None, 4, None, None, True, True, False),
('invertedbottleneck', 3, 1, 120, 'relu6',
None, 8, None, None, True, True, False),
('invertedbottleneck', 5, 1, 384, 'relu6',
None, 8, None, None, True, False, True),
]
}
MD_GPU_BLOCK_SPECS = {
'spec_name': 'MobileDetGPU',
# [use_depthwise] is set to False for fused_conv
# [se_ratio] is set to None for all inverted_bottleneck layers
# [activation] is set to 'relu6' for all applicable layers
'block_spec_schema': ['block_fn', 'kernel_size', 'strides', 'filters',
'activation', 'se_ratio', 'expand_ratio',
'input_compression_ratio', 'output_compression_ratio',
'use_depthwise', 'use_residual', 'is_output'],
'block_specs': [
# block 0
('convbn', 3, 2, 32, 'relu6',
None, None, None, None, None, None, False),
# block 1
('tucker', 3, 1, 16, 'relu6',
None, None, 0.25, 0.25, None, False, True),
# block 2
('invertedbottleneck', 3, 2, 32, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('tucker', 3, 1, 32, 'relu6',
None, None, 0.25, 0.25, None, True, False),
('tucker', 3, 1, 32, 'relu6',
None, None, 0.25, 0.25, None, True, False),
('tucker', 3, 1, 32, 'relu6',
None, None, 0.25, 0.25, None, True, True),
# block 3
('invertedbottleneck', 3, 2, 64, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 64, 'relu6',
None, 8., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 64, 'relu6',
None, 8., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 64, 'relu6',
None, 4., None, None, False, True, True), # fused_conv
# block 4
('invertedbottleneck', 3, 2, 128, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
# block 5
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 8., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 8., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 8., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 8., None, None, False, True, True), # fused_conv
# block 6
('invertedbottleneck', 3, 2, 128, 'relu6',
None, 4., None, None, False, False, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
('invertedbottleneck', 3, 1, 128, 'relu6',
None, 4., None, None, False, True, False), # fused_conv
# block 7
('invertedbottleneck', 3, 1, 384, 'relu6',
None, 8, None, None, True, False, True),
]
}
SUPPORTED_SPECS_MAP = {
'MobileDetCPU': MD_CPU_BLOCK_SPECS,
'MobileDetDSP': MD_DSP_BLOCK_SPECS,
'MobileDetEdgeTPU': MD_EdgeTPU_BLOCK_SPECS,
'MobileDetGPU': MD_GPU_BLOCK_SPECS,
}
@dataclasses.dataclass
class BlockSpec(hyperparams.Config):
"""A container class that specifies the block configuration for MobileDet."""
block_fn: str = 'convbn'
kernel_size: int = 3
strides: int = 1
filters: int = 32
use_bias: bool = False
use_normalization: bool = True
activation: str = 'relu6'
is_output: bool = True
# Used for block type InvertedResConv and TuckerConvBlock.
use_residual: bool = True
# Used for block type InvertedResConv only.
use_depthwise: bool = True
expand_ratio: Optional[float] = 8.
se_ratio: Optional[float] = None
# Used for block type TuckerConvBlock only.
input_compression_ratio: Optional[float] = None
output_compression_ratio: Optional[float] = None
def block_spec_decoder(
specs: Dict[Any, Any],
filter_size_scale: float,
divisible_by: int = 8) -> List[BlockSpec]:
"""Decodes specs for a block.
Args:
specs: A `dict` specification of block specs of a mobiledet version.
filter_size_scale: A `float` multiplier for the filter size for all
convolution ops. The value must be greater than zero. Typical usage will
be to set this value in (0, 1) to reduce the number of parameters or
computation cost of the model.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
Returns:
A list of `BlockSpec` that defines structure of the base network.
"""
spec_name = specs['spec_name']
block_spec_schema = specs['block_spec_schema']
block_specs = specs['block_specs']
if not block_specs:
raise ValueError(
'The block spec cannot be empty for {} !'.format(spec_name))
if len(block_specs[0]) != len(block_spec_schema):
raise ValueError('The block spec values {} do not match with '
'the schema {}'.format(block_specs[0], block_spec_schema))
decoded_specs = []
for s in block_specs:
kw_s = dict(zip(block_spec_schema, s))
decoded_specs.append(BlockSpec(**kw_s))
for ds in decoded_specs:
if ds.filters:
ds.filters = nn_layers.round_filters(filters=ds.filters,
multiplier=filter_size_scale,
divisor=divisible_by,
round_down_protect=False,
min_depth=8)
return decoded_specs
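# Illustrative example (not part of the original module): scaling the
# MobileDetGPU specs to a 0.5 width multiplier. Filters are rounded with
# `round_down_protect=False`, so, unlike the MobileNet decoder, a rounded
# filter count may drop below 90% of the scaled target value.
#
#   decoded = block_spec_decoder(MD_GPU_BLOCK_SPECS, filter_size_scale=0.5)
#   print([s.filters for s in decoded])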
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileDet(tf.keras.Model):
"""Creates a MobileDet family model."""
def __init__(
self,
model_id: str = 'MobileDetCPU',
filter_size_scale: float = 1.0,
input_specs: tf.keras.layers.InputSpec = layers.InputSpec(
shape=[None, None, None, 3]),
      # The following arguments are for hyper-parameter tuning.
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'VarianceScaling',
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      # The following arguments should be kept the same most of the time.
min_depth: int = 8,
divisible_by: int = 8,
regularize_depthwise: bool = False,
use_sync_bn: bool = False,
**kwargs):
"""Initializes a MobileDet model.
Args:
model_id: A `str` of MobileDet version. The supported values are
`MobileDetCPU`, `MobileDetDSP`, `MobileDetEdgeTPU`, `MobileDetGPU`.
filter_size_scale: A `float` of multiplier for the filters (number of
channels) for all convolution ops. The value must be greater than zero.
Typical usage will be to set this value in (0, 1) to reduce the number
of parameters or computation cost of the model.
input_specs: A `tf.keras.layers.InputSpec` of specs of the input tensor.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_initializer: A `str` for kernel initializer of convolutional
layers.
      kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
        Conv2D. Defaults to None.
      bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
        Defaults to None.
min_depth: An `int` of minimum depth (number of channels) for all
convolution ops. Enforced when filter_size_scale < 1, and not an active
constraint when filter_size_scale >= 1.
divisible_by: An `int` that ensures all inner dimensions are divisible by
this number.
      regularize_depthwise: If True, apply regularization on depthwise
        convolutions.
use_sync_bn: If True, use synchronized batch normalization.
**kwargs: Additional keyword arguments to be passed.
"""
if model_id not in SUPPORTED_SPECS_MAP:
raise ValueError('The MobileDet version {} '
'is not supported'.format(model_id))
if filter_size_scale <= 0:
raise ValueError('filter_size_scale is not greater than zero.')
self._model_id = model_id
self._input_specs = input_specs
self._filter_size_scale = filter_size_scale
self._min_depth = min_depth
self._divisible_by = divisible_by
self._regularize_depthwise = regularize_depthwise
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
inputs = tf.keras.Input(shape=input_specs.shape[1:])
block_specs = SUPPORTED_SPECS_MAP.get(model_id)
self._decoded_specs = block_spec_decoder(
specs=block_specs,
filter_size_scale=self._filter_size_scale,
divisible_by=self._get_divisible_by())
x, endpoints, next_endpoint_level = self._mobiledet_base(inputs=inputs)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(MobileDet, self).__init__(
inputs=inputs, outputs=endpoints, **kwargs)
def _get_divisible_by(self):
return self._divisible_by
def _mobiledet_base(self,
inputs: tf.Tensor
) -> Tuple[tf.Tensor, Dict[str, tf.Tensor], int]:
"""Builds the base MobileDet architecture.
Args:
inputs: A `tf.Tensor` of shape `[batch_size, height, width, channels]`.
Returns:
A tuple of output Tensor and dictionary that collects endpoints.
"""
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
net = inputs
endpoints = {}
endpoint_level = 1
for i, block_def in enumerate(self._decoded_specs):
block_name = 'block_group_{}_{}'.format(block_def.block_fn, i)
if block_def.block_fn == 'convbn':
net = mobilenet.Conv2DBNBlock(
filters=block_def.filters,
kernel_size=block_def.kernel_size,
strides=block_def.strides,
activation=block_def.activation,
use_bias=block_def.use_bias,
use_normalization=block_def.use_normalization,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon
)(net)
elif block_def.block_fn == 'invertedbottleneck':
in_filters = net.shape.as_list()[-1]
net = nn_blocks.InvertedBottleneckBlock(
in_filters=in_filters,
out_filters=block_def.filters,
kernel_size=block_def.kernel_size,
strides=block_def.strides,
expand_ratio=block_def.expand_ratio,
se_ratio=block_def.se_ratio,
se_inner_activation=block_def.activation,
se_gating_activation='sigmoid',
se_round_down_protect=False,
expand_se_in_filters=True,
activation=block_def.activation,
use_depthwise=block_def.use_depthwise,
use_residual=block_def.use_residual,
regularize_depthwise=self._regularize_depthwise,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
divisible_by=self._get_divisible_by()
)(net)
elif block_def.block_fn == 'tucker':
in_filters = net.shape.as_list()[-1]
net = nn_blocks.TuckerConvBlock(
in_filters=in_filters,
out_filters=block_def.filters,
kernel_size=block_def.kernel_size,
strides=block_def.strides,
input_compression_ratio=block_def.input_compression_ratio,
output_compression_ratio=block_def.output_compression_ratio,
activation=block_def.activation,
use_residual=block_def.use_residual,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon,
divisible_by=self._get_divisible_by()
)(net)
else:
raise ValueError('Unknown block type {} for layer {}'.format(
block_def.block_fn, i))
net = tf.keras.layers.Activation('linear', name=block_name)(net)
if block_def.is_output:
endpoints[str(endpoint_level)] = net
endpoint_level += 1
return net, endpoints, endpoint_level
def get_config(self):
config_dict = {
'model_id': self._model_id,
'filter_size_scale': self._filter_size_scale,
'min_depth': self._min_depth,
'divisible_by': self._divisible_by,
'regularize_depthwise': self._regularize_depthwise,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
@factory.register_backbone_builder('mobiledet')
def build_mobiledet(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds MobileDet backbone from a config."""
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'mobiledet', (f'Inconsistent backbone type '
f'{backbone_type}')
return MobileDet(
model_id=backbone_cfg.model_id,
filter_size_scale=backbone_cfg.filter_size_scale,
input_specs=input_specs,
use_sync_bn=norm_activation_config.use_sync_bn,
norm_momentum=norm_activation_config.norm_momentum,
norm_epsilon=norm_activation_config.norm_epsilon,
kernel_regularizer=l2_regularizer)
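# Illustrative usage (not part of the original module): building a
# MobileDetCPU backbone directly and checking the shapes of its endpoints.
#
#   backbone = MobileDet(model_id='MobileDetCPU', filter_size_scale=1.0)
#   endpoints = backbone(tf.ones([1, 320, 320, 3]))
#   for level, feature in endpoints.items():
#     print(level, feature.shape)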
| 24,886 | 41.908621 | 80 | py |
models | models-master/official/vision/modeling/backbones/vit_specs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisionTransformer backbone specs."""
import immutabledict
VIT_SPECS = immutabledict.immutabledict({
'vit-ti16':
dict(
hidden_size=192,
patch_size=16,
transformer=dict(mlp_dim=768, num_heads=3, num_layers=12),
),
'vit-s16':
dict(
hidden_size=384,
patch_size=16,
transformer=dict(mlp_dim=1536, num_heads=6, num_layers=12),
),
'vit-b16':
dict(
hidden_size=768,
patch_size=16,
transformer=dict(mlp_dim=3072, num_heads=12, num_layers=12),
),
'vit-b32':
dict(
hidden_size=768,
patch_size=32,
transformer=dict(mlp_dim=3072, num_heads=12, num_layers=12),
),
'vit-l16':
dict(
hidden_size=1024,
patch_size=16,
transformer=dict(mlp_dim=4096, num_heads=16, num_layers=24),
),
'vit-l32':
dict(
hidden_size=1024,
patch_size=32,
transformer=dict(mlp_dim=4096, num_heads=16, num_layers=24),
),
'vit-h14':
dict(
hidden_size=1280,
patch_size=14,
transformer=dict(mlp_dim=5120, num_heads=16, num_layers=32),
),
'vit-g14':
dict(
hidden_size=1408,
patch_size=14,
transformer=dict(mlp_dim=5632, num_heads=16, num_layers=40),
),
'vit-G14':
dict(
hidden_size=1664,
patch_size=14,
transformer=dict(mlp_dim=8192, num_heads=16, num_layers=48),
),
'vit-e14':
dict(
hidden_size=1792,
patch_size=14,
transformer=dict(mlp_dim=15360, num_heads=16, num_layers=56),
),
})
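# Illustrative example (not part of the original module): looking up a spec.
# A backbone builder would typically unpack the patch size, hidden size, and
# transformer hyper-parameters from the selected entry.
#
#   spec = VIT_SPECS['vit-b16']
#   print(spec['hidden_size'], spec['patch_size'], spec['transformer'])
#   # -> 768 16 {'mlp_dim': 3072, 'num_heads': 12, 'num_layers': 12}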
| 2,412 | 28.790123 | 74 | py |
models | models-master/official/vision/modeling/heads/dense_prediction_heads_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dense_prediction_heads.py."""
import unittest
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from official.vision.modeling.heads import dense_prediction_heads
def get_attribute_heads(att_head_type):
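  """Returns sample attribute head configs for the given head type."""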
if att_head_type == 'regression_head':
return [
dict(
name='depth',
type='regression',
size=1,
prediction_tower_name='',
num_convs=1,
num_filters=128,
)
]
elif att_head_type == 'classification_head':
return [
dict(
name='depth',
type='classification',
size=1,
prediction_tower_name='')
]
elif att_head_type == 'shared_prediction_tower_attribute_heads':
return [
dict(
name='attr_1', type='regression', size=1, prediction_tower_name=''
),
dict(
name='attr_2',
type='classification',
size=1,
prediction_tower_name='tower_1',
),
dict(
name='attr_3',
type='regression',
size=1,
prediction_tower_name='tower_1',
),
]
else:
raise ValueError('Undefined attribute type.')
class RetinaNetHeadTest(parameterized.TestCase, tf.test.TestCase):
@combinations.generate(
combinations.combine(
use_separable_conv=[True, False],
use_sync_bn=[True, False],
share_level_convs=[True, False],
)
)
def test_forward_without_attribute_head(
self, use_separable_conv, use_sync_bn, share_level_convs
):
retinanet_head = dense_prediction_heads.RetinaNetHead(
min_level=3,
max_level=4,
num_classes=3,
num_anchors_per_location=3,
num_convs=2,
num_filters=256,
attribute_heads=None,
use_separable_conv=use_separable_conv,
activation='relu',
use_sync_bn=use_sync_bn,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
share_level_convs=share_level_convs,
)
features = {
'3': np.random.rand(2, 128, 128, 16),
'4': np.random.rand(2, 64, 64, 16),
}
scores, boxes, _ = retinanet_head(features)
self.assertAllEqual(scores['3'].numpy().shape, [2, 128, 128, 9])
self.assertAllEqual(scores['4'].numpy().shape, [2, 64, 64, 9])
self.assertAllEqual(boxes['3'].numpy().shape, [2, 128, 128, 12])
self.assertAllEqual(boxes['4'].numpy().shape, [2, 64, 64, 12])
@parameterized.parameters(
(False, 'regression_head', False),
(True, 'classification_head', True),
(True, 'shared_prediction_tower_attribute_heads', False),
)
def test_forward_with_attribute_head(
self,
use_sync_bn,
att_head_type,
share_classification_heads,
):
retinanet_head = dense_prediction_heads.RetinaNetHead(
min_level=3,
max_level=4,
num_classes=3,
num_anchors_per_location=3,
num_convs=2,
num_filters=256,
attribute_heads=get_attribute_heads(att_head_type),
share_classification_heads=share_classification_heads,
use_separable_conv=True,
activation='relu',
use_sync_bn=use_sync_bn,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
features = {
'3': np.random.rand(2, 128, 128, 16),
'4': np.random.rand(2, 64, 64, 16),
}
scores, boxes, attributes = retinanet_head(features)
self.assertAllEqual(scores['3'].numpy().shape, [2, 128, 128, 9])
self.assertAllEqual(scores['4'].numpy().shape, [2, 64, 64, 9])
self.assertAllEqual(boxes['3'].numpy().shape, [2, 128, 128, 12])
self.assertAllEqual(boxes['4'].numpy().shape, [2, 64, 64, 12])
for att in attributes.values():
self.assertAllEqual(att['3'].numpy().shape, [2, 128, 128, 3])
self.assertAllEqual(att['4'].numpy().shape, [2, 64, 64, 3])
if att_head_type == 'regression_head':
self.assertLen(retinanet_head._att_convs['depth'], 1)
self.assertEqual(retinanet_head._att_convs['depth'][0].filters, 128)
@unittest.expectedFailure
def test_forward_shared_prediction_tower_with_share_classification_heads(
self):
share_classification_heads = True
attribute_heads = get_attribute_heads(
'shared_prediction_tower_attribute_heads')
retinanet_head = dense_prediction_heads.RetinaNetHead(
min_level=3,
max_level=4,
num_classes=3,
num_anchors_per_location=3,
num_convs=2,
num_filters=256,
attribute_heads=attribute_heads,
share_classification_heads=share_classification_heads,
use_separable_conv=True,
activation='relu',
use_sync_bn=True,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
features = {
'3': np.random.rand(2, 128, 128, 16),
'4': np.random.rand(2, 64, 64, 16),
}
retinanet_head(features)
def test_serialize_deserialize(self):
retinanet_head = dense_prediction_heads.RetinaNetHead(
min_level=3,
max_level=7,
num_classes=3,
num_anchors_per_location=9,
num_convs=2,
num_filters=16,
attribute_heads=None,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
config = retinanet_head.get_config()
new_retinanet_head = (
dense_prediction_heads.RetinaNetHead.from_config(config))
self.assertAllEqual(
retinanet_head.get_config(), new_retinanet_head.get_config())
class RpnHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(False, False),
(False, True),
(True, False),
(True, True),
)
def test_forward(self, use_separable_conv, use_sync_bn):
rpn_head = dense_prediction_heads.RPNHead(
min_level=3,
max_level=4,
num_anchors_per_location=3,
num_convs=2,
num_filters=256,
use_separable_conv=use_separable_conv,
activation='relu',
use_sync_bn=use_sync_bn,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
features = {
'3': np.random.rand(2, 128, 128, 16),
'4': np.random.rand(2, 64, 64, 16),
}
scores, boxes = rpn_head(features)
self.assertAllEqual(scores['3'].numpy().shape, [2, 128, 128, 3])
self.assertAllEqual(scores['4'].numpy().shape, [2, 64, 64, 3])
self.assertAllEqual(boxes['3'].numpy().shape, [2, 128, 128, 12])
self.assertAllEqual(boxes['4'].numpy().shape, [2, 64, 64, 12])
def test_serialize_deserialize(self):
rpn_head = dense_prediction_heads.RPNHead(
min_level=3,
max_level=7,
num_anchors_per_location=9,
num_convs=2,
num_filters=16,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
config = rpn_head.get_config()
new_rpn_head = dense_prediction_heads.RPNHead.from_config(config)
self.assertAllEqual(rpn_head.get_config(), new_rpn_head.get_config())
if __name__ == '__main__':
tf.test.main()
| 8,256 | 30.395437 | 78 | py |
models | models-master/official/vision/modeling/heads/instance_heads_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for instance_heads.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.modeling.heads import instance_heads
class DetectionHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(0, 0, False, False),
(0, 1, False, False),
(1, 0, False, False),
(1, 1, False, False),
)
def test_forward(self, num_convs, num_fcs, use_separable_conv, use_sync_bn):
detection_head = instance_heads.DetectionHead(
num_classes=3,
num_convs=num_convs,
num_filters=16,
use_separable_conv=use_separable_conv,
num_fcs=num_fcs,
fc_dims=4,
activation='relu',
use_sync_bn=use_sync_bn,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
roi_features = np.random.rand(2, 10, 128, 128, 16)
scores, boxes = detection_head(roi_features)
self.assertAllEqual(scores.numpy().shape, [2, 10, 3])
self.assertAllEqual(boxes.numpy().shape, [2, 10, 12])
def test_serialize_deserialize(self):
detection_head = instance_heads.DetectionHead(
num_classes=91,
num_convs=0,
num_filters=256,
use_separable_conv=False,
num_fcs=2,
fc_dims=1024,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
config = detection_head.get_config()
new_detection_head = instance_heads.DetectionHead.from_config(config)
self.assertAllEqual(
detection_head.get_config(), new_detection_head.get_config())
class MaskHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(1, 1, False),
(1, 2, False),
(2, 1, False),
(2, 2, False),
)
def test_forward(self, upsample_factor, num_convs, use_sync_bn):
mask_head = instance_heads.MaskHead(
num_classes=3,
upsample_factor=upsample_factor,
num_convs=num_convs,
num_filters=16,
use_separable_conv=False,
activation='relu',
use_sync_bn=use_sync_bn,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
roi_features = np.random.rand(2, 10, 14, 14, 16)
roi_classes = np.zeros((2, 10))
masks = mask_head([roi_features, roi_classes])
self.assertAllEqual(
masks.numpy().shape,
[2, 10, 14 * upsample_factor, 14 * upsample_factor])
def test_serialize_deserialize(self):
mask_head = instance_heads.MaskHead(
num_classes=3,
upsample_factor=2,
num_convs=1,
num_filters=256,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_regularizer=None,
bias_regularizer=None,
)
config = mask_head.get_config()
new_mask_head = instance_heads.MaskHead.from_config(config)
self.assertAllEqual(
mask_head.get_config(), new_mask_head.get_config())
def test_forward_class_agnostic(self):
mask_head = instance_heads.MaskHead(
num_classes=3,
class_agnostic=True
)
roi_features = np.random.rand(2, 10, 14, 14, 16)
roi_classes = np.zeros((2, 10))
masks = mask_head([roi_features, roi_classes])
self.assertAllEqual(masks.numpy().shape, [2, 10, 28, 28])
if __name__ == '__main__':
tf.test.main()
| 4,197 | 30.096296 | 78 | py |
models | models-master/official/vision/modeling/heads/segmentation_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of segmentation heads."""
from typing import List, Union, Optional, Mapping, Tuple, Any
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.modeling.layers import nn_layers
from official.vision.ops import spatial_transform_ops
class MaskScoring(tf.keras.Model):
"""Creates a mask scoring layer.
This implements mask scoring layer from the paper:
Zhaojin Huang, Lichao Huang, Yongchao Gong, Chang Huang, Xinggang Wang.
Mask Scoring R-CNN.
(https://arxiv.org/pdf/1903.00241.pdf)
"""
def __init__(
self,
num_classes: int,
fc_input_size: List[int],
num_convs: int = 3,
num_filters: int = 256,
use_depthwise_convolution: bool = False,
fc_dims: int = 1024,
num_fcs: int = 2,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes mask scoring layer.
Args:
num_classes: An `int` for number of classes.
fc_input_size: A List of `int` for the input size of the
fully connected layers.
num_convs: An`int` for number of conv layers.
num_filters: An `int` for the number of filters for conv layers.
      use_depthwise_convolution: A `bool`, whether or not to use depthwise
        convolutions.
fc_dims: An `int` number of filters for each fully connected layers.
num_fcs: An `int` for number of fully connected layers.
activation: A `str` name of the activation function.
use_sync_bn: A bool, whether or not to use sync batch normalization.
norm_momentum: A float for the momentum in BatchNorm. Defaults to 0.99.
norm_epsilon: A float for the epsilon value in BatchNorm. Defaults to
0.001.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(MaskScoring, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'num_convs': num_convs,
'num_filters': num_filters,
'fc_input_size': fc_input_size,
'fc_dims': fc_dims,
'num_fcs': num_fcs,
'use_sync_bn': use_sync_bn,
'use_depthwise_convolution': use_depthwise_convolution,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'activation': activation,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the mask scoring head."""
conv_op = tf.keras.layers.Conv2D
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
}
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
bn_op = tf.keras.layers.BatchNormalization
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
'synchronized': self._config_dict['use_sync_bn'],
}
self._convs = []
self._conv_norms = []
for i in range(self._config_dict['num_convs']):
if self._config_dict['use_depthwise_convolution']:
self._convs.append(
tf.keras.layers.DepthwiseConv2D(
name='mask-scoring-depthwise-conv-{}'.format(i),
kernel_size=3,
padding='same',
use_bias=False,
depthwise_initializer=tf.keras.initializers.RandomNormal(
stddev=0.01),
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1))
norm_name = 'mask-scoring-depthwise-bn-{}'.format(i)
self._conv_norms.append(bn_op(name=norm_name, **bn_kwargs))
conv_name = 'mask-scoring-conv-{}'.format(i)
if 'kernel_initializer' in conv_kwargs:
conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer(
conv_kwargs['kernel_initializer'])
if self._config_dict['use_depthwise_convolution']:
conv_kwargs['kernel_size'] = 1
self._convs.append(conv_op(name=conv_name, **conv_kwargs))
bn_name = 'mask-scoring-bn-{}'.format(i)
self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs))
self._fcs = []
self._fc_norms = []
for i in range(self._config_dict['num_fcs']):
fc_name = 'mask-scoring-fc-{}'.format(i)
self._fcs.append(
tf.keras.layers.Dense(
units=self._config_dict['fc_dims'],
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1 / 3.0, mode='fan_out', distribution='uniform'),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name=fc_name))
bn_name = 'mask-scoring-fc-bn-{}'.format(i)
self._fc_norms.append(bn_op(name=bn_name, **bn_kwargs))
self._classifier = tf.keras.layers.Dense(
units=self._config_dict['num_classes'],
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name='iou-scores')
super(MaskScoring, self).build(input_shape)
def call(self, inputs: tf.Tensor, training: bool = None): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Forward pass mask scoring head.
Args:
      inputs: A `tf.Tensor` of shape [batch_size, height, width, num_classes],
        representing the segmentation logits.
training: a `bool` indicating whether it is in `training` mode.
Returns:
mask_scores: A `tf.Tensor` of predicted mask scores
[batch_size, num_classes].
"""
x = tf.stop_gradient(inputs)
for conv, bn in zip(self._convs, self._conv_norms):
x = conv(x)
x = bn(x)
x = self._activation(x)
# Casts feat to float32 so the resize op can be run on TPU.
x = tf.cast(x, tf.float32)
x = tf.image.resize(x, size=self._config_dict['fc_input_size'],
method=tf.image.ResizeMethod.BILINEAR)
    # Casts it back to be compatible with the rest of the operations.
x = tf.cast(x, inputs.dtype)
_, h, w, filters = x.get_shape().as_list()
x = tf.reshape(x, [-1, h * w * filters])
for fc, bn in zip(self._fcs, self._fc_norms):
x = fc(x)
x = bn(x)
x = self._activation(x)
ious = self._classifier(x)
return ious
def get_config(self) -> Mapping[str, Any]:
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
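# NOTE: The usage sketch below is not part of the original module; it is an
# added illustration. The dummy logits shape and the hyperparameters are
# assumptions chosen to mirror this head's unit tests.
def _example_mask_scoring_usage() -> tf.Tensor:
  """Runs MaskScoring on dummy segmentation logits and returns IoU scores."""
  head = MaskScoring(num_classes=2, fc_input_size=[4, 4], fc_dims=128)
  # Dummy segmentation logits: [batch, height, width, channels].
  logits = tf.random.uniform([2, 64, 64, 16])
  # Predicted mask IoU scores: [batch, num_classes], here [2, 2].
  return head(logits)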
@tf.keras.utils.register_keras_serializable(package='Vision')
class SegmentationHead(tf.keras.layers.Layer):
"""Creates a segmentation head."""
def __init__(
self,
num_classes: int,
level: Union[int, str],
num_convs: int = 2,
num_filters: int = 256,
use_depthwise_convolution: bool = False,
prediction_kernel_size: int = 1,
upsample_factor: int = 1,
feature_fusion: Optional[str] = None,
decoder_min_level: Optional[int] = None,
decoder_max_level: Optional[int] = None,
low_level: int = 2,
low_level_num_filters: int = 48,
num_decoder_filters: int = 256,
activation: str = 'relu',
logit_activation: Optional[str] = None,
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a segmentation head.
Args:
num_classes: An `int` number of mask classification categories. The number
of classes does not include background class.
level: An `int` or `str`, level to use to build segmentation head.
num_convs: An `int` number of stacked convolution before the last
prediction layer.
num_filters: An `int` number to specify the number of filters used.
Default is 256.
      use_depthwise_convolution: A `bool` to specify whether to use depthwise
        separable convolutions.
prediction_kernel_size: An `int` number to specify the kernel size of the
prediction layer.
upsample_factor: An `int` number to specify the upsampling factor to
generate finer mask. Default 1 means no upsampling is applied.
feature_fusion: One of the constants in nn_layers.FeatureFusion, namely
`deeplabv3plus`, `pyramid_fusion`, `panoptic_fpn_fusion`,
`deeplabv3plus_sum_to_merge`, or None. If `deeplabv3plus`, features from
decoder_features[level] will be fused with low level feature maps from
backbone. If `pyramid_fusion`, multiscale features will be resized and
fused at the target level.
decoder_min_level: An `int` of minimum level from decoder to use in
feature fusion. It is only used when feature_fusion is set to
`panoptic_fpn_fusion`.
decoder_max_level: An `int` of maximum level from decoder to use in
feature fusion. It is only used when feature_fusion is set to
`panoptic_fpn_fusion`.
low_level: An `int` of backbone level to be used for feature fusion. It is
used when feature_fusion is set to `deeplabv3plus` or
`deeplabv3plus_sum_to_merge`.
low_level_num_filters: An `int` of reduced number of filters for the low
level features before fusing it with higher level features. It is only
used when feature_fusion is set to `deeplabv3plus` or
`deeplabv3plus_sum_to_merge`.
num_decoder_filters: An `int` of number of filters in the decoder outputs.
It is only used when feature_fusion is set to `panoptic_fpn_fusion`.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
logit_activation: Activation applied to the final classifier layer logits,
e.g. 'sigmoid', 'softmax'. Can be useful in cases when the task does not
use only cross entropy loss.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(SegmentationHead, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'level': level,
'num_convs': num_convs,
'num_filters': num_filters,
'use_depthwise_convolution': use_depthwise_convolution,
'prediction_kernel_size': prediction_kernel_size,
'upsample_factor': upsample_factor,
'feature_fusion': feature_fusion,
'decoder_min_level': decoder_min_level,
'decoder_max_level': decoder_max_level,
'low_level': low_level,
'low_level_num_filters': low_level_num_filters,
'num_decoder_filters': num_decoder_filters,
'activation': activation,
'logit_activation': logit_activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the segmentation head."""
use_depthwise_convolution = self._config_dict['use_depthwise_convolution']
conv_op = tf.keras.layers.Conv2D
bn_op = tf.keras.layers.BatchNormalization
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
'synchronized': self._config_dict['use_sync_bn'],
}
if self._config_dict['feature_fusion'] in {'deeplabv3plus',
'deeplabv3plus_sum_to_merge'}:
# Deeplabv3+ feature fusion layers.
self._dlv3p_conv = conv_op(
kernel_size=1,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
name='segmentation_head_deeplabv3p_fusion_conv',
filters=self._config_dict['low_level_num_filters'])
self._dlv3p_norm = bn_op(
name='segmentation_head_deeplabv3p_fusion_norm', **bn_kwargs)
elif self._config_dict['feature_fusion'] == 'panoptic_fpn_fusion':
self._panoptic_fpn_fusion = nn_layers.PanopticFPNFusion(
min_level=self._config_dict['decoder_min_level'],
max_level=self._config_dict['decoder_max_level'],
target_level=self._config_dict['level'],
num_filters=self._config_dict['num_filters'],
num_fpn_filters=self._config_dict['num_decoder_filters'],
activation=self._config_dict['activation'],
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
# Segmentation head layers.
self._convs = []
self._norms = []
for i in range(self._config_dict['num_convs']):
if use_depthwise_convolution:
self._convs.append(
tf.keras.layers.DepthwiseConv2D(
name='segmentation_head_depthwise_conv_{}'.format(i),
kernel_size=3,
padding='same',
use_bias=False,
depthwise_initializer=tf.keras.initializers.RandomNormal(
stddev=0.01),
depthwise_regularizer=self._config_dict['kernel_regularizer'],
depth_multiplier=1))
norm_name = 'segmentation_head_depthwise_norm_{}'.format(i)
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
conv_name = 'segmentation_head_conv_{}'.format(i)
self._convs.append(
conv_op(
name=conv_name,
filters=self._config_dict['num_filters'],
kernel_size=3 if not use_depthwise_convolution else 1,
padding='same',
use_bias=False,
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer']))
norm_name = 'segmentation_head_norm_{}'.format(i)
self._norms.append(bn_op(name=norm_name, **bn_kwargs))
self._classifier = conv_op(
name='segmentation_output',
filters=self._config_dict['num_classes'],
kernel_size=self._config_dict['prediction_kernel_size'],
padding='same',
activation=self._config_dict['logit_activation'],
bias_initializer=tf.zeros_initializer(),
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'])
super().build(input_shape)
def call(self, inputs: Tuple[Union[tf.Tensor, Mapping[str, tf.Tensor]],
Union[tf.Tensor, Mapping[str, tf.Tensor]]]):
"""Forward pass of the segmentation head.
    It supports either a tuple of two tensors or a tuple of two dictionaries.
    The first element is the backbone endpoints, and the second is the decoder
    endpoints. When the inputs are tensors, they come from a single level of
    feature maps. When the inputs are dictionaries, they contain multiple
    levels of feature maps, where each key is the level of the feature map.
Args:
inputs: A tuple of 2 feature map tensors of shape
[batch, height_l, width_l, channels] or 2 dictionaries of tensors:
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor` of the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
The first is backbone endpoints, and the second is decoder endpoints.
Returns:
segmentation prediction mask: A `tf.Tensor` of the segmentation mask
scores predicted from input features.
"""
backbone_output = inputs[0]
decoder_output = inputs[1]
if self._config_dict['feature_fusion'] in {'deeplabv3plus',
'deeplabv3plus_sum_to_merge'}:
# deeplabv3+ feature fusion
x = decoder_output[str(self._config_dict['level'])] if isinstance(
decoder_output, dict) else decoder_output
y = backbone_output[str(self._config_dict['low_level'])] if isinstance(
backbone_output, dict) else backbone_output
y = self._dlv3p_norm(self._dlv3p_conv(y))
y = self._activation(y)
x = tf.image.resize(
x, tf.shape(y)[1:3], method=tf.image.ResizeMethod.BILINEAR)
x = tf.cast(x, dtype=y.dtype)
if self._config_dict['feature_fusion'] == 'deeplabv3plus':
x = tf.concat([x, y], axis=self._bn_axis)
else:
x = tf.keras.layers.Add()([x, y])
elif self._config_dict['feature_fusion'] == 'pyramid_fusion':
if not isinstance(decoder_output, dict):
raise ValueError('Only support dictionary decoder_output.')
x = nn_layers.pyramid_feature_fusion(decoder_output,
self._config_dict['level'])
elif self._config_dict['feature_fusion'] == 'panoptic_fpn_fusion':
x = self._panoptic_fpn_fusion(decoder_output)
else:
x = decoder_output[str(self._config_dict['level'])] if isinstance(
decoder_output, dict) else decoder_output
for conv, norm in zip(self._convs, self._norms):
x = conv(x)
x = norm(x)
x = self._activation(x)
if self._config_dict['upsample_factor'] > 1:
x = spatial_transform_ops.nearest_upsampling(
x, scale=self._config_dict['upsample_factor'])
return self._classifier(x)
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()) + list(self._config_dict.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
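# NOTE: The usage sketch below is not part of the original module; it is an
# added illustration. The pyramid levels and feature shapes are assumptions
# that mirror this head's unit tests.
def _example_segmentation_head_usage() -> tf.Tensor:
  """Runs SegmentationHead on dummy backbone and decoder feature pyramids."""
  head = SegmentationHead(num_classes=10, level=3)
  backbone_features = {
      '3': tf.random.uniform([2, 128, 128, 16]),
      '4': tf.random.uniform([2, 64, 64, 16]),
      '5': tf.random.uniform([2, 32, 32, 16]),
  }
  decoder_features = {
      '3': tf.random.uniform([2, 128, 128, 64]),
      '4': tf.random.uniform([2, 64, 64, 64]),
      '5': tf.random.uniform([2, 32, 32, 64]),
      '6': tf.random.uniform([2, 16, 16, 64]),
  }
  # With feature_fusion=None the head reads decoder_features['3'], so the
  # output logits are [2, 128, 128, 10].
  return head((backbone_features, decoder_features))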
| 20174 | 41.653277 | 134 | py |
models | models-master/official/vision/modeling/heads/instance_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of instance prediction heads."""
from typing import List, Union, Optional
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionHead(tf.keras.layers.Layer):
"""Creates a detection head."""
def __init__(
self,
num_classes: int,
num_convs: int = 0,
num_filters: int = 256,
use_separable_conv: bool = False,
num_fcs: int = 2,
fc_dims: int = 1024,
class_agnostic_bbox_pred: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a detection head.
Args:
num_classes: An `int` for the number of classes.
num_convs: An `int` number that represents the number of the intermediate
convolution layers before the FC layers.
num_filters: An `int` number that represents the number of filters of the
intermediate convolution layers.
      use_separable_conv: A `bool` that indicates whether separable
        convolution layers are used.
num_fcs: An `int` number that represents the number of FC layers before
the predictions.
      fc_dims: An `int` number that represents the dimensionality of the FC
        layers.
class_agnostic_bbox_pred: `bool`, indicating whether bboxes should be
predicted for every class or not.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(DetectionHead, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'num_convs': num_convs,
'num_filters': num_filters,
'use_separable_conv': use_separable_conv,
'num_fcs': num_fcs,
'fc_dims': fc_dims,
'class_agnostic_bbox_pred': class_agnostic_bbox_pred,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the head."""
conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
}
if self._config_dict['use_separable_conv']:
conv_kwargs.update({
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
else:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
bn_op = tf.keras.layers.BatchNormalization
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
'synchronized': self._config_dict['use_sync_bn'],
}
self._convs = []
self._conv_norms = []
for i in range(self._config_dict['num_convs']):
conv_name = 'detection-conv_{}'.format(i)
if 'kernel_initializer' in conv_kwargs:
conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer(
conv_kwargs['kernel_initializer'])
self._convs.append(conv_op(name=conv_name, **conv_kwargs))
bn_name = 'detection-conv-bn_{}'.format(i)
self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs))
self._fcs = []
self._fc_norms = []
for i in range(self._config_dict['num_fcs']):
fc_name = 'detection-fc_{}'.format(i)
self._fcs.append(
tf.keras.layers.Dense(
units=self._config_dict['fc_dims'],
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1 / 3.0, mode='fan_out', distribution='uniform'),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name=fc_name))
bn_name = 'detection-fc-bn_{}'.format(i)
self._fc_norms.append(bn_op(name=bn_name, **bn_kwargs))
self._classifier = tf.keras.layers.Dense(
units=self._config_dict['num_classes'],
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name='detection-scores')
num_box_outputs = (4 if self._config_dict['class_agnostic_bbox_pred'] else
self._config_dict['num_classes'] * 4)
self._box_regressor = tf.keras.layers.Dense(
units=num_box_outputs,
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name='detection-boxes')
super(DetectionHead, self).build(input_shape)
def call(self, inputs: tf.Tensor, training: bool = None):
"""Forward pass of box and class branches for the Mask-RCNN model.
Args:
inputs: A `tf.Tensor` of the shape [batch_size, num_instances, roi_height,
roi_width, roi_channels], representing the ROI features.
training: a `bool` indicating whether it is in `training` mode.
Returns:
class_outputs: A `tf.Tensor` of the shape
[batch_size, num_rois, num_classes], representing the class predictions.
box_outputs: A `tf.Tensor` of the shape
[batch_size, num_rois, num_classes * 4], representing the box
predictions.
"""
roi_features = inputs
_, num_rois, height, width, filters = roi_features.get_shape().as_list()
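    # Fold the ROI dimension into the batch dimension so the conv layers run
    # over a single [batch_size * num_rois, height, width, filters] tensor.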
x = tf.reshape(roi_features, [-1, height, width, filters])
for conv, bn in zip(self._convs, self._conv_norms):
x = conv(x)
x = bn(x)
x = self._activation(x)
_, _, _, filters = x.get_shape().as_list()
x = tf.reshape(x, [-1, num_rois, height * width * filters])
for fc, bn in zip(self._fcs, self._fc_norms):
x = fc(x)
x = bn(x)
x = self._activation(x)
classes = self._classifier(x)
boxes = self._box_regressor(x)
return classes, boxes
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
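# NOTE: The usage sketch below is not part of the original module; it is an
# added illustration. The ROI shapes (10 ROIs of 7x7x16 features per image)
# and the class count are assumptions for the demo.
def _example_detection_head_usage():
  """Runs DetectionHead on dummy ROI features."""
  head = DetectionHead(num_classes=3)
  roi_features = tf.random.uniform([2, 10, 7, 7, 16])
  # class_outputs: [2, 10, 3]; box_outputs: [2, 10, 3 * 4].
  class_outputs, box_outputs = head(roi_features)
  return class_outputs, box_outputs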
@tf.keras.utils.register_keras_serializable(package='Vision')
class MaskHead(tf.keras.layers.Layer):
"""Creates a mask head."""
def __init__(
self,
num_classes: int,
upsample_factor: int = 2,
num_convs: int = 4,
num_filters: int = 256,
use_separable_conv: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
class_agnostic: bool = False,
**kwargs):
"""Initializes a mask head.
Args:
num_classes: An `int` of the number of classes.
upsample_factor: An `int` that indicates the upsample factor to generate
the final predicted masks. It should be >= 1.
num_convs: An `int` number that represents the number of the intermediate
convolution layers before the mask prediction layers.
num_filters: An `int` number that represents the number of filters of the
intermediate convolution layers.
      use_separable_conv: A `bool` that indicates whether separable
        convolution layers are used.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
class_agnostic: A `bool`. If set, we use a single channel mask head that
is shared between all classes.
**kwargs: Additional keyword arguments to be passed.
"""
super(MaskHead, self).__init__(**kwargs)
self._config_dict = {
'num_classes': num_classes,
'upsample_factor': upsample_factor,
'num_convs': num_convs,
'num_filters': num_filters,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'class_agnostic': class_agnostic
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the head."""
conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
}
if self._config_dict['use_separable_conv']:
conv_kwargs.update({
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
else:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
bn_op = tf.keras.layers.BatchNormalization
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
'synchronized': self._config_dict['use_sync_bn'],
}
self._convs = []
self._conv_norms = []
for i in range(self._config_dict['num_convs']):
conv_name = 'mask-conv_{}'.format(i)
for initializer_name in ['kernel_initializer', 'depthwise_initializer',
'pointwise_initializer']:
if initializer_name in conv_kwargs:
conv_kwargs[initializer_name] = tf_utils.clone_initializer(
conv_kwargs[initializer_name])
self._convs.append(conv_op(name=conv_name, **conv_kwargs))
bn_name = 'mask-conv-bn_{}'.format(i)
self._conv_norms.append(bn_op(name=bn_name, **bn_kwargs))
self._deconv = tf.keras.layers.Conv2DTranspose(
filters=self._config_dict['num_filters'],
kernel_size=self._config_dict['upsample_factor'],
strides=self._config_dict['upsample_factor'],
padding='valid',
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=self._config_dict['kernel_regularizer'],
bias_regularizer=self._config_dict['bias_regularizer'],
name='mask-upsampling')
self._deconv_bn = bn_op(name='mask-deconv-bn', **bn_kwargs)
if self._config_dict['class_agnostic']:
num_filters = 1
else:
num_filters = self._config_dict['num_classes']
conv_kwargs = {
'filters': num_filters,
'kernel_size': 1,
'padding': 'valid',
}
if self._config_dict['use_separable_conv']:
conv_kwargs.update({
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
else:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
})
self._mask_regressor = conv_op(name='mask-logits', **conv_kwargs)
super(MaskHead, self).build(input_shape)
def call(self, inputs: List[tf.Tensor], training: bool = None):
"""Forward pass of mask branch for the Mask-RCNN model.
Args:
inputs: A `list` of two tensors where
inputs[0]: A `tf.Tensor` of shape [batch_size, num_instances,
roi_height, roi_width, roi_channels], representing the ROI features.
inputs[1]: A `tf.Tensor` of shape [batch_size, num_instances],
representing the classes of the ROIs.
training: A `bool` indicating whether it is in `training` mode.
Returns:
mask_outputs: A `tf.Tensor` of shape
[batch_size, num_instances, roi_height * upsample_factor,
roi_width * upsample_factor], representing the mask predictions.
"""
roi_features, roi_classes = inputs
_, num_rois, height, width, filters = roi_features.get_shape().as_list()
x = tf.reshape(roi_features, [-1, height, width, filters])
for conv, bn in zip(self._convs, self._conv_norms):
x = conv(x)
x = bn(x)
x = self._activation(x)
x = self._deconv(x)
x = self._deconv_bn(x)
x = self._activation(x)
logits = self._mask_regressor(x)
mask_height = height * self._config_dict['upsample_factor']
mask_width = width * self._config_dict['upsample_factor']
if self._config_dict['class_agnostic']:
return tf.reshape(logits, [-1, num_rois, mask_height, mask_width])
else:
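      # Restore the per-class mask channels, then gather the channel matching
      # each ROI's predicted class; batch_dims=2 keeps the batch and ROI
      # dimensions aligned during the gather.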
logits = tf.reshape(
logits,
[-1, num_rois, mask_height, mask_width,
self._config_dict['num_classes']])
return tf.gather(
logits, tf.cast(roi_classes, dtype=tf.int32), axis=-1, batch_dims=2
)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
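# NOTE: The usage sketch below is not part of the original module; it is an
# added illustration of the per-class (class_agnostic=False) path. The ROI
# shapes and class count are assumptions for the demo.
def _example_mask_head_usage() -> tf.Tensor:
  """Runs MaskHead on dummy ROI features and per-ROI class ids."""
  head = MaskHead(num_classes=3, upsample_factor=2)
  roi_features = tf.random.uniform([2, 10, 14, 14, 16])
  roi_classes = tf.zeros([2, 10], dtype=tf.int32)
  # Output masks: [2, 10, 28, 28], i.e. 14 * upsample_factor per side.
  return head([roi_features, roi_classes])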
| 17819 | 40.059908 | 80 | py |
models | models-master/official/vision/modeling/heads/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Heads package definition."""
from official.vision.modeling.heads.dense_prediction_heads import RetinaNetHead
from official.vision.modeling.heads.dense_prediction_heads import RPNHead
from official.vision.modeling.heads.instance_heads import DetectionHead
from official.vision.modeling.heads.instance_heads import MaskHead
from official.vision.modeling.heads.segmentation_heads import MaskScoring
from official.vision.modeling.heads.segmentation_heads import SegmentationHead
| 1088 | 46.347826 | 79 | py |
models | models-master/official/vision/modeling/heads/dense_prediction_heads.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of dense prediction heads."""
from typing import Any, Dict, List, Mapping, Optional, Union
# Import libraries
import numpy as np
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class RetinaNetHead(tf.keras.layers.Layer):
"""Creates a RetinaNet head."""
def __init__(
self,
min_level: int,
max_level: int,
num_classes: int,
num_anchors_per_location: int,
num_convs: int = 4,
num_filters: int = 256,
attribute_heads: Optional[List[Dict[str, Any]]] = None,
share_classification_heads: bool = False,
use_separable_conv: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
num_params_per_anchor: int = 4,
share_level_convs: bool = True,
**kwargs,
):
"""Initializes a RetinaNet head.
Args:
min_level: An `int` number of minimum feature level.
max_level: An `int` number of maximum feature level.
num_classes: An `int` number of classes to predict.
num_anchors_per_location: An `int` number of anchors per pixel location.
num_convs: An `int` number that represents the number of the intermediate
conv layers before the prediction.
num_filters: An `int` number that represents the number of filters of the
intermediate conv layers.
attribute_heads: If not None, a list that contains a dict for each
additional attribute head. Each dict consists of 4 key-value pairs:
`name`, `type` ('regression' or 'classification'), `size` (number of
predicted values for each instance), and `prediction_tower_name`
(optional, specifies shared prediction towers.)
      share_classification_heads: A `bool` that indicates whether to share
        weights among the main and attribute classification heads.
      use_separable_conv: A `bool` that indicates whether separable
        convolution layers are used.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
num_params_per_anchor: Number of parameters required to specify an anchor
box. For example, `num_params_per_anchor` would be 4 for axis-aligned
anchor boxes specified by their y-centers, x-centers, heights, and
widths.
share_level_convs: An optional bool to enable sharing convs
across levels for classnet, boxnet, classifier and box regressor.
If True, convs will be shared across all levels.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self._config_dict = {
'min_level': min_level,
'max_level': max_level,
'num_classes': num_classes,
'num_anchors_per_location': num_anchors_per_location,
'num_convs': num_convs,
'num_filters': num_filters,
'attribute_heads': attribute_heads,
'share_classification_heads': share_classification_heads,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
'num_params_per_anchor': num_params_per_anchor,
'share_level_convs': share_level_convs,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
self._conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
self._conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=0.01),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
self._bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
self._classifier_kwargs = {
'filters': (
self._config_dict['num_classes']
* self._config_dict['num_anchors_per_location']
),
'kernel_size': 3,
'padding': 'same',
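        # Bias is initialized so each anchor starts with a foreground
        # probability of about 0.01 (the prior used in the RetinaNet
        # focal-loss initialization).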
'bias_initializer': tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
self._classifier_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
self._box_regressor_kwargs = {
'filters': (
self._config_dict['num_params_per_anchor']
* self._config_dict['num_anchors_per_location']
),
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
self._box_regressor_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(stddev=1e-5),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
if self._config_dict['attribute_heads']:
self._init_attribute_kwargs()
def _conv_kwargs_new_kernel_init(self, conv_kwargs):
if 'kernel_initializer' in conv_kwargs:
conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer(
conv_kwargs['kernel_initializer']
)
return conv_kwargs
def _init_attribute_kwargs(self):
self._attribute_kwargs = []
for att_config in self._config_dict['attribute_heads']:
att_type = att_config['type']
att_size = att_config['size']
att_prediction_tower_name = att_config['prediction_tower_name']
att_predictor_kwargs = {
'filters': att_size * self._config_dict['num_anchors_per_location'],
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if att_type == 'regression':
att_predictor_kwargs.update(
{'bias_initializer': tf.zeros_initializer()}
)
elif att_type == 'classification':
att_predictor_kwargs.update(
{
'bias_initializer': tf.constant_initializer(
-np.log((1 - 0.01) / 0.01)
)
}
)
else:
raise ValueError(
'Attribute head type {} not supported.'.format(att_type)
)
if (
att_prediction_tower_name
and self._config_dict['share_classification_heads']
):
raise ValueError(
'share_classification_heads cannot be set as True when'
' att_prediction_tower_name is specified.'
)
if not self._config_dict['use_separable_conv']:
att_predictor_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(
stddev=1e-5
),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
self._attribute_kwargs.append(att_predictor_kwargs)
def _apply_prediction_tower(self, features, convs, norms) -> tf.Tensor:
x = features
for conv, norm in zip(convs, norms):
x = conv(x)
x = norm(x)
x = self._activation(x)
return x
def _apply_attribute_net(
self, attributes, level, level_idx, this_level_features, classnet_x
):
prediction_tower_output = {}
for att_config in self._config_dict['attribute_heads']:
att_name = att_config['name']
att_type = att_config['type']
if (
self._config_dict['share_classification_heads']
and att_type == 'classification'
):
attributes[att_name][str(level)] = self._att_predictors[att_name](
classnet_x
)
else:
        def _apply_attribute_prediction_tower(
            attribute_name, features, feature_level
        ):
          return self._apply_prediction_tower(
              features,
              self._att_convs[attribute_name],
              self._att_norms[attribute_name][feature_level],
          )
prediction_tower_name = att_config['prediction_tower_name']
if not prediction_tower_name:
attributes[att_name][str(level)] = self._att_predictors[att_name](
_apply_attribute_prediction_tower(
att_name, this_level_features, level_idx
)
)
else:
if prediction_tower_name not in prediction_tower_output:
prediction_tower_output[prediction_tower_name] = (
_apply_attribute_prediction_tower(
att_name, this_level_features, level_idx
)
)
attributes[att_name][str(level)] = self._att_predictors[att_name](
prediction_tower_output[prediction_tower_name]
)
def _build_prediction_tower(
self, net_name, predictor_name, conv_op, bn_op, predictor_kwargs
):
"""Builds the prediction tower. Convs across levels can be shared or not."""
convs = []
norms = []
for level in range(
self._config_dict['min_level'], self._config_dict['max_level'] + 1
):
if not self._config_dict['share_level_convs']:
this_level_convs = []
this_level_norms = []
for i in range(self._config_dict['num_convs']):
conv_kwargs = self._conv_kwargs_new_kernel_init(self._conv_kwargs)
if not self._config_dict['share_level_convs']:
# Do not share convs.
this_level_convs.append(
conv_op(name=f'{net_name}-conv_{level}_{i}', **conv_kwargs)
)
elif level == self._config_dict['min_level']:
convs.append(conv_op(name=f'{net_name}-conv_{i}', **conv_kwargs))
this_level_norms.append(
bn_op(name=f'{net_name}-conv-norm_{level}_{i}', **self._bn_kwargs)
)
norms.append(this_level_norms)
if not self._config_dict['share_level_convs']:
convs.append(this_level_convs)
# Create predictors after additional convs.
if self._config_dict['share_level_convs']:
predictors = conv_op(name=predictor_name, **predictor_kwargs)
else:
predictors = []
for level in range(
self._config_dict['min_level'], self._config_dict['max_level'] + 1
):
predictors.append(
conv_op(name=f'{predictor_name}-{level}', **predictor_kwargs)
)
return convs, norms, predictors
def _build_attribute_net(self, conv_op, bn_op):
self._att_predictors = {}
self._att_convs = {}
self._att_norms = {}
for att_config, att_predictor_kwargs in zip(
self._config_dict['attribute_heads'], self._attribute_kwargs
):
att_name = att_config['name']
att_num_convs = (
att_config.get('num_convs') or self._config_dict['num_convs']
)
att_num_filters = (
att_config.get('num_filters') or self._config_dict['num_filters']
)
if att_num_convs < 0:
raise ValueError(f'Invalid `num_convs` {att_num_convs} for {att_name}.')
if att_num_filters < 0:
raise ValueError(
f'Invalid `num_filters` {att_num_filters} for {att_name}.'
)
att_conv_kwargs = self._conv_kwargs.copy()
att_conv_kwargs['filters'] = att_num_filters
att_convs_i = []
att_norms_i = []
# Build conv and norm layers.
for level in range(
self._config_dict['min_level'], self._config_dict['max_level'] + 1
):
this_level_att_norms = []
for i in range(att_num_convs):
if level == self._config_dict['min_level']:
att_conv_name = '{}-conv_{}'.format(att_name, i)
att_convs_i.append(conv_op(name=att_conv_name, **att_conv_kwargs))
att_norm_name = '{}-conv-norm_{}_{}'.format(att_name, level, i)
this_level_att_norms.append(
bn_op(name=att_norm_name, **self._bn_kwargs)
)
att_norms_i.append(this_level_att_norms)
self._att_convs[att_name] = att_convs_i
self._att_norms[att_name] = att_norms_i
# Build the final prediction layer.
self._att_predictors[att_name] = conv_op(
name='{}_attributes'.format(att_name), **att_predictor_kwargs
)
def build(self, input_shape: Union[tf.TensorShape, List[tf.TensorShape]]):
"""Creates the variables of the head."""
conv_op = (
tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D
)
bn_op = (
tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization
)
# Class net.
self._cls_convs, self._cls_norms, self._classifier = (
self._build_prediction_tower(
'classnet', 'scores', conv_op, bn_op, self._classifier_kwargs
)
)
# Box net.
self._box_convs, self._box_norms, self._box_regressor = (
self._build_prediction_tower(
'boxnet', 'boxes', conv_op, bn_op, self._box_regressor_kwargs
)
)
# Attribute learning nets.
if self._config_dict['attribute_heads']:
self._build_attribute_net(conv_op, bn_op)
super().build(input_shape)
def call(self, features: Mapping[str, tf.Tensor]):
"""Forward pass of the RetinaNet head.
Args:
features: A `dict` of `tf.Tensor` where
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor`, the feature map tensors, whose shape is
[batch, height_l, width_l, channels].
Returns:
scores: A `dict` of `tf.Tensor` which includes scores of the predictions.
- key: A `str` of the level of the multilevel predictions.
- values: A `tf.Tensor` of the box scores predicted from a particular
feature level, whose shape is
[batch, height_l, width_l, num_classes * num_anchors_per_location].
boxes: A `dict` of `tf.Tensor` which includes coordinates of the
predictions.
- key: A `str` of the level of the multilevel predictions.
- values: A `tf.Tensor` of the box scores predicted from a particular
feature level, whose shape is
[batch, height_l, width_l,
num_params_per_anchor * num_anchors_per_location].
attributes: a dict of (attribute_name, attribute_prediction). Each
`attribute_prediction` is a dict of:
- key: `str`, the level of the multilevel predictions.
- values: `Tensor`, the box scores predicted from a particular feature
level, whose shape is
[batch, height_l, width_l,
attribute_size * num_anchors_per_location].
Can be an empty dictionary if no attribute learning is required.
"""
scores = {}
boxes = {}
if self._config_dict['attribute_heads']:
attributes = {
att_config['name']: {}
for att_config in self._config_dict['attribute_heads']
}
else:
attributes = {}
for i, level in enumerate(
range(self._config_dict['min_level'],
self._config_dict['max_level'] + 1)):
this_level_features = features[str(level)]
if self._config_dict['share_level_convs']:
cls_convs = self._cls_convs
box_convs = self._box_convs
classifier = self._classifier
box_regressor = self._box_regressor
else:
cls_convs = self._cls_convs[i]
box_convs = self._box_convs[i]
classifier = self._classifier[i]
box_regressor = self._box_regressor[i]
# Apply class net.
x = self._apply_prediction_tower(
this_level_features, cls_convs, self._cls_norms[i]
)
scores[str(level)] = classifier(x)
classnet_x = x
# Apply box net.
x = self._apply_prediction_tower(
this_level_features, box_convs, self._box_norms[i]
)
boxes[str(level)] = box_regressor(x)
# Apply attribute nets.
if self._config_dict['attribute_heads']:
self._apply_attribute_net(
attributes, level, i, this_level_features, classnet_x
)
return scores, boxes, attributes
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
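# NOTE: The usage sketch below is not part of the original module; it is an
# added illustration. The pyramid levels, feature shapes, class count, and
# anchor count are assumptions for the demo.
def _example_retinanet_head_usage():
  """Runs RetinaNetHead on a dummy two-level feature pyramid."""
  head = RetinaNetHead(
      min_level=3, max_level=4, num_classes=3, num_anchors_per_location=9)
  features = {
      '3': tf.random.uniform([2, 32, 32, 16]),
      '4': tf.random.uniform([2, 16, 16, 16]),
  }
  # scores['3']: [2, 32, 32, 3 * 9]; boxes['3']: [2, 32, 32, 4 * 9];
  # attributes is empty because attribute_heads is None.
  scores, boxes, attributes = head(features)
  return scores, boxes, attributes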
@tf.keras.utils.register_keras_serializable(package='Vision')
class RPNHead(tf.keras.layers.Layer):
"""Creates a Region Proposal Network (RPN) head."""
def __init__(
self,
min_level: int,
max_level: int,
num_anchors_per_location: int,
num_convs: int = 1,
num_filters: int = 256,
use_separable_conv: bool = False,
activation: str = 'relu',
use_sync_bn: bool = False,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
**kwargs):
"""Initializes a Region Proposal Network head.
Args:
min_level: An `int` number of minimum feature level.
max_level: An `int` number of maximum feature level.
      num_anchors_per_location: An `int` number of anchors per pixel location.
num_convs: An `int` number that represents the number of the intermediate
convolution layers before the prediction.
num_filters: An `int` number that represents the number of filters of the
intermediate convolution layers.
      use_separable_conv: A `bool` that indicates whether separable
        convolution layers are used.
activation: A `str` that indicates which activation is used, e.g. 'relu',
'swish', etc.
use_sync_bn: A `bool` that indicates whether to use synchronized batch
normalization across different replicas.
norm_momentum: A `float` of normalization momentum for the moving average.
norm_epsilon: A `float` added to variance to avoid dividing by zero.
kernel_regularizer: A `tf.keras.regularizers.Regularizer` object for
Conv2D. Default is None.
bias_regularizer: A `tf.keras.regularizers.Regularizer` object for Conv2D.
**kwargs: Additional keyword arguments to be passed.
"""
super(RPNHead, self).__init__(**kwargs)
self._config_dict = {
'min_level': min_level,
'max_level': max_level,
'num_anchors_per_location': num_anchors_per_location,
'num_convs': num_convs,
'num_filters': num_filters,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation = tf_utils.get_activation(activation)
def build(self, input_shape):
"""Creates the variables of the head."""
conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
conv_kwargs = {
'filters': self._config_dict['num_filters'],
'kernel_size': 3,
'padding': 'same',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
conv_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(
stddev=0.01),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
bn_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
bn_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
self._convs = []
self._norms = []
for level in range(
self._config_dict['min_level'], self._config_dict['max_level'] + 1):
this_level_norms = []
for i in range(self._config_dict['num_convs']):
if level == self._config_dict['min_level']:
conv_name = 'rpn-conv_{}'.format(i)
if 'kernel_initializer' in conv_kwargs:
conv_kwargs['kernel_initializer'] = tf_utils.clone_initializer(
conv_kwargs['kernel_initializer'])
self._convs.append(conv_op(name=conv_name, **conv_kwargs))
norm_name = 'rpn-conv-norm_{}_{}'.format(level, i)
this_level_norms.append(bn_op(name=norm_name, **bn_kwargs))
self._norms.append(this_level_norms)
classifier_kwargs = {
'filters': self._config_dict['num_anchors_per_location'],
'kernel_size': 1,
'padding': 'valid',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
classifier_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(
stddev=1e-5),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
self._classifier = conv_op(name='rpn-scores', **classifier_kwargs)
box_regressor_kwargs = {
'filters': 4 * self._config_dict['num_anchors_per_location'],
'kernel_size': 1,
'padding': 'valid',
'bias_initializer': tf.zeros_initializer(),
'bias_regularizer': self._config_dict['bias_regularizer'],
}
if not self._config_dict['use_separable_conv']:
box_regressor_kwargs.update({
'kernel_initializer': tf.keras.initializers.RandomNormal(
stddev=1e-5),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
})
self._box_regressor = conv_op(name='rpn-boxes', **box_regressor_kwargs)
super(RPNHead, self).build(input_shape)
def call(self, features: Mapping[str, tf.Tensor]):
"""Forward pass of the RPN head.
Args:
features: A `dict` of `tf.Tensor` where
- key: A `str` of the level of the multilevel features.
- values: A `tf.Tensor`, the feature map tensors, whose shape is [batch,
height_l, width_l, channels].
Returns:
scores: A `dict` of `tf.Tensor` which includes scores of the predictions.
- key: A `str` of the level of the multilevel predictions.
- values: A `tf.Tensor` of the box scores predicted from a particular
feature level, whose shape is
[batch, height_l, width_l, num_classes * num_anchors_per_location].
boxes: A `dict` of `tf.Tensor` which includes coordinates of the
predictions.
- key: A `str` of the level of the multilevel predictions.
- values: A `tf.Tensor` of the box scores predicted from a particular
feature level, whose shape is
[batch, height_l, width_l, 4 * num_anchors_per_location].
"""
scores = {}
boxes = {}
for i, level in enumerate(
range(self._config_dict['min_level'],
self._config_dict['max_level'] + 1)):
x = features[str(level)]
for conv, norm in zip(self._convs, self._norms[i]):
x = conv(x)
x = norm(x)
x = self._activation(x)
scores[str(level)] = self._classifier(x)
boxes[str(level)] = self._box_regressor(x)
return scores, boxes
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
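# NOTE: The usage sketch below is not part of the original module; it is an
# added illustration. The pyramid levels, feature shapes, and anchor count
# are assumptions for the demo.
def _example_rpn_head_usage():
  """Runs RPNHead on a dummy two-level feature pyramid."""
  head = RPNHead(min_level=3, max_level=4, num_anchors_per_location=3)
  features = {
      '3': tf.random.uniform([2, 32, 32, 16]),
      '4': tf.random.uniform([2, 16, 16, 16]),
  }
  # scores['3']: [2, 32, 32, 3]; boxes['3']: [2, 32, 32, 4 * 3].
  scores, boxes = head(features)
  return scores, boxes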
| 25552 | 37.195815 | 80 | py |
models | models-master/official/vision/modeling/heads/segmentation_heads_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_heads.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.modeling.heads import segmentation_heads
class SegmentationHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(2, 'pyramid_fusion', None, None),
(3, 'pyramid_fusion', None, None),
(2, 'panoptic_fpn_fusion', 2, 5),
(2, 'panoptic_fpn_fusion', 2, 6),
(3, 'panoptic_fpn_fusion', 3, 5),
(3, 'panoptic_fpn_fusion', 3, 6),
(3, 'deeplabv3plus', 3, 6),
(3, 'deeplabv3plus_sum_to_merge', 3, 6))
def test_forward(self, level, feature_fusion,
decoder_min_level, decoder_max_level):
backbone_features = {
'3': np.random.rand(2, 128, 128, 16),
'4': np.random.rand(2, 64, 64, 16),
'5': np.random.rand(2, 32, 32, 16),
}
decoder_features = {
'3': np.random.rand(2, 128, 128, 64),
'4': np.random.rand(2, 64, 64, 64),
'5': np.random.rand(2, 32, 32, 64),
'6': np.random.rand(2, 16, 16, 64),
}
if feature_fusion == 'panoptic_fpn_fusion':
backbone_features['2'] = np.random.rand(2, 256, 256, 16)
decoder_features['2'] = np.random.rand(2, 256, 256, 64)
head = segmentation_heads.SegmentationHead(
num_classes=10,
level=level,
low_level=decoder_min_level,
low_level_num_filters=64,
feature_fusion=feature_fusion,
decoder_min_level=decoder_min_level,
decoder_max_level=decoder_max_level,
num_decoder_filters=64)
logits = head((backbone_features, decoder_features))
if str(level) in decoder_features:
self.assertAllEqual(logits.numpy().shape, [
2, decoder_features[str(level)].shape[1],
decoder_features[str(level)].shape[2], 10
])
def test_serialize_deserialize(self):
head = segmentation_heads.SegmentationHead(num_classes=10, level=3)
config = head.get_config()
new_head = segmentation_heads.SegmentationHead.from_config(config)
self.assertAllEqual(head.get_config(), new_head.get_config())
class MaskScoringHeadTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(1, 1, 64, [4, 4]),
(2, 1, 64, [4, 4]),
(3, 1, 64, [4, 4]),
(1, 2, 32, [8, 8]),
(2, 2, 32, [8, 8]),
(3, 2, 32, [8, 8]),)
def test_forward(self, num_convs, num_fcs,
num_filters, fc_input_size):
features = np.random.rand(2, 64, 64, 16)
head = segmentation_heads.MaskScoring(
num_classes=2,
num_convs=num_convs,
num_filters=num_filters,
fc_dims=128,
num_fcs=num_fcs,
fc_input_size=fc_input_size)
scores = head(features)
self.assertAllEqual(scores.numpy().shape, [2, 2])
def test_serialize_deserialize(self):
head = segmentation_heads.MaskScoring(
num_classes=2, fc_input_size=[4, 4], fc_dims=128)
config = head.get_config()
new_head = segmentation_heads.MaskScoring.from_config(config)
self.assertAllEqual(head.get_config(), new_head.get_config())
if __name__ == '__main__':
tf.test.main()
| 3802 | 32.955357 | 74 | py |
models | models-master/official/vision/tasks/video_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification task definition."""
from typing import Any, Optional, List, Tuple
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import task_factory
from official.modeling import tf_utils
from official.vision.configs import video_classification as exp_cfg
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import video_input
from official.vision.modeling import factory_3d
from official.vision.ops import augment
@task_factory.register_task_cls(exp_cfg.VideoClassificationTask)
class VideoClassificationTask(base_task.Task):
"""A task for video classification."""
def _get_num_classes(self):
"""Gets the number of classes."""
return self.task_config.train_data.num_classes
def _get_feature_shape(self):
"""Get the common feature shape for train and eval."""
return [
d1 if d1 == d2 else None
for d1, d2 in zip(self.task_config.train_data.feature_shape,
self.task_config.validation_data.feature_shape)
]
def _get_num_test_views(self):
"""Gets number of views for test."""
num_test_clips = self.task_config.validation_data.num_test_clips
num_test_crops = self.task_config.validation_data.num_test_crops
num_test_views = num_test_clips * num_test_crops
return num_test_views
def _is_multilabel(self):
"""If the label is multi-labels."""
return self.task_config.train_data.is_multilabel
def build_model(self):
"""Builds video classification model."""
common_input_shape = self._get_feature_shape()
input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)
logging.info('Build model input %r', common_input_shape)
l2_weight_decay = float(self.task_config.losses.l2_weight_decay)
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory_3d.build_model(
self.task_config.model.model_type,
input_specs=input_specs,
model_config=self.task_config.model,
num_classes=self._get_num_classes(),
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
logging.info('Freezing model backbone.')
model.backbone.trainable = False
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(model=model)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def _get_dataset_fn(self, params):
if params.file_type == 'tfrecord':
return tf.data.TFRecordDataset
else:
raise ValueError('Unknown input file type {!r}'.format(params.file_type))
def _get_decoder_fn(self, params):
if params.tfds_name:
decoder = video_input.VideoTfdsDecoder(
image_key=params.image_field_key, label_key=params.label_field_key)
else:
decoder = video_input.Decoder(
image_key=params.image_field_key, label_key=params.label_field_key)
if self.task_config.train_data.output_audio:
assert self.task_config.train_data.audio_feature, 'audio feature is empty'
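      # Registers the raw audio feature so the decoder parses it together with
      # the video frames.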
decoder.add_feature(self.task_config.train_data.audio_feature,
tf.io.VarLenFeature(dtype=tf.float32))
return decoder.decode
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds classification input."""
parser = video_input.Parser(
input_params=params,
image_key=params.image_field_key,
label_key=params.label_field_key)
postprocess_fn = video_input.PostBatchProcessor(params)
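    # Mixup/CutMix, when configured, replaces the default post-batch processor.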
if params.mixup_and_cutmix is not None:
def mixup_and_cutmix(features, labels):
augmenter = augment.MixupAndCutmix(
mixup_alpha=params.mixup_and_cutmix.mixup_alpha,
cutmix_alpha=params.mixup_and_cutmix.cutmix_alpha,
prob=params.mixup_and_cutmix.prob,
label_smoothing=params.mixup_and_cutmix.label_smoothing,
num_classes=self._get_num_classes())
features['image'], labels = augmenter(features['image'], labels)
return features, labels
postprocess_fn = mixup_and_cutmix
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=self._get_dataset_fn(params),
decoder_fn=self._get_decoder_fn(params),
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: Any,
model_outputs: Any,
aux_losses: Optional[Any] = None):
"""Sparse categorical cross entropy loss.
Args:
labels: labels.
model_outputs: Output logits of the classifier.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
all_losses = {}
losses_config = self.task_config.losses
total_loss = None
if self._is_multilabel():
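      # Tracks the mean entropy of the predicted probabilities as a diagnostic;
      # the small epsilon keeps the log finite when a probability reaches zero.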
entropy = -tf.reduce_mean(
tf.reduce_sum(model_outputs * tf.math.log(model_outputs + 1e-8), -1))
total_loss = tf.keras.losses.binary_crossentropy(
labels, model_outputs, from_logits=False)
all_losses.update({
'class_loss': total_loss,
'entropy': entropy,
})
else:
if losses_config.one_hot:
total_loss = tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=False,
label_smoothing=losses_config.label_smoothing)
else:
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=False)
total_loss = tf_utils.safe_mean(total_loss)
all_losses.update({
'class_loss': total_loss,
})
if aux_losses:
all_losses.update({
'reg_loss': aux_losses,
})
total_loss += tf.add_n(aux_losses)
all_losses[self.loss] = total_loss
return all_losses
def build_metrics(self, training: bool = True):
"""Gets streaming metrics for training/validation."""
if self.task_config.losses.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(k=1, name='top_1_accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(k=5, name='top_5_accuracy')
]
if self._is_multilabel():
metrics.append(
tf.keras.metrics.AUC(
curve='ROC', multi_label=self._is_multilabel(), name='ROC-AUC'))
metrics.append(
tf.keras.metrics.RecallAtPrecision(
0.95, name='RecallAtPrecision95'))
metrics.append(
tf.keras.metrics.AUC(
curve='PR', multi_label=self._is_multilabel(), name='PR-AUC'))
if self.task_config.metrics.use_per_class_recall:
for i in range(self._get_num_classes()):
metrics.append(
tf.keras.metrics.Recall(class_id=i, name=f'recall-{i}'))
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=1, name='top_1_accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=5, name='top_5_accuracy')
]
return metrics
def process_metrics(self, metrics: List[Any], labels: Any,
model_outputs: Any):
"""Process and update metrics.
Called when using custom training loop API.
Args:
metrics: a nested structure of metrics objects. The return of function
self.build_metrics.
labels: a tensor or a nested structure of tensors.
model_outputs: a tensor or a nested structure of tensors. For example,
output of the keras model built by self.build_model.
"""
for metric in metrics:
metric.update_state(labels, model_outputs)
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.train_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features['image'] = strategy.experimental_split_to_logical_devices(
features['image'], input_partition_dims)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure the output is cast to float32.
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
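      # Converts logits to probabilities here because the losses below are
      # built with from_logits=False.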
if self._is_multilabel():
outputs = tf.nest.map_structure(tf.math.sigmoid, outputs)
else:
outputs = tf.nest.map_structure(tf.math.softmax, outputs)
all_losses = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
loss = all_losses[self.loss]
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = all_losses
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.eval_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features['image'] = strategy.experimental_split_to_logical_devices(
features['image'], input_partition_dims)
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
logs = self.build_losses(model_outputs=outputs, labels=labels,
aux_losses=model.losses)
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def inference_step(self, features: tf.Tensor, model: tf.keras.Model):
"""Performs the forward step."""
outputs = model(features, training=False)
if self._is_multilabel():
outputs = tf.nest.map_structure(tf.math.sigmoid, outputs)
else:
outputs = tf.nest.map_structure(tf.math.softmax, outputs)
num_test_views = self._get_num_test_views()
if num_test_views > 1:
      # Averages output probabilities across multiple views.
outputs = tf.reshape(outputs, [-1, num_test_views, outputs.shape[-1]])
outputs = tf.reduce_mean(outputs, axis=1)
return outputs
| 14,308 | 37.672973 | 80 | py |
models | models-master/official/vision/tasks/retinanet.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet task definition."""
from typing import Any, List, Mapping, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.vision.configs import retinanet as exp_cfg
from official.vision.dataloaders import input_reader
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import retinanet_input
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tfds_factory
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.losses import focal_loss
from official.vision.losses import loss_utils
from official.vision.modeling import factory
from official.vision.utils.object_detection import visualization_utils
@task_factory.register_task_cls(exp_cfg.RetinaNetTask)
class RetinaNetTask(base_task.Task):
"""A single-replica view of training procedure.
  RetinaNet task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build RetinaNet model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_retinanet(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Build input dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id,
attribute_names=decoder_cfg.attribute_names,
)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
parser = retinanet_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
dtype=params.dtype,
match_threshold=params.parser.match_threshold,
unmatched_threshold=params.parser.unmatched_threshold,
aug_type=params.parser.aug_type,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
combine_fn=input_reader.create_combine_fn(params),
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_attribute_loss(self,
attribute_heads: List[exp_cfg.AttributeHead],
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
box_sample_weight: tf.Tensor) -> float:
"""Computes attribute loss.
Args:
attribute_heads: a list of attribute head configs.
outputs: RetinaNet model outputs.
labels: RetinaNet labels.
box_sample_weight: normalized bounding box sample weights.
Returns:
Attribute loss of all attribute heads.
"""
params = self.task_config
attribute_loss = 0.0
for head in attribute_heads:
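      # Each attribute head contributes either a Huber loss (regression) or a
      # focal loss (classification) term.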
if head.name not in labels['attribute_targets']:
raise ValueError(f'Attribute {head.name} not found in label targets.')
if head.name not in outputs['attribute_outputs']:
raise ValueError(f'Attribute {head.name} not found in model outputs.')
if head.type == 'regression':
y_true_att = loss_utils.multi_level_flatten(
labels['attribute_targets'][head.name], last_dim=head.size
)
y_pred_att = loss_utils.multi_level_flatten(
outputs['attribute_outputs'][head.name], last_dim=head.size
)
att_loss_fn = tf.keras.losses.Huber(
1.0, reduction=tf.keras.losses.Reduction.SUM)
att_loss = att_loss_fn(
y_true=y_true_att,
y_pred=y_pred_att,
sample_weight=box_sample_weight)
elif head.type == 'classification':
y_true_att = loss_utils.multi_level_flatten(
labels['attribute_targets'][head.name], last_dim=None
)
y_true_att = tf.one_hot(y_true_att, head.size)
y_pred_att = loss_utils.multi_level_flatten(
outputs['attribute_outputs'][head.name], last_dim=head.size
)
cls_loss_fn = focal_loss.FocalLoss(
alpha=params.losses.focal_loss_alpha,
gamma=params.losses.focal_loss_gamma,
reduction=tf.keras.losses.Reduction.SUM,
)
att_loss = cls_loss_fn(
y_true=y_true_att,
y_pred=y_pred_att,
sample_weight=box_sample_weight,
)
else:
raise ValueError(f'Attribute type {head.type} not supported.')
attribute_loss += att_loss
return attribute_loss
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None):
"""Build RetinaNet losses."""
params = self.task_config
attribute_heads = self.task_config.model.head.attribute_heads
cls_loss_fn = focal_loss.FocalLoss(
alpha=params.losses.focal_loss_alpha,
gamma=params.losses.focal_loss_gamma,
reduction=tf.keras.losses.Reduction.SUM)
box_loss_fn = tf.keras.losses.Huber(
params.losses.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
cls_sample_weight = labels['cls_weights']
box_sample_weight = labels['box_weights']
num_positives = tf.reduce_sum(box_sample_weight) + 1.0
cls_sample_weight = cls_sample_weight / num_positives
box_sample_weight = box_sample_weight / num_positives
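    # Flattens the per-level targets and outputs across feature levels before
    # computing the classification and box losses.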
y_true_cls = loss_utils.multi_level_flatten(
labels['cls_targets'], last_dim=None)
y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes)
y_pred_cls = loss_utils.multi_level_flatten(
outputs['cls_outputs'], last_dim=params.model.num_classes)
y_true_box = loss_utils.multi_level_flatten(
labels['box_targets'], last_dim=4)
y_pred_box = loss_utils.multi_level_flatten(
outputs['box_outputs'], last_dim=4)
cls_loss = cls_loss_fn(
y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight)
box_loss = box_loss_fn(
y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight)
model_loss = cls_loss + params.losses.box_loss_weight * box_loss
if attribute_heads:
model_loss += self.build_attribute_loss(attribute_heads, outputs, labels,
box_sample_weight)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
total_loss = params.losses.loss_weight * total_loss
return total_loss, cls_loss, box_loss, model_loss
def build_metrics(self, training: bool = True):
"""Build detection metrics."""
metrics = []
metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if not training:
if (
self.task_config.validation_data.tfds_name
and self.task_config.annotation_file
):
raise ValueError(
"Can't evaluate using annotation file when TFDS is used."
)
if self._task_config.use_coco_metrics:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self.task_config.annotation_file,
include_mask=False,
per_category_metrics=self.task_config.per_category_metrics,
max_num_eval_detections=self.task_config.max_num_eval_detections,
)
if self._task_config.use_wod_metrics:
# To use Waymo open dataset metrics, please install one of the pip
# package `waymo-open-dataset-tf-*` from
# https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux
# Note that the package is built with specific tensorflow version and
# will produce error if it does not match the tf version that is
# currently used.
try:
from official.vision.evaluation import wod_detection_evaluator # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
logging.error('waymo-open-dataset should be installed to enable Waymo'
' evaluator.')
raise
self.wod_metric = wod_detection_evaluator.WOD2dDetectionEvaluator()
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = model(features, anchor_boxes=labels['anchor_boxes'],
image_shape=labels['image_info'][:, 1, :],
training=False)
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
if self._task_config.use_coco_metrics:
coco_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
logs.update(
{self.coco_metric.name: (labels['groundtruths'], coco_model_outputs)})
if self.task_config.use_wod_metrics:
wod_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
logs.update(
{self.wod_metric.name: (labels['groundtruths'], wod_model_outputs)})
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
if (
hasattr(self.task_config, 'allow_image_summary')
and self.task_config.allow_image_summary
):
logs.update(
{'visualization': (tf.cast(features, dtype=tf.float32), outputs)}
)
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self._task_config.use_coco_metrics:
if state is None:
self.coco_metric.reset_states()
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
if self._task_config.use_wod_metrics:
if state is None:
self.wod_metric.reset_states()
self.wod_metric.update_state(step_outputs[self.wod_metric.name][0],
step_outputs[self.wod_metric.name][1])
if 'visualization' in step_outputs:
# Update detection state for writing summary if there are artifacts for
# visualization.
if state is None:
state = {}
state.update(visualization_utils.update_detection_state(step_outputs))
if state is None:
# Create an arbitrary state to indicate it's not the first step in the
# following calls to this function.
state = True
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
logs = {}
if self._task_config.use_coco_metrics:
logs.update(self.coco_metric.result())
if self._task_config.use_wod_metrics:
logs.update(self.wod_metric.result())
# Add visualization for summary.
if isinstance(aggregated_logs, dict) and 'image' in aggregated_logs:
validation_outputs = visualization_utils.visualize_outputs(
logs=aggregated_logs, task_config=self.task_config
)
logs.update(validation_outputs)
return logs
| 18,008 | 37.980519 | 138 | py |
models | models-master/official/vision/tasks/semantic_segmentation.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image segmentation task definition."""
from typing import Any, List, Mapping, Optional, Tuple, Union
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.vision.configs import semantic_segmentation as exp_cfg
from official.vision.dataloaders import input_reader
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import segmentation_input
from official.vision.dataloaders import tfds_factory
from official.vision.evaluation import segmentation_metrics
from official.vision.losses import segmentation_losses
from official.vision.modeling import factory
@task_factory.register_task_cls(exp_cfg.SemanticSegmentationTask)
class SemanticSegmentationTask(base_task.Task):
"""A task for semantic segmentation."""
def build_model(self):
"""Builds segmentation model."""
input_specs = tf.keras.layers.InputSpec(shape=[None] +
self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (
tf.keras.regularizers.l2(l2_weight_decay /
2.0) if l2_weight_decay else None)
model = factory.build_segmentation_model(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
    # Builds the model by calling it once on dummy inputs.
dummy_inputs = tf.keras.Input(self.task_config.model.input_size)
_ = model(dummy_inputs, training=False)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if 'all' in self.task_config.init_checkpoint_modules:
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds classification input."""
ignore_label = self.task_config.losses.ignore_label
gt_is_matting_map = self.task_config.losses.gt_is_matting_map
if params.tfds_name:
decoder = tfds_factory.get_segmentation_decoder(params.tfds_name)
else:
decoder = segmentation_input.Decoder(
image_feature=params.image_feature,
additional_dense_features=params.additional_dense_features)
parser = segmentation_input.Parser(
output_size=params.output_size,
crop_size=params.crop_size,
ignore_label=ignore_label,
resize_eval_groundtruth=params.resize_eval_groundtruth,
gt_is_matting_map=gt_is_matting_map,
groundtruth_padded_size=params.groundtruth_padded_size,
aug_scale_min=params.aug_scale_min,
aug_scale_max=params.aug_scale_max,
aug_rand_hflip=params.aug_rand_hflip,
preserve_aspect_ratio=params.preserve_aspect_ratio,
dtype=params.dtype,
image_feature=params.image_feature,
additional_dense_features=params.additional_dense_features)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
combine_fn=input_reader.create_combine_fn(params),
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: Mapping[str, tf.Tensor],
model_outputs: Union[Mapping[str, tf.Tensor], tf.Tensor],
aux_losses: Optional[Any] = None):
"""Segmentation loss.
Args:
labels: labels.
model_outputs: Output logits of the classifier.
      aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
loss_params = self._task_config.losses
segmentation_loss_fn = segmentation_losses.SegmentationLoss(
loss_params.label_smoothing,
loss_params.class_weights,
loss_params.ignore_label,
use_groundtruth_dimension=loss_params.use_groundtruth_dimension,
use_binary_cross_entropy=loss_params.use_binary_cross_entropy,
top_k_percent_pixels=loss_params.top_k_percent_pixels,
gt_is_matting_map=loss_params.gt_is_matting_map)
total_loss = segmentation_loss_fn(model_outputs['logits'], labels['masks'])
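    # Adds an auxiliary mask-scoring loss when the model also predicts per-mask
    # quality scores.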
if 'mask_scores' in model_outputs:
mask_scoring_loss_fn = segmentation_losses.MaskScoringLoss(
loss_params.ignore_label)
total_loss += loss_params.mask_scoring_weight * mask_scoring_loss_fn(
model_outputs['mask_scores'],
model_outputs['logits'],
labels['masks'])
if aux_losses:
total_loss += tf.add_n(aux_losses)
total_loss = loss_params.loss_weight * total_loss
return total_loss
def process_metrics(self, metrics, labels, model_outputs, **kwargs):
"""Process and update metrics.
Called when using custom training loop API.
Args:
metrics: a nested structure of metrics objects. The return of function
self.build_metrics.
labels: a tensor or a nested structure of tensors.
model_outputs: a tensor or a nested structure of tensors. For example,
output of the keras model built by self.build_model.
**kwargs: other args.
"""
for metric in metrics:
if 'mask_scores_mse' == metric.name:
actual_mask_scores = segmentation_losses.get_actual_mask_scores(
model_outputs['logits'], labels['masks'],
self.task_config.losses.ignore_label)
metric.update_state(actual_mask_scores, model_outputs['mask_scores'])
else:
metric.update_state(labels, model_outputs['logits'])
def build_metrics(self, training: bool = True):
"""Gets streaming metrics for training/validation."""
metrics = []
self.iou_metric = None
if training and self.task_config.evaluation.report_train_mean_iou:
metrics.append(
segmentation_metrics.MeanIoU(
name='mean_iou',
num_classes=self.task_config.model.num_classes,
rescale_predictions=False,
dtype=tf.float32))
if self.task_config.model.get('mask_scoring_head'):
metrics.append(
tf.keras.metrics.MeanSquaredError(name='mask_scores_mse'))
if not training:
self.iou_metric = segmentation_metrics.PerClassIoU(
name='per_class_iou',
num_classes=self.task_config.model.num_classes,
rescale_predictions=(
not self.task_config.validation_data.resize_eval_groundtruth),
dtype=tf.float32)
if (self.task_config.validation_data.resize_eval_groundtruth and
self.task_config.model.get('mask_scoring_head')):
      # The mask scores metric can only be computed if labels are scaled to
      # match the predicted mask scores.
metrics.append(
tf.keras.metrics.MeanSquaredError(name='mask_scores_mse'))
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.train_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features = strategy.experimental_split_to_logical_devices(
features, input_partition_dims)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
if isinstance(outputs, tf.Tensor):
outputs = {'logits': outputs}
# Casting output layer as float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure the output is cast to float32.
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
input_partition_dims = self.task_config.eval_input_partition_dims
if input_partition_dims:
strategy = tf.distribute.get_strategy()
features = strategy.experimental_split_to_logical_devices(
features, input_partition_dims)
outputs = self.inference_step(features, model)
if isinstance(outputs, tf.Tensor):
outputs = {'logits': outputs}
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
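    # The validation loss can only be computed when the eval groundtruth is
    # resized to match the logits; otherwise it is reported as 0.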
if self.task_config.validation_data.resize_eval_groundtruth:
loss = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
else:
loss = 0
logs = {self.loss: loss}
if self.iou_metric is not None:
self.iou_metric.update_state(labels, outputs['logits'])
if metrics:
self.process_metrics(metrics, labels, outputs)
return logs
def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model):
"""Performs the forward step."""
return model(inputs, training=False)
def aggregate_logs(self, state=None, step_outputs=None):
if state is None and self.iou_metric is not None:
self.iou_metric.reset_states()
state = self.iou_metric
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
result = {}
if self.iou_metric is not None:
ious = self.iou_metric.result()
# TODO(arashwan): support loading class name from a label map file.
if self.task_config.evaluation.report_per_class_iou:
for i, value in enumerate(ious.numpy()):
result.update({'iou/{}'.format(i): value})
# Computes mean IoU
result.update({'mean_iou': tf.reduce_mean(ious)})
return result
| 13,246 | 37.175793 | 80 | py |
models | models-master/official/vision/tasks/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks package definition."""
from official.vision.tasks.image_classification import ImageClassificationTask
from official.vision.tasks.maskrcnn import MaskRCNNTask
from official.vision.tasks.retinanet import RetinaNetTask
from official.vision.tasks.semantic_segmentation import SemanticSegmentationTask
from official.vision.tasks.video_classification import VideoClassificationTask
| 995 | 44.272727 | 80 | py |
models | models-master/official/vision/tasks/maskrcnn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN task definition."""
import os
from typing import Any, Dict, List, Mapping, Optional, Tuple
from absl import logging
import numpy as np
import tensorflow as tf
from official.common import dataset_fn as dataset_fn_lib
from official.core import base_task
from official.core import task_factory
from official.vision.configs import maskrcnn as exp_cfg
from official.vision.dataloaders import input_reader
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import maskrcnn_input
from official.vision.dataloaders import tf_example_decoder
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.evaluation import coco_evaluator
from official.vision.evaluation import coco_utils
from official.vision.evaluation import instance_metrics as metrics_lib
from official.vision.losses import maskrcnn_losses
from official.vision.modeling import factory
from official.vision.utils.object_detection import visualization_utils
def zero_out_disallowed_class_ids(batch_class_ids: tf.Tensor,
allowed_class_ids: List[int]):
"""Zeroes out IDs of classes not in allowed_class_ids.
Args:
batch_class_ids: A [batch_size, num_instances] int tensor of input
class IDs.
allowed_class_ids: A python list of class IDs which we want to allow.
Returns:
filtered_class_ids: A [batch_size, num_instances] int tensor with any
class ID not in allowed_class_ids set to 0.
"""
allowed_class_ids = tf.constant(allowed_class_ids,
dtype=batch_class_ids.dtype)
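  # Compares every instance ID against every allowed ID via broadcasting, then
  # reduces over the allowed-ID axis to get a per-instance keep mask.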
match_ids = (batch_class_ids[:, :, tf.newaxis] ==
allowed_class_ids[tf.newaxis, tf.newaxis, :])
match_ids = tf.reduce_any(match_ids, axis=2)
return tf.where(match_ids, batch_class_ids, tf.zeros_like(batch_class_ids))
@task_factory.register_task_cls(exp_cfg.MaskRCNNTask)
class MaskRCNNTask(base_task.Task):
"""A single-replica view of training procedure.
  Mask R-CNN task provides artifacts for training/evaluation procedures,
including loading/iterating over Datasets, initializing the model, calculating
the loss, post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Builds Mask R-CNN model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_maskrcnn(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
# Builds the model through warm-up call.
dummy_images = tf.keras.Input(self.task_config.model.input_size)
dummy_image_shape = tf.keras.layers.Input([2])
_ = model(dummy_images, image_shape=dummy_image_shape, training=False)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(model=model)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
ckpt_items = {}
if 'backbone' in self.task_config.init_checkpoint_modules:
ckpt_items.update(backbone=model.backbone)
if 'decoder' in self.task_config.init_checkpoint_modules:
ckpt_items.update(decoder=model.decoder)
ckpt = tf.train.Checkpoint(**ckpt_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(
self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None,
dataset_fn: Optional[dataset_fn_lib.PossibleDatasetType] = None
) -> tf.data.Dataset:
"""Builds input dataset."""
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
include_mask=self._task_config.model.include_mask,
regenerate_source_id=decoder_cfg.regenerate_source_id,
mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
include_mask=self._task_config.model.include_mask,
regenerate_source_id=decoder_cfg.regenerate_source_id,
mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)
else:
raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type))
parser = maskrcnn_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
rpn_match_threshold=params.parser.rpn_match_threshold,
rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold,
rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im,
rpn_fg_fraction=params.parser.rpn_fg_fraction,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_rand_vflip=params.parser.aug_rand_vflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
aug_type=params.parser.aug_type,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances,
include_mask=self.task_config.model.include_mask,
outer_boxes_scale=self.task_config.model.outer_boxes_scale,
mask_crop_size=params.parser.mask_crop_size,
dtype=params.dtype,
)
if not dataset_fn:
dataset_fn = dataset_fn_lib.pick_dataset_fn(params.file_type)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn,
decoder_fn=decoder.decode,
combine_fn=input_reader.create_combine_fn(params),
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def _build_rpn_losses(
self, outputs: Mapping[str, Any],
labels: Mapping[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]:
"""Builds losses for Region Proposal Network (RPN)."""
rpn_score_loss_fn = maskrcnn_losses.RpnScoreLoss(
tf.shape(outputs['box_outputs'])[1])
rpn_box_loss_fn = maskrcnn_losses.RpnBoxLoss(
self.task_config.losses.rpn_huber_loss_delta)
rpn_score_loss = tf.reduce_mean(
rpn_score_loss_fn(outputs['rpn_scores'], labels['rpn_score_targets']))
rpn_box_loss = tf.reduce_mean(
rpn_box_loss_fn(outputs['rpn_boxes'], labels['rpn_box_targets']))
return rpn_score_loss, rpn_box_loss
def _build_frcnn_losses(
self, outputs: Mapping[str, Any],
labels: Mapping[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]:
"""Builds losses for Fast R-CNN."""
cascade_ious = self.task_config.model.roi_sampler.cascade_iou_thresholds
frcnn_cls_loss_fn = maskrcnn_losses.FastrcnnClassLoss(
use_binary_cross_entropy=self.task_config.losses
.frcnn_class_use_binary_cross_entropy,
top_k_percent=self.task_config.losses.frcnn_class_loss_top_k_percent)
frcnn_box_loss_fn = maskrcnn_losses.FastrcnnBoxLoss(
self.task_config.losses.frcnn_huber_loss_delta,
self.task_config.model.detection_head.class_agnostic_bbox_pred)
# Final cls/box losses are computed as an average of all detection heads.
frcnn_cls_loss = 0.0
frcnn_box_loss = 0.0
num_det_heads = 1 if cascade_ious is None else 1 + len(cascade_ious)
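    # Cascade heads store their outputs/targets under keys with an index
    # suffix (e.g. 'class_outputs_1'); the first head uses the bare key.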
for cas_num in range(num_det_heads):
frcnn_cls_loss_i = tf.reduce_mean(
frcnn_cls_loss_fn(
outputs['class_outputs_{}'
.format(cas_num) if cas_num else 'class_outputs'],
outputs['class_targets_{}'
.format(cas_num) if cas_num else 'class_targets']))
frcnn_box_loss_i = tf.reduce_mean(
frcnn_box_loss_fn(
outputs['box_outputs_{}'.format(cas_num
) if cas_num else 'box_outputs'],
outputs['class_targets_{}'
.format(cas_num) if cas_num else 'class_targets'],
outputs['box_targets_{}'.format(cas_num
) if cas_num else 'box_targets']))
frcnn_cls_loss += frcnn_cls_loss_i
frcnn_box_loss += frcnn_box_loss_i
frcnn_cls_loss /= num_det_heads
frcnn_box_loss /= num_det_heads
return frcnn_cls_loss, frcnn_box_loss
def _build_mask_loss(self, outputs: Mapping[str, Any]) -> tf.Tensor:
"""Builds losses for the masks."""
mask_loss_fn = maskrcnn_losses.MaskrcnnLoss()
mask_class_targets = outputs['mask_class_targets']
if self.task_config.allowed_mask_class_ids is not None:
# Classes with ID=0 are ignored by mask_loss_fn in loss computation.
mask_class_targets = zero_out_disallowed_class_ids(
mask_class_targets, self.task_config.allowed_mask_class_ids)
return tf.reduce_mean(
mask_loss_fn(outputs['mask_outputs'], outputs['mask_targets'],
mask_class_targets))
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None) -> Dict[str, tf.Tensor]:
"""Builds Mask R-CNN losses."""
rpn_score_loss, rpn_box_loss = self._build_rpn_losses(outputs, labels)
frcnn_cls_loss, frcnn_box_loss = self._build_frcnn_losses(outputs, labels)
if self.task_config.model.include_mask:
mask_loss = self._build_mask_loss(outputs)
else:
mask_loss = tf.constant(0.0, dtype=tf.float32)
params = self.task_config
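    # The model loss is a weighted sum of the RPN, Fast R-CNN and (optional)
    # mask losses.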
model_loss = (
params.losses.rpn_score_weight * rpn_score_loss +
params.losses.rpn_box_weight * rpn_box_loss +
params.losses.frcnn_class_weight * frcnn_cls_loss +
params.losses.frcnn_box_weight * frcnn_box_loss +
params.losses.mask_weight * mask_loss)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
total_loss = params.losses.loss_weight * total_loss
losses = {
'total_loss': total_loss,
'rpn_score_loss': rpn_score_loss,
'rpn_box_loss': rpn_box_loss,
'frcnn_cls_loss': frcnn_cls_loss,
'frcnn_box_loss': frcnn_box_loss,
'mask_loss': mask_loss,
'model_loss': model_loss,
}
return losses
def _build_coco_metrics(self):
"""Builds COCO metrics evaluator."""
if (not self._task_config.model.include_mask
) or self._task_config.annotation_file:
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self._task_config.annotation_file,
include_mask=self._task_config.model.include_mask,
per_category_metrics=self._task_config.per_category_metrics)
else:
# Builds COCO-style annotation file if include_mask is True, and
# annotation_file isn't provided.
annotation_path = os.path.join(self._logging_dir, 'annotation.json')
if tf.io.gfile.exists(annotation_path):
logging.info(
'annotation.json file exists, skipping creating the annotation'
' file.')
else:
if self._task_config.validation_data.num_examples <= 0:
logging.info('validation_data.num_examples needs to be > 0')
if not self._task_config.validation_data.input_path:
logging.info('Can not create annotation file for tfds.')
logging.info(
'Creating coco-style annotation file: %s', annotation_path)
coco_utils.scan_and_generator_annotation_file(
self._task_config.validation_data.input_path,
self._task_config.validation_data.file_type,
self._task_config.validation_data.num_examples,
self.task_config.model.include_mask, annotation_path,
regenerate_source_id=self._task_config.validation_data.decoder
.simple_decoder.regenerate_source_id)
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=annotation_path,
include_mask=self._task_config.model.include_mask,
per_category_metrics=self._task_config.per_category_metrics)
def build_metrics(self, training: bool = True):
"""Builds detection metrics."""
self.instance_box_perclass_metrics = None
self.instance_mask_perclass_metrics = None
if training:
metric_names = [
'total_loss',
'rpn_score_loss',
'rpn_box_loss',
'frcnn_cls_loss',
'frcnn_box_loss',
'mask_loss',
'model_loss',
]
return [
tf.keras.metrics.Mean(name, dtype=tf.float32) for name in metric_names
]
else:
if self._task_config.use_coco_metrics:
self._build_coco_metrics()
if self._task_config.use_wod_metrics:
# To use Waymo open dataset metrics, please install one of the pip
# package `waymo-open-dataset-tf-*` from
# https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux
# Note that the package is built with specific tensorflow version and
# will produce error if it does not match the tf version that is
# currently used.
try:
from official.vision.evaluation import wod_detection_evaluator # pylint: disable=g-import-not-at-top
except ModuleNotFoundError:
logging.error('waymo-open-dataset should be installed to enable Waymo'
' evaluator.')
raise
self.wod_metric = wod_detection_evaluator.WOD2dDetectionEvaluator()
if self.task_config.use_approx_instance_metrics:
self.instance_box_perclass_metrics = metrics_lib.InstanceMetrics(
name='instance_box_perclass',
num_classes=self.task_config.model.num_classes,
iou_thresholds=np.arange(0.5, 1.0, step=0.05),
)
if self.task_config.model.include_mask:
self.instance_mask_perclass_metrics = metrics_lib.InstanceMetrics(
name='instance_mask_perclass',
use_masks=True,
num_classes=self.task_config.model.num_classes,
iou_thresholds=np.arange(0.5, 1.0, step=0.05),
)
return []
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
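      # Groundtruth boxes/classes (and masks, if enabled) are passed to the
      # model so that detection targets can be sampled inside the forward pass.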
model_kwargs = {
'image_shape': labels['image_info'][:, 1, :],
'anchor_boxes': labels['anchor_boxes'],
'gt_boxes': labels['gt_boxes'],
'gt_classes': labels['gt_classes'],
'training': True,
}
if self.task_config.model.include_mask:
model_kwargs['gt_masks'] = labels['gt_masks']
if self.task_config.model.outer_boxes_scale > 1.0:
model_kwargs['gt_outer_boxes'] = labels['gt_outer_boxes']
outputs = model(
images, **model_kwargs)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
losses = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = losses['total_loss'] / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: losses['total_loss']}
if metrics:
for m in metrics:
m.update_state(losses[m.name])
return logs
def _update_metrics(self, labels, outputs, logs):
instance_predictions = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info'],
}
if 'detection_outer_boxes' in outputs:
instance_predictions['detection_outer_boxes'] = outputs[
'detection_outer_boxes'
]
if 'detection_masks' in outputs:
instance_predictions['detection_masks'] = outputs['detection_masks']
if self._task_config.use_coco_metrics:
logs[self.coco_metric.name] = (
labels['groundtruths'],
instance_predictions,
)
if self.task_config.use_wod_metrics:
logs[self.wod_metric.name] = (
labels['groundtruths'],
instance_predictions,
)
instance_labels = {
'boxes': labels['groundtruths']['boxes'],
'classes': labels['groundtruths']['classes'],
'is_crowds': labels['groundtruths']['is_crowds'],
'image_info': labels['image_info'],
}
if self.instance_box_perclass_metrics is not None:
self.instance_box_perclass_metrics.update_state(
y_true=instance_labels, y_pred=instance_predictions
)
if self.instance_mask_perclass_metrics is not None:
instance_labels['masks'] = labels['groundtruths']['masks']
self.instance_mask_perclass_metrics.update_state(
y_true=instance_labels, y_pred=instance_predictions
)
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
images, labels = inputs
outputs = model(
images,
anchor_boxes=labels['anchor_boxes'],
image_shape=labels['image_info'][:, 1, :],
training=False,
)
logs = {self.loss: 0}
self._update_metrics(labels, outputs, logs)
if (
hasattr(self.task_config, 'allow_image_summary')
and self.task_config.allow_image_summary
):
logs.update(
{'visualization': (tf.cast(images, dtype=tf.float32), outputs)}
)
return logs
def aggregate_logs(
self,
state: Optional[Any] = None,
step_outputs: Optional[Dict[str, Any]] = None,
) -> Optional[Any]:
"""Optional aggregation over logs returned from a validation step."""
if not state:
# The metrics which update state on CPU.
if self.task_config.use_coco_metrics:
self.coco_metric.reset_states()
if self.task_config.use_wod_metrics:
self.wod_metric.reset_states()
if self.task_config.use_coco_metrics:
self.coco_metric.update_state(
step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1],
)
if self.task_config.use_wod_metrics:
self.wod_metric.update_state(
step_outputs[self.wod_metric.name][0],
step_outputs[self.wod_metric.name][1],
)
if 'visualization' in step_outputs:
# Update detection state for writing summary if there are artifacts for
# visualization.
if state is None:
state = {}
state.update(visualization_utils.update_detection_state(step_outputs))
# TODO(allenyan): Mapping `detection_masks` (w.r.t. the `gt_boxes`) back
      # to full masks (w.r.t. the image). Disable mask visualization for now.
state.pop('detection_masks', None)
if not state:
# Create an arbitrary state to indicate it's not the first step in the
# following calls to this function.
state = True
return state
def _reduce_instance_metrics(
self, logs: Dict[str, Any], use_masks: bool = False
):
"""Updates the per class and mean instance metrics in the logs."""
if use_masks:
instance_metrics = self.instance_mask_perclass_metrics
prefix = 'mask_'
else:
instance_metrics = self.instance_box_perclass_metrics
prefix = ''
result = instance_metrics.result()
iou_thresholds = instance_metrics.get_config()['iou_thresholds']
for ap_key in instance_metrics.get_average_precision_metrics_keys():
# (num_iou_thresholds, num_classes)
per_class_ap = tf.where(
result['valid_classes'], result[ap_key], tf.zeros_like(result[ap_key])
)
# (num_iou_thresholds,)
mean_ap_by_iou = tf.math.divide_no_nan(
tf.reduce_sum(per_class_ap, axis=-1),
tf.reduce_sum(
tf.cast(result['valid_classes'], dtype=per_class_ap.dtype),
axis=-1,
),
)
logs[f'{prefix}{ap_key}'] = tf.reduce_mean(mean_ap_by_iou)
for j, iou in enumerate(iou_thresholds):
if int(iou * 100) in {50, 75}:
logs[f'{prefix}{ap_key}{int(iou * 100)}'] = mean_ap_by_iou[j]
if self.task_config.per_category_metrics:
# (num_classes,)
per_class_mean_ap = tf.reduce_mean(per_class_ap, axis=0)
valid_classes = result['valid_classes'].numpy()
for k in range(self.task_config.model.num_classes):
if valid_classes[k]:
logs[f'{prefix}{ap_key} ByCategory/{k}'] = per_class_mean_ap[k]
for j, iou in enumerate(iou_thresholds):
if int(iou * 100) in {50, 75}:
logs[f'{prefix}{ap_key}{int(iou * 100)} ByCategory/{k}'] = (
per_class_ap[j][k]
)
def reduce_aggregated_logs(
self,
aggregated_logs: Dict[str, Any],
global_step: Optional[tf.Tensor] = None,
) -> Dict[str, tf.Tensor]:
"""Optional reduce of aggregated logs over validation steps."""
logs = {}
# The metrics which update state on device.
if self.instance_box_perclass_metrics is not None:
self._reduce_instance_metrics(logs, use_masks=False)
self.instance_box_perclass_metrics.reset_state()
if self.instance_mask_perclass_metrics is not None:
self._reduce_instance_metrics(logs, use_masks=True)
self.instance_mask_perclass_metrics.reset_state()
# The metrics which update state on CPU.
if self.task_config.use_coco_metrics:
logs.update(self.coco_metric.result())
if self.task_config.use_wod_metrics:
logs.update(self.wod_metric.result())
# Add visualization for summary.
if isinstance(aggregated_logs, dict) and 'image' in aggregated_logs:
validation_outputs = visualization_utils.visualize_outputs(
logs=aggregated_logs, task_config=self.task_config
)
logs.update(validation_outputs)
return logs
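# A minimal, self-contained sketch (illustrative only, not part of the original
# task) of the per-class AP reduction performed in `_reduce_instance_metrics`:
# invalid classes are masked out, APs are averaged over valid classes per IoU
# threshold, then over thresholds. The AP values below are made up.
def _example_mean_ap_reduction():
  # (num_iou_thresholds=2, num_classes=3); class 2 is assumed to be invalid.
  per_class_ap = tf.constant([[0.6, 0.4, 0.0],
                              [0.5, 0.3, 0.0]])
  valid_classes = tf.constant([True, True, False])
  per_class_ap = tf.where(valid_classes, per_class_ap,
                          tf.zeros_like(per_class_ap))
  # Mean over valid classes for each IoU threshold: [0.5, 0.4].
  mean_ap_by_iou = tf.math.divide_no_nan(
      tf.reduce_sum(per_class_ap, axis=-1),
      tf.reduce_sum(tf.cast(valid_classes, per_class_ap.dtype), axis=-1))
  # Final mean AP over IoU thresholds: 0.45.
  return tf.reduce_mean(mean_ap_by_iou)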
| 25,240 | 39.192675 | 138 | py |
models | models-master/official/vision/tasks/image_classification.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classification task definition."""
from typing import Any, List, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.modeling import tf_utils
from official.vision.configs import image_classification as exp_cfg
from official.vision.dataloaders import classification_input
from official.vision.dataloaders import input_reader
from official.vision.dataloaders import input_reader_factory
from official.vision.dataloaders import tfds_factory
from official.vision.modeling import factory
from official.vision.ops import augment
_EPSILON = 1e-6
@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)
class ImageClassificationTask(base_task.Task):
"""A task for image classification."""
def build_model(self):
"""Builds classification model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_classification_model(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
if self.task_config.freeze_backbone:
model.backbone.trainable = False
# Builds the model
dummy_inputs = tf.keras.Input(self.task_config.model.input_size)
_ = model(dummy_inputs, training=False)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(model=model)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(
self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Builds classification input."""
num_classes = self.task_config.model.num_classes
input_size = self.task_config.model.input_size
image_field_key = self.task_config.train_data.image_field_key
label_field_key = self.task_config.train_data.label_field_key
is_multilabel = self.task_config.train_data.is_multilabel
if params.tfds_name:
decoder = tfds_factory.get_classification_decoder(params.tfds_name)
else:
decoder = classification_input.Decoder(
image_field_key=image_field_key, label_field_key=label_field_key,
is_multilabel=is_multilabel)
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
image_field_key=image_field_key,
label_field_key=label_field_key,
decode_jpeg_only=params.decode_jpeg_only,
aug_rand_hflip=params.aug_rand_hflip,
aug_crop=params.aug_crop,
aug_type=params.aug_type,
color_jitter=params.color_jitter,
random_erasing=params.random_erasing,
is_multilabel=is_multilabel,
dtype=params.dtype,
center_crop_fraction=params.center_crop_fraction,
tf_resize_method=params.tf_resize_method,
three_augment=params.three_augment)
postprocess_fn = None
if params.mixup_and_cutmix:
postprocess_fn = augment.MixupAndCutmix(
mixup_alpha=params.mixup_and_cutmix.mixup_alpha,
cutmix_alpha=params.mixup_and_cutmix.cutmix_alpha,
prob=params.mixup_and_cutmix.prob,
label_smoothing=params.mixup_and_cutmix.label_smoothing,
num_classes=num_classes)
def sample_fn(repeated_augment, dataset):
weights = [1 / repeated_augment] * repeated_augment
dataset = tf.data.Dataset.sample_from_datasets(
datasets=[dataset] * repeated_augment,
weights=weights,
seed=None,
stop_on_empty_dataset=True,
)
return dataset
is_repeated_augment = (
params.is_training
and params.repeated_augment is not None
)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
combine_fn=input_reader.create_combine_fn(params),
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn,
sample_fn=(lambda ds: sample_fn(params.repeated_augment, ds))
if is_repeated_augment
else None,
)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: tf.Tensor,
model_outputs: tf.Tensor,
aux_losses: Optional[Any] = None) -> tf.Tensor:
"""Builds sparse categorical cross entropy loss.
Args:
labels: Input groundtruth labels.
model_outputs: Output logits of the classifier.
      aux_losses: The auxiliary loss tensors, i.e. `losses` in tf.keras.Model.
Returns:
The total loss tensor.
"""
losses_config = self.task_config.losses
is_multilabel = self.task_config.train_data.is_multilabel
if not is_multilabel:
if losses_config.use_binary_cross_entropy:
total_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=model_outputs
)
# Average over all object classes inside an image.
total_loss = tf.reduce_mean(total_loss, axis=-1)
elif losses_config.one_hot:
total_loss = tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=True,
label_smoothing=losses_config.label_smoothing)
elif losses_config.soft_labels:
total_loss = tf.nn.softmax_cross_entropy_with_logits(
labels, model_outputs)
else:
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=True)
else:
# Multi-label binary cross entropy loss. This will apply `reduce_mean`.
total_loss = tf.keras.losses.binary_crossentropy(
labels,
model_outputs,
from_logits=True,
label_smoothing=losses_config.label_smoothing,
axis=-1)
      # Multiply by num_classes to behave like `reduce_sum`.
total_loss = total_loss * self.task_config.model.num_classes
total_loss = tf_utils.safe_mean(total_loss)
if aux_losses:
total_loss += tf.add_n(aux_losses)
total_loss = losses_config.loss_weight * total_loss
return total_loss
def build_metrics(self,
training: bool = True) -> List[tf.keras.metrics.Metric]:
"""Gets streaming metrics for training/validation."""
is_multilabel = self.task_config.train_data.is_multilabel
if not is_multilabel:
k = self.task_config.evaluation.top_k
if (self.task_config.losses.one_hot or
self.task_config.losses.soft_labels):
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))]
if hasattr(
self.task_config.evaluation, 'precision_and_recall_thresholds'
) and self.task_config.evaluation.precision_and_recall_thresholds:
thresholds = self.task_config.evaluation.precision_and_recall_thresholds # pylint: disable=line-too-long
# pylint:disable=g-complex-comprehension
metrics += [
tf.keras.metrics.Precision(
thresholds=th,
name='precision_at_threshold_{}'.format(th),
top_k=1) for th in thresholds
]
metrics += [
tf.keras.metrics.Recall(
thresholds=th,
name='recall_at_threshold_{}'.format(th),
top_k=1) for th in thresholds
]
# Add per-class precision and recall.
if hasattr(
self.task_config.evaluation,
'report_per_class_precision_and_recall'
) and self.task_config.evaluation.report_per_class_precision_and_recall:
for class_id in range(self.task_config.model.num_classes):
metrics += [
tf.keras.metrics.Precision(
thresholds=th,
class_id=class_id,
name=f'precision_at_threshold_{th}/{class_id}',
top_k=1) for th in thresholds
]
metrics += [
tf.keras.metrics.Recall(
thresholds=th,
class_id=class_id,
name=f'recall_at_threshold_{th}/{class_id}',
top_k=1) for th in thresholds
]
# pylint:enable=g-complex-comprehension
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=k, name='top_{}_accuracy'.format(k))]
else:
metrics = []
      # These metrics destabilize the training if included in training. The jobs
# fail due to OOM.
# TODO(arashwan): Investigate adding following metric to train.
if not training:
metrics = [
tf.keras.metrics.AUC(
name='globalPR-AUC',
curve='PR',
multi_label=False,
from_logits=True),
tf.keras.metrics.AUC(
name='meanPR-AUC',
curve='PR',
multi_label=True,
num_labels=self.task_config.model.num_classes,
from_logits=True),
]
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
optimizer: The optimizer for this training step.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
is_multilabel = self.task_config.train_data.is_multilabel
if self.task_config.losses.one_hot and not is_multilabel:
labels = tf.one_hot(labels, self.task_config.model.num_classes)
if self.task_config.losses.use_binary_cross_entropy:
# BCE loss converts the multiclass classification to multilabel. The
# corresponding label value of objects present in the image would be one.
if self.task_config.train_data.mixup_and_cutmix is not None:
        # Label values below off_value_threshold are mapped to zero and values
        # above it are mapped to one. Negative labels are guaranteed to have a
        # value less than or equal to the off_value from mixup.
off_value_threshold = (
self.task_config.train_data.mixup_and_cutmix.label_smoothing
/ self.task_config.model.num_classes
)
labels = tf.where(
tf.less(labels, off_value_threshold + _EPSILON), 0.0, 1.0)
elif tf.rank(labels) == 1:
labels = tf.one_hot(labels, self.task_config.model.num_classes)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure the output is cast as float32.
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss = self.build_losses(
model_outputs=outputs,
labels=labels,
aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
# Convert logits to softmax for metric computation if needed.
if hasattr(self.task_config.model,
'output_softmax') and self.task_config.model.output_softmax:
outputs = tf.nn.softmax(outputs, axis=-1)
if metrics:
self.process_metrics(metrics, labels, outputs)
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Runs validatation step.
Args:
inputs: A tuple of input tensors of (features, labels).
model: A tf.keras.Model instance.
metrics: A nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
one_hot = self.task_config.losses.one_hot
soft_labels = self.task_config.losses.soft_labels
is_multilabel = self.task_config.train_data.is_multilabel
    # Note: `soft_labels` only applies to the training phase. In the validation
    # phase, labels are still integer ids and need to be converted to one-hot
    # format.
if (one_hot or soft_labels) and not is_multilabel:
labels = tf.one_hot(labels, self.task_config.model.num_classes)
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
loss = self.build_losses(
model_outputs=outputs,
labels=labels,
aux_losses=model.losses)
logs = {self.loss: loss}
# Convert logits to softmax for metric computation if needed.
if hasattr(self.task_config.model,
'output_softmax') and self.task_config.model.output_softmax:
outputs = tf.nn.softmax(outputs, axis=-1)
if metrics:
self.process_metrics(metrics, labels, outputs)
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model):
"""Performs the forward step."""
return model(inputs, training=False)
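# A minimal sketch (illustrative only, not part of the original task) of the
# label handling on the binary cross-entropy path in `train_step`: integer ids
# become one-hot targets, while mixup/cutmix-smoothed labels are thresholded at
# the mixup "off" value. The num_classes/label_smoothing values are arbitrary.
def _example_bce_label_conversion():
  num_classes = 4
  label_smoothing = 0.1
  # Without mixup/cutmix: sparse id -> one-hot target.
  dense_labels = tf.one_hot(tf.constant([2]), num_classes)
  # With mixup/cutmix: values at or below label_smoothing / num_classes are
  # treated as negatives, everything else as positives.
  off_value_threshold = label_smoothing / num_classes
  soft_labels = tf.constant([[0.025, 0.0, 0.525, 0.475]])
  multi_hot = tf.where(
      tf.less(soft_labels, off_value_threshold + _EPSILON), 0.0, 1.0)
  # multi_hot == [[0., 0., 1., 1.]]
  return dense_labels, multi_hot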
| 16,689 | 38.086651 | 115 | py |
models | models-master/official/vision/ops/nms.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow implementation of non max suppression."""
# Import libraries
import tensorflow as tf
from official.vision.ops import box_ops
NMS_TILE_SIZE = 512
def _self_suppression(iou, _, iou_sum):
batch_size = tf.shape(iou)[0]
can_suppress_others = tf.cast(
tf.reshape(tf.reduce_max(iou, 1) <= 0.5, [batch_size, -1, 1]), iou.dtype)
iou_suppressed = tf.reshape(
tf.cast(tf.reduce_max(can_suppress_others * iou, 1) <= 0.5, iou.dtype),
[batch_size, -1, 1]) * iou
iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2])
return [
iou_suppressed,
tf.reduce_any(iou_sum - iou_sum_new > 0.5), iou_sum_new
]
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx):
batch_size = tf.shape(boxes)[0]
new_slice = tf.slice(boxes, [0, inner_idx * NMS_TILE_SIZE, 0],
[batch_size, NMS_TILE_SIZE, 4])
iou = box_ops.bbox_overlap(new_slice, box_slice)
ret_slice = tf.expand_dims(
tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype),
2) * box_slice
return boxes, ret_slice, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx):
"""Process boxes in the range [idx*NMS_TILE_SIZE, (idx+1)*NMS_TILE_SIZE).
Args:
boxes: a tensor with a shape of [batch_size, anchors, 4].
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
output_size: an int32 tensor of size [batch_size]. Representing the number
of selected boxes for each batch.
idx: an integer scalar representing induction variable.
Returns:
boxes: updated boxes.
iou_threshold: pass down iou_threshold to the next iteration.
output_size: the updated output_size.
idx: the updated induction variable.
"""
boxes_shape = tf.shape(boxes)
num_tiles = boxes_shape[1] // NMS_TILE_SIZE
batch_size = boxes_shape[0]
# Iterates over tiles that can possibly suppress the current tile.
box_slice = tf.slice(boxes, [0, idx * NMS_TILE_SIZE, 0],
[batch_size, NMS_TILE_SIZE, 4])
_, box_slice, _, _ = tf.while_loop(
lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
_cross_suppression, [boxes, box_slice, iou_threshold,
tf.constant(0)])
# Iterates over the current tile to compute self-suppression.
iou = box_ops.bbox_overlap(box_slice, box_slice)
mask = tf.expand_dims(
tf.reshape(tf.range(NMS_TILE_SIZE), [1, -1]) > tf.reshape(
tf.range(NMS_TILE_SIZE), [-1, 1]), 0)
iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)
suppressed_iou, _, _ = tf.while_loop(
lambda _iou, loop_condition, _iou_sum: loop_condition, _self_suppression,
[iou, tf.constant(True),
tf.reduce_sum(iou, [1, 2])])
suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0
box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)
# Uses box_slice to update the input boxes.
mask = tf.reshape(
tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])
boxes = tf.tile(tf.expand_dims(
box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape(
boxes, [batch_size, num_tiles, NMS_TILE_SIZE, 4]) * (1 - mask)
boxes = tf.reshape(boxes, boxes_shape)
# Updates output_size.
output_size += tf.reduce_sum(
tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])
return boxes, iou_threshold, output_size, idx + 1
def sorted_non_max_suppression_padded(scores,
boxes,
max_output_size,
iou_threshold):
"""A wrapper that handles non-maximum suppression.
Assumption:
* The boxes are sorted by scores unless the box is a dot (all coordinates
are zero).
* Boxes with higher scores can be used to suppress boxes with lower scores.
  The overall design of the algorithm is to handle boxes tile-by-tile:
  boxes = boxes.pad_to_multiple_of(tile_size)
num_tiles = len(boxes) // tile_size
output_boxes = []
for i in range(num_tiles):
box_tile = boxes[i*tile_size : (i+1)*tile_size]
for j in range(i - 1):
suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
iou = bbox_overlap(box_tile, suppressing_tile)
# if the box is suppressed in iou, clear it to a dot
box_tile *= _update_boxes(iou)
    # Iteratively handle the diagonal tile.
iou = _box_overlap(box_tile, box_tile)
iou_changed = True
while iou_changed:
# boxes that are not suppressed by anything else
suppressing_boxes = _get_suppressing_boxes(iou)
# boxes that are suppressed by suppressing_boxes
suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
# clear iou to 0 for boxes that are suppressed, as they cannot be used
# to suppress other boxes any more
new_iou = _clear_iou(iou, suppressed_boxes)
iou_changed = (new_iou != iou)
iou = new_iou
# remaining boxes that can still suppress others, are selected boxes.
output_boxes.append(_get_suppressing_boxes(iou))
if len(output_boxes) >= max_output_size:
break
Args:
scores: a tensor with a shape of [batch_size, anchors].
boxes: a tensor with a shape of [batch_size, anchors, 4].
max_output_size: a scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
Returns:
nms_scores: a tensor with a shape of [batch_size, anchors]. It has same
dtype as input scores.
nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has
same dtype as input boxes.
"""
batch_size = tf.shape(boxes)[0]
num_boxes = tf.shape(boxes)[1]
pad = tf.cast(
tf.math.ceil(tf.cast(num_boxes, tf.float32) / NMS_TILE_SIZE),
tf.int32) * NMS_TILE_SIZE - num_boxes
boxes = tf.pad(tf.cast(boxes, tf.float32), [[0, 0], [0, pad], [0, 0]])
scores = tf.pad(
tf.cast(scores, tf.float32), [[0, 0], [0, pad]], constant_values=-1)
num_boxes += pad
def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
return tf.logical_and(
tf.reduce_min(output_size) < max_output_size,
idx < num_boxes // NMS_TILE_SIZE)
selected_boxes, _, output_size, _ = tf.while_loop(
_loop_cond, _suppression_loop_body, [
boxes, iou_threshold,
tf.zeros([batch_size], tf.int32),
tf.constant(0)
])
idx = num_boxes - tf.cast(
tf.nn.top_k(
tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *
tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],
tf.int32)
idx = tf.minimum(idx, num_boxes - 1)
idx = tf.reshape(
idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1])
boxes = tf.reshape(
tf.gather(tf.reshape(boxes, [-1, 4]), idx),
[batch_size, max_output_size, 4])
boxes = boxes * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape(
output_size, [-1, 1, 1]), boxes.dtype)
scores = tf.reshape(
tf.gather(tf.reshape(scores, [-1, 1]), idx),
[batch_size, max_output_size])
scores = scores * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape(
output_size, [-1, 1]), scores.dtype)
return scores, boxes
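# Minimal usage sketch (illustrative only, not part of the original module).
# The scores must already be sorted in descending order per batch, as assumed
# above; the boxes and threshold here are arbitrary.
def _example_sorted_nms():
  boxes = tf.constant([[[0., 0., 10., 10.],
                        [0., 0., 9., 9.],
                        [20., 20., 30., 30.]]])
  scores = tf.constant([[0.9, 0.8, 0.7]])
  nms_scores, nms_boxes = sorted_non_max_suppression_padded(
      scores, boxes, max_output_size=2, iou_threshold=0.5)
  # The second box overlaps the first with IoU ~0.81 and is suppressed, so the
  # kept boxes are the first and the third.
  return nms_scores, nms_boxes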
| 8,115 | 38.784314 | 80 | py |
models | models-master/official/vision/ops/target_gather_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for target_gather.py."""
import tensorflow as tf
from official.vision.ops import target_gather
class TargetGatherTest(tf.test.TestCase):
def test_target_gather_batched(self):
gt_boxes = tf.constant(
[[
[0, 0, 5, 5],
[0, 5, 5, 10],
[5, 0, 10, 5],
[5, 5, 10, 10],
]],
dtype=tf.float32)
gt_classes = tf.constant([[[2], [10], [3], [-1]]], dtype=tf.int32)
labeler = target_gather.TargetGather()
match_indices = tf.constant([[0, 2]], dtype=tf.int32)
match_indicators = tf.constant([[-2, 1]])
mask = tf.less_equal(match_indicators, 0)
cls_mask = tf.expand_dims(mask, -1)
matched_gt_classes = labeler(gt_classes, match_indices, cls_mask)
box_mask = tf.tile(cls_mask, [1, 1, 4])
matched_gt_boxes = labeler(gt_boxes, match_indices, box_mask)
self.assertAllEqual(
matched_gt_classes.numpy(), [[[0], [3]]])
self.assertAllClose(
matched_gt_boxes.numpy(), [[[0, 0, 0, 0], [5, 0, 10, 5]]])
def test_target_gather_unbatched(self):
gt_boxes = tf.constant(
[
[0, 0, 5, 5],
[0, 5, 5, 10],
[5, 0, 10, 5],
[5, 5, 10, 10],
],
dtype=tf.float32)
gt_classes = tf.constant([[2], [10], [3], [-1]], dtype=tf.int32)
labeler = target_gather.TargetGather()
match_indices = tf.constant([0, 2], dtype=tf.int32)
match_indicators = tf.constant([-2, 1])
mask = tf.less_equal(match_indicators, 0)
cls_mask = tf.expand_dims(mask, -1)
matched_gt_classes = labeler(gt_classes, match_indices, cls_mask)
box_mask = tf.tile(cls_mask, [1, 4])
matched_gt_boxes = labeler(gt_boxes, match_indices, box_mask)
self.assertAllEqual(
matched_gt_classes.numpy(), [[0], [3]])
self.assertAllClose(
matched_gt_boxes.numpy(), [[0, 0, 0, 0], [5, 0, 10, 5]])
if __name__ == '__main__':
tf.test.main()
| 2,551 | 31.717949 | 74 | py |
models | models-master/official/vision/ops/mask_ops_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mask_ops.py."""
# Import libraries
import numpy as np
import tensorflow as tf
from official.vision.ops import mask_ops
class MaskUtilsTest(tf.test.TestCase):
def testPasteInstanceMasks(self):
image_height = 10
image_width = 10
mask_height = 6
mask_width = 6
masks = np.random.randint(0, 255, (1, mask_height, mask_width))
detected_boxes = np.array([[0.0, 2.0, mask_width, mask_height]])
_ = mask_ops.paste_instance_masks(
masks, detected_boxes, image_height, image_width)
def testPasteInstanceMasksV2(self):
image_height = 10
image_width = 10
mask_height = 6
mask_width = 6
masks = np.random.randint(0, 255, (1, mask_height, mask_width))
detected_boxes = np.array([[0.0, 2.0, mask_width, mask_height]])
image_masks = mask_ops.paste_instance_masks_v2(
masks, detected_boxes, image_height, image_width)
self.assertNDArrayNear(
image_masks[:, 2:8, 0:6],
np.array(masks > 0.5, dtype=np.uint8),
1e-5)
def testInstanceMasksOverlap(self):
boxes = tf.constant([[[0, 0, 4, 4], [1, 1, 5, 5]]])
masks = tf.constant([[
[
[0.9, 0.8, 0.1, 0.2],
[0.8, 0.7, 0.3, 0.2],
[0.6, 0.7, 0.4, 0.3],
[1.0, 0.7, 0.1, 0.0],
],
[
[0.9, 0.8, 0.8, 0.7],
[0.8, 0.7, 0.6, 0.8],
[0.1, 0.2, 0.4, 0.3],
[0.2, 0.1, 0.1, 0.0],
],
]])
gt_boxes = tf.constant([[[1, 1, 5, 5], [2, 2, 6, 6]]])
gt_masks = tf.constant([[
[
[1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0],
],
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
],
]])
iou, ioa = mask_ops.instance_masks_overlap(
boxes,
masks,
gt_boxes,
gt_masks,
output_size=[10, 10],
)
self.assertAllClose(iou, [[[1 / 3, 0], [1 / 5, 1 / 7]]], atol=1e-4)
self.assertAllClose(ioa, [[[3 / 8, 0], [1 / 4, 3 / 8]]], atol=1e-4)
if __name__ == '__main__':
tf.test.main()
| 2,825 | 28.4375 | 74 | py |
models | models-master/official/vision/ops/sampling_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is Subsample(self, indicator, labels). For convenience
one can also call SubsampleWeights(self, weights, labels) which is defined in
the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures the length of output of the subsample is always batch_size, even
when number of examples set to True in indicator is less than batch_size.
This is originally implemented in TensorFlow Object Detection API.
"""
# Import libraries
import tensorflow as tf
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(input=tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
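# Illustrative example (not part of the original module): for a fully-defined
# tensor the helper simply returns the static dims; any unknown dim would be
# replaced by the corresponding scalar from tf.shape().
def _example_combined_shape():
  x = tf.ones([3, 4])
  return combined_static_and_dynamic_shape(x)  # [3, 4]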
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=tf.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
size = tf.cast(size, dtype=tf.int32)
zeros = tf.ones([size], dtype=dtype) * default_value
values = tf.ones_like(indices, dtype=dtype) * indices_value
return tf.dynamic_stitch(
[tf.range(size), tf.cast(indices, dtype=tf.int32)], [zeros, values])
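# Worked example (illustrative, not part of the original module): scattering
# two indices into a length-5 vector.
def _example_indices_to_dense_vector():
  # Returns [0., 1., 0., 1., 0.]: ones at the given indices, zeros elsewhere.
  return indices_to_dense_vector(tf.constant([1, 3]), size=5)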
def matmul_gather_on_zeroth_axis(params, indices, scope=None):
"""Matrix multiplication based implementation of tf.gather on zeroth axis.
TODO(rathodv, jonathanhuang): enable sparse matmul option.
Args:
params: A float32 Tensor. The tensor from which to gather values.
Must be at least rank 1.
indices: A Tensor. Must be one of the following types: int32, int64.
Must be in range [0, params.shape[0])
scope: A name for the operation (optional).
Returns:
A Tensor. Has the same type as params. Values from params gathered
from indices given by indices, with shape indices.shape + params.shape[1:].
"""
scope = scope or 'MatMulGather'
with tf.name_scope(scope):
params_shape = combined_static_and_dynamic_shape(params)
indices_shape = combined_static_and_dynamic_shape(indices)
params2d = tf.reshape(params, [params_shape[0], -1])
indicator_matrix = tf.one_hot(indices, params_shape[0])
gathered_result_flattened = tf.matmul(indicator_matrix, params2d)
return tf.reshape(gathered_result_flattened,
tf.stack(indices_shape + params_shape[1:]))
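# Illustrative sketch (not part of the original module): the one-hot/matmul
# gather is equivalent to tf.gather(params, indices) on the zeroth axis.
def _example_matmul_gather():
  params = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
  indices = tf.constant([2, 0])
  return matmul_gather_on_zeroth_axis(params, indices)  # [[5., 6.], [1., 2.]]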
class BalancedPositiveNegativeSampler:
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
@staticmethod
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
num_samples: int32 scalar tensor
Returns:
a boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(input=indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = indices_to_dense_vector(
selected_indices,
tf.shape(input=indicator)[0])
return tf.equal(selected_indicator, 1)
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
the signed indices of the examples where the sign is based on the label
value. The examples that cannot be sampled are set to 0. It samples
at most sample_size*positive_fraction positive examples and remaining
from negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(input=sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(
input_tensor=tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.constant(
int(sample_size * self._positive_fraction), tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input=input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
one_hot_selector, axes=[0, 0]), tf.int32)
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
        N should be a compile-time constant.
batch_size: desired batch size. This scalar cannot be None.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
        (=False) examples. N should be a compile-time constant.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled. It ensures the length of output of the subsample is always
batch_size, even when number of examples set to True in indicator is
less than batch_size.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
if not indicator.shape.is_fully_defined():
      raise ValueError('indicator must be static in shape when is_static is '
                       'True.')
if not labels.shape.is_fully_defined():
      raise ValueError('labels must be static in shape when is_static is '
                       'True.')
if not isinstance(batch_size, int):
      raise ValueError('batch_size has to be an integer when is_static is '
                       'True.')
input_length = tf.shape(input=indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(
input_tensor=tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random.shuffle(tf.range(input_length))
indicator = matmul_gather_on_zeroth_axis(
tf.cast(indicator, tf.float32), permutation)
labels = matmul_gather_on_zeroth_axis(
tf.cast(labels, tf.float32), permutation)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
tf.cast(indicator, tf.bool), tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
[num_positive_samples,
num_negative_samples] = self._get_num_pos_neg_samples(
sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples,
num_negative_samples, batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(
tf.reduce_sum(
input_tensor=tf.one_hot(sampled_idx, depth=input_length), axis=0),
tf.bool)
# project back the order based on stored permutations
reprojections = tf.one_hot(permutation, depth=input_length,
dtype=tf.float32)
return tf.cast(tf.tensordot(
tf.cast(sampled_idx_indicator, tf.float32),
reprojections, axes=[0, 0]), tf.bool)
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
        matches self._positive_fraction. It cannot be None if is_static is True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
scope = scope or 'BalancedPositiveNegativeSampler'
with tf.name_scope(scope):
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(
input_tensor=tf.cast(positive_idx, dtype=tf.int32))
else:
max_num_pos = int(self._positive_fraction * batch_size)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(
input_tensor=tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.cast(
negative_positive_ratio *
tf.cast(num_sampled_pos, dtype=tf.float32),
dtype=tf.int32)
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
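# Minimal usage sketch (illustrative, not part of the original module): draw a
# 4-element minibatch with at most 50% positives out of 8 candidate examples.
def _example_balanced_sampling():
  sampler = BalancedPositiveNegativeSampler(positive_fraction=0.5)
  indicator = tf.constant([True] * 8)  # every example may be sampled
  labels = tf.constant([True, True, False, False, False, False, False, True])
  # Boolean mask of shape [8] with at most 2 positives; the rest of the batch
  # is filled with randomly chosen negatives.
  return sampler.subsample(indicator, batch_size=4, labels=labels)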
| 16,062 | 40.830729 | 80 | py |
models | models-master/official/vision/ops/anchor_generator.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi scale anchor generator definition."""
import tensorflow as tf
# (TODO/tanzheny): consider having customized anchor offset.
class _SingleAnchorGenerator:
"""Utility to generate anchors for a single feature map.
Example:
```python
anchor_gen = _SingleAnchorGenerator(32, [.5, 1., 2.], stride=16)
anchors = anchor_gen([512, 512, 3])
```
"""
def __init__(self,
anchor_size,
scales,
aspect_ratios,
stride,
clip_boxes=False):
"""Constructs single scale anchor.
Args:
anchor_size: A single int represents the base anchor size. The anchor
height will be `anchor_size / sqrt(aspect_ratio)`, anchor width will be
`anchor_size * sqrt(aspect_ratio)`.
scales: A list/tuple, or a list/tuple of a list/tuple of positive
        floats giving the scale multipliers applied to the base `anchor_size`.
aspect_ratios: a list/tuple of positive floats representing the ratio of
anchor width to anchor height.
stride: A single int represents the anchor stride size between center of
each anchor.
clip_boxes: Boolean to represent whether the anchor coordinates should be
clipped to the image size. Defaults to `False`.
Input shape: the size of the image, `[H, W, C]`
    Output shape: the size of anchors, `[H / stride, W / stride, K * 4]`, where
      K = len(scales) * len(aspect_ratios).
"""
self.anchor_size = anchor_size
self.scales = scales
self.aspect_ratios = aspect_ratios
self.stride = stride
self.clip_boxes = clip_boxes
def __call__(self, image_size):
image_height = tf.cast(image_size[0], tf.float32)
image_width = tf.cast(image_size[1], tf.float32)
k = len(self.scales) * len(self.aspect_ratios)
aspect_ratios_sqrt = tf.cast(tf.sqrt(self.aspect_ratios), dtype=tf.float32)
anchor_size = tf.cast(self.anchor_size, tf.float32)
# [K]
anchor_heights = []
anchor_widths = []
for scale in self.scales:
anchor_size_t = anchor_size * scale
anchor_height = anchor_size_t / aspect_ratios_sqrt
anchor_width = anchor_size_t * aspect_ratios_sqrt
anchor_heights.append(anchor_height)
anchor_widths.append(anchor_width)
anchor_heights = tf.concat(anchor_heights, axis=0)
anchor_widths = tf.concat(anchor_widths, axis=0)
half_anchor_heights = tf.reshape(0.5 * anchor_heights, [1, 1, k])
half_anchor_widths = tf.reshape(0.5 * anchor_widths, [1, 1, k])
stride = tf.cast(self.stride, tf.float32)
# [W]
cx = tf.range(0.5 * stride, image_width, stride)
# [H]
cy = tf.range(0.5 * stride, image_height, stride)
# [H, W]
cx_grid, cy_grid = tf.meshgrid(cx, cy)
# [H, W, 1]
cx_grid = tf.expand_dims(cx_grid, axis=-1)
cy_grid = tf.expand_dims(cy_grid, axis=-1)
# [H, W, K, 1]
y_min = tf.expand_dims(cy_grid - half_anchor_heights, axis=-1)
y_max = tf.expand_dims(cy_grid + half_anchor_heights, axis=-1)
x_min = tf.expand_dims(cx_grid - half_anchor_widths, axis=-1)
x_max = tf.expand_dims(cx_grid + half_anchor_widths, axis=-1)
if self.clip_boxes:
y_min = tf.maximum(tf.minimum(y_min, image_height), 0.)
y_max = tf.maximum(tf.minimum(y_max, image_height), 0.)
x_min = tf.maximum(tf.minimum(x_min, image_width), 0.)
x_max = tf.maximum(tf.minimum(x_max, image_width), 0.)
# [H, W, K, 4]
result = tf.concat([y_min, x_min, y_max, x_max], axis=-1)
shape = result.shape.as_list()
# [H, W, K * 4]
return tf.reshape(result, [shape[0], shape[1], shape[2] * shape[3]])
class AnchorGenerator():
"""Utility to generate anchors for a multiple feature maps.
Example:
```python
anchor_gen = AnchorGenerator([32, 64], [.5, 1., 2.],
strides=[16, 32])
anchors = anchor_gen([512, 512, 3])
```
"""
def __init__(self,
anchor_sizes,
scales,
aspect_ratios,
strides,
clip_boxes=False):
"""Constructs multiscale anchors.
Args:
      anchor_sizes: A list of ints giving the anchor size for each scale. The
anchor height will be `anchor_size / sqrt(aspect_ratio)`, anchor width
will be `anchor_size * sqrt(aspect_ratio)` for each scale.
scales: A list/tuple, or a list/tuple of a list/tuple of positive
        floats giving the scale multipliers applied to the base `anchor_size`.
aspect_ratios: A list/tuple, or a list/tuple of a list/tuple of positive
floats representing the ratio of anchor width to anchor height.
strides: A list/tuple of ints represent the anchor stride size between
center of anchors at each scale.
      clip_boxes: Boolean to represent whether the anchor coordinates should be
clipped to the image size. Defaults to `False`.
Input shape: the size of the image, `[H, W, C]`
    Output shape: the anchors for each level, each of shape
      `[H / stride, W / stride, K * 4]`.
"""
# aspect_ratio is a single list that is the same across all levels.
aspect_ratios = maybe_map_structure_for_anchor(aspect_ratios, anchor_sizes)
scales = maybe_map_structure_for_anchor(scales, anchor_sizes)
if isinstance(anchor_sizes, dict):
self.anchor_generators = {}
for k in anchor_sizes.keys():
self.anchor_generators[k] = _SingleAnchorGenerator(
anchor_sizes[k], scales[k], aspect_ratios[k], strides[k],
clip_boxes)
elif isinstance(anchor_sizes, (list, tuple)):
self.anchor_generators = []
for anchor_size, scale_list, ar_list, stride in zip(
anchor_sizes, scales, aspect_ratios, strides):
self.anchor_generators.append(
_SingleAnchorGenerator(anchor_size, scale_list, ar_list, stride,
clip_boxes))
def __call__(self, image_size):
anchor_generators = tf.nest.flatten(self.anchor_generators)
results = [anchor_gen(image_size) for anchor_gen in anchor_generators]
return tf.nest.pack_sequence_as(self.anchor_generators, results)
def maybe_map_structure_for_anchor(params, anchor_sizes):
"""broadcast the params to match anchor_sizes."""
if all(isinstance(param, (int, float)) for param in params):
if isinstance(anchor_sizes, (tuple, list)):
return [params] * len(anchor_sizes)
elif isinstance(anchor_sizes, dict):
return tf.nest.map_structure(lambda _: params, anchor_sizes)
else:
raise ValueError("the structure of `anchor_sizes` must be a tuple, "
"list, or dict, given {}".format(anchor_sizes))
else:
return params
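# Illustrative sketch (not part of the original module): anchors for two FPN
# levels keyed by level name; the single scales/aspect_ratios lists broadcast
# to both levels. The sizes and strides below are arbitrary.
def _example_multilevel_anchors():
  anchor_gen = AnchorGenerator(
      anchor_sizes={'5': 64, '6': 128},
      scales=[1.0],
      aspect_ratios=[0.5, 1.0, 2.0],
      strides={'5': 32, '6': 64})
  # Returns a dict keyed by level; each entry has shape
  # [H / stride, W / stride, num_scales * num_aspect_ratios * 4].
  return anchor_gen([256, 256, 3])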
| 7,234 | 38.535519 | 80 | py |
models | models-master/official/vision/ops/anchor_generator_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for anchor_generator.py."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.ops import anchor_generator
class AnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
# Single scale anchor.
(5, [1.0], [[[-16., -16., 48., 48.], [-16., 16., 48., 80.]],
[[16., -16., 80., 48.], [16., 16., 80., 80.]]]),
# # Multi aspect ratio anchor.
(6, [1.0, 4.0, 0.25],
[[[-32., -32., 96., 96., 0., -96., 64., 160., -96., 0., 160., 64.]]]),
)
def testAnchorGeneration(self, level, aspect_ratios, expected_boxes):
image_size = [64, 64]
anchor_size = 2**(level + 1)
stride = 2**level
anchor_gen = anchor_generator._SingleAnchorGenerator(
anchor_size=anchor_size,
scales=[1.],
aspect_ratios=aspect_ratios,
stride=stride,
clip_boxes=False)
anchors = anchor_gen(image_size).numpy()
self.assertAllClose(expected_boxes, anchors)
@parameterized.parameters(
# Single scale anchor.
(5, [1.0], [[[0., 0., 48., 48.], [0., 16., 48., 64.]],
[[16., 0., 64., 48.], [16., 16., 64., 64.]]]),
# # Multi aspect ratio anchor.
(6, [1.0, 4.0, 0.25
], [[[0., 0., 64., 64., 0., 0., 64., 64., 0., 0., 64., 64.]]]),
)
def testAnchorGenerationClipped(self, level, aspect_ratios, expected_boxes):
image_size = [64, 64]
anchor_size = 2**(level + 1)
stride = 2**level
anchor_gen = anchor_generator._SingleAnchorGenerator(
anchor_size=anchor_size,
scales=[1.],
aspect_ratios=aspect_ratios,
stride=stride,
clip_boxes=True)
anchors = anchor_gen(image_size).numpy()
self.assertAllClose(expected_boxes, anchors)
class MultiScaleAnchorGeneratorTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
# Multi scale anchor.
(5, 6, [[1.0], [1.0]], [[-16, -16, 48, 48], [-16, 16, 48, 80],
[16, -16, 80, 48], [16, 16, 80, 80],
[-32, -32, 96, 96]]),)
def testAnchorGeneration(self, min_level, max_level, aspect_ratios,
expected_boxes):
image_size = [64, 64]
levels = range(min_level, max_level + 1)
anchor_sizes = [2**(level + 1) for level in levels]
strides = [2**level for level in levels]
anchor_gen = anchor_generator.AnchorGenerator(
anchor_sizes=anchor_sizes,
scales=[1.],
aspect_ratios=aspect_ratios,
strides=strides)
anchors = anchor_gen(image_size)
anchors = [tf.reshape(anchor, [-1, 4]) for anchor in anchors]
anchors = tf.concat(anchors, axis=0).numpy()
self.assertAllClose(expected_boxes, anchors)
@parameterized.parameters(
# Multi scale anchor.
(5, 6, [[1.0], [1.0]], [[-16, -16, 48, 48], [-16, 16, 48, 80],
[16, -16, 80, 48], [16, 16, 80, 80],
[-32, -32, 96, 96]]),)
def testAnchorGenerationClipped(self, min_level, max_level, aspect_ratios,
expected_boxes):
image_size = [64, 64]
levels = range(min_level, max_level + 1)
anchor_sizes = [2**(level + 1) for level in levels]
strides = [2**level for level in levels]
anchor_gen = anchor_generator.AnchorGenerator(
anchor_sizes=anchor_sizes,
scales=[1.],
aspect_ratios=aspect_ratios,
strides=strides,
clip_boxes=False)
anchors = anchor_gen(image_size)
anchors = [tf.reshape(anchor, [-1, 4]) for anchor in anchors]
anchors = tf.concat(anchors, axis=0).numpy()
self.assertAllClose(expected_boxes, anchors)
@parameterized.parameters(
# Multi scale anchor.
(5, 6, [1.0], {
'5': [[[-16., -16., 48., 48.], [-16., 16., 48., 80.]],
[[16., -16., 80., 48.], [16., 16., 80., 80.]]],
'6': [[[-32, -32, 96, 96]]]
}),)
def testAnchorGenerationDict(self, min_level, max_level, aspect_ratios,
expected_boxes):
image_size = [64, 64]
levels = range(min_level, max_level + 1)
anchor_sizes = dict((str(level), 2**(level + 1)) for level in levels)
strides = dict((str(level), 2**level) for level in levels)
anchor_gen = anchor_generator.AnchorGenerator(
anchor_sizes=anchor_sizes,
scales=[1.],
aspect_ratios=aspect_ratios,
strides=strides,
clip_boxes=False)
anchors = anchor_gen(image_size)
for k in expected_boxes.keys():
self.assertAllClose(expected_boxes[k], anchors[k].numpy())
if __name__ == '__main__':
tf.test.main()
| 5,286 | 37.311594 | 78 | py |