# File: models-master/official/vision/ops/preprocess_ops.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing ops."""
import math
from typing import Optional, Sequence, Tuple, Union
from six.moves import range
import tensorflow as tf
from official.vision.ops import augment
from official.vision.ops import box_ops
CENTER_CROP_FRACTION = 0.875
# Calculated from the ImageNet training set
MEAN_NORM = (0.485, 0.456, 0.406)
STDDEV_NORM = (0.229, 0.224, 0.225)
MEAN_RGB = tuple(255 * i for i in MEAN_NORM)
STDDEV_RGB = tuple(255 * i for i in STDDEV_NORM)
# Alias for convenience. PLEASE use `box_ops.horizontal_flip_boxes` directly.
horizontal_flip_boxes = box_ops.horizontal_flip_boxes
vertical_flip_boxes = box_ops.vertical_flip_boxes
def clip_or_pad_to_fixed_size(input_tensor, size, constant_values=0):
"""Pads data to a fixed length at the first dimension.
Args:
input_tensor: `Tensor` with any dimension.
size: `int` number for the first dimension of output Tensor.
constant_values: `int` value assigned to the paddings.
Returns:
`Tensor` with the first dimension padded to `size`.
"""
input_shape = input_tensor.get_shape().as_list()
padding_shape = []
# Computes the padding length on the first dimension, clip input tensor if it
# is longer than `size`.
input_length = tf.shape(input_tensor)[0]
input_length = tf.clip_by_value(input_length, 0, size)
input_tensor = input_tensor[:input_length]
padding_length = tf.maximum(0, size - input_length)
padding_shape.append(padding_length)
# Copies shapes of the rest of input shape dimensions.
for i in range(1, len(input_shape)):
padding_shape.append(tf.shape(input_tensor)[i])
# Pads input tensor to the fixed first dimension.
paddings = tf.cast(constant_values * tf.ones(padding_shape),
input_tensor.dtype)
padded_tensor = tf.concat([input_tensor, paddings], axis=0)
output_shape = input_shape
output_shape[0] = size
padded_tensor.set_shape(output_shape)
return padded_tensor
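# Illustrative usage sketch (not part of the original module); the shapes and
# fill value below are assumptions for the example:
#
#   boxes = tf.ones([3, 4], tf.float32)
#   padded = clip_or_pad_to_fixed_size(boxes, size=8, constant_values=-1)
#   # padded.shape == [8, 4]; rows 3..7 are filled with -1.
#   clipped = clip_or_pad_to_fixed_size(boxes, size=2)
#   # clipped.shape == [2, 4]; the third row is dropped.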
def normalize_image(image: tf.Tensor,
offset: Sequence[float] = MEAN_NORM,
scale: Sequence[float] = STDDEV_NORM):
"""Normalizes the image to zero mean and unit variance."""
with tf.name_scope('normalize_image'):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return normalize_scaled_float_image(image, offset, scale)
def normalize_scaled_float_image(image: tf.Tensor,
offset: Sequence[float] = MEAN_NORM,
scale: Sequence[float] = STDDEV_NORM):
"""Normalizes a scaled float image to zero mean and unit variance.
It assumes the input image is float dtype with values in [0, 1).
Args:
image: A tf.Tensor in float32 dtype with values in range [0, 1).
offset: A tuple of mean values to be subtracted from the image.
scale: A tuple of normalization factors.
Returns:
A normalized image tensor.
"""
offset = tf.constant(offset)
offset = tf.expand_dims(offset, axis=0)
offset = tf.expand_dims(offset, axis=0)
image -= offset
scale = tf.constant(scale)
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
image /= scale
return image
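# Illustrative usage sketch (not part of the original module); the input shape
# is an assumption. `normalize_image` first scales a uint8 image to [0, 1) and
# then applies the ImageNet mean/stddev above:
#
#   image = tf.zeros([224, 224, 3], dtype=tf.uint8)
#   normalized = normalize_image(image)
#   # normalized is float32 with per-channel values (x / 255 - mean) / stddev.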
def compute_padded_size(desired_size, stride):
"""Compute the padded size given the desired size and the stride.
  The padded size will be the smallest rectangle, such that each dimension is
  the smallest multiple of the stride which is no smaller than the desired
  dimension. For example, if desired_size = (100, 200) and stride = 32,
  the output padded_size = (128, 224).
Args:
desired_size: a `Tensor` or `int` list/tuple of two elements representing
[height, width] of the target output image size.
stride: an integer, the stride of the backbone network.
Returns:
padded_size: a `Tensor` or `int` list/tuple of two elements representing
[height, width] of the padded output image size.
"""
if isinstance(desired_size, list) or isinstance(desired_size, tuple):
padded_size = [int(math.ceil(d * 1.0 / stride) * stride)
for d in desired_size]
else:
padded_size = tf.cast(
tf.math.ceil(
tf.cast(desired_size, dtype=tf.float32) / stride) * stride,
tf.int32)
return padded_size
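# Illustrative sketch (not part of the original module) of the docstring
# example above:
#
#   compute_padded_size((100, 200), 32)               # -> [128, 224]
#   compute_padded_size(tf.constant([100, 200]), 32)  # -> tf.Tensor([128 224])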
def resize_and_crop_image(image,
desired_size,
padded_size,
aug_scale_min=1.0,
aug_scale_max=1.0,
seed=1,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes the input image to output size (RetinaNet style).
Resize and pad images given the desired output size of the image and
stride size.
Here are the preprocessing steps.
1. For a given image, keep its aspect ratio and rescale the image to make it
the largest rectangle to be bounded by the rectangle specified by the
`desired_size`.
2. Pad the rescaled image to the padded_size.
Args:
image: a `Tensor` of shape [height, width, 3] representing an image.
desired_size: a `Tensor` or `int` list/tuple of two elements representing
[height, width] of the desired actual output image size.
padded_size: a `Tensor` or `int` list/tuple of two elements representing
[height, width] of the padded output image size. Padding will be applied
after scaling the image to the desired_size. Can be None to disable
padding.
aug_scale_min: a `float` with range between [0, 1.0] representing minimum
random scale applied to desired_size for training scale jittering.
aug_scale_max: a `float` with range between [1.0, inf] representing maximum
random scale applied to desired_size for training scale jittering.
seed: seed for random scale jittering.
method: function to resize input image to scaled image.
Returns:
    output_image: `Tensor` of shape [height, width, 3] where [height, width]
      equals `padded_size` when padding is applied.
image_info: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [desired_height, desired_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale] is
the scaling factor, which is the ratio of
scaled dimension / original dimension.
"""
with tf.name_scope('resize_and_crop_image'):
image_size = tf.cast(tf.shape(image)[0:2], tf.float32)
random_jittering = (
isinstance(aug_scale_min, tf.Tensor)
or isinstance(aug_scale_max, tf.Tensor)
or not math.isclose(aug_scale_min, 1.0)
or not math.isclose(aug_scale_max, 1.0)
)
if random_jittering:
random_scale = tf.random.uniform(
[], aug_scale_min, aug_scale_max, seed=seed)
scaled_size = tf.round(random_scale * tf.cast(desired_size, tf.float32))
else:
scaled_size = tf.cast(desired_size, tf.float32)
scale = tf.minimum(
scaled_size[0] / image_size[0], scaled_size[1] / image_size[1])
scaled_size = tf.round(image_size * scale)
# Computes 2D image_scale.
image_scale = scaled_size / image_size
# Selects non-zero random offset (x, y) if scaled image is larger than
# desired_size.
if random_jittering:
max_offset = scaled_size - tf.cast(desired_size, tf.float32)
max_offset = tf.where(
tf.less(max_offset, 0), tf.zeros_like(max_offset), max_offset)
offset = max_offset * tf.random.uniform([2,], 0, 1, seed=seed)
offset = tf.cast(offset, tf.int32)
else:
offset = tf.zeros((2,), tf.int32)
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=method)
if random_jittering:
scaled_image = scaled_image[
offset[0]:offset[0] + desired_size[0],
offset[1]:offset[1] + desired_size[1], :]
output_image = scaled_image
if padded_size is not None:
output_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, padded_size[0], padded_size[1])
image_info = tf.stack([
image_size,
tf.cast(desired_size, dtype=tf.float32),
image_scale,
tf.cast(offset, tf.float32)])
return output_image, image_info
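# Illustrative usage sketch (not part of the original module); the input shape
# and output sizes are assumptions. Without jittering, the image is scaled to
# fit inside `desired_size` and then padded:
#
#   image = tf.zeros([480, 640, 3], tf.float32)
#   out, info = resize_and_crop_image(
#       image, desired_size=(512, 512), padded_size=(512, 512))
#   # out.shape == [512, 512, 3]; info[0] == [480, 640] (original size),
#   # info[2] == [0.8, 0.8] ([y_scale, x_scale]), info[3] == [0, 0] (offset).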
def resize_and_crop_image_v2(image,
short_side,
long_side,
padded_size,
aug_scale_min=1.0,
aug_scale_max=1.0,
seed=1,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes the input image to output size (Faster R-CNN style).
Resize and pad images given the specified short / long side length and the
stride size.
Here are the preprocessing steps.
1. For a given image, keep its aspect ratio and first try to rescale the short
side of the original image to `short_side`.
2. If the scaled image after 1 has a long side that exceeds `long_side`, keep
the aspect ratio and rescale the long side of the image to `long_side`.
3. (Optional) Apply random jittering according to `aug_scale_min` and
`aug_scale_max`. By default this step is skipped.
4. Pad the rescaled image to the padded_size.
Args:
image: a `Tensor` of shape [height, width, 3] representing an image.
short_side: a scalar `Tensor` or `int` representing the desired short side
to be rescaled to.
long_side: a scalar `Tensor` or `int` representing the desired long side to
be rescaled to.
padded_size: a `Tensor` or `int` list/tuple of two elements representing
[height, width] of the padded output image size.
aug_scale_min: a `float` with range between [0, 1.0] representing minimum
random scale applied for training scale jittering.
aug_scale_max: a `float` with range between [1.0, inf] representing maximum
random scale applied for training scale jittering.
seed: seed for random scale jittering.
method: function to resize input image to scaled image.
Returns:
    output_image: `Tensor` of shape [height, width, 3] where [height, width]
      equals `padded_size`.
image_info: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [desired_height, desired_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale] is
the scaling factor, which is the ratio of
scaled dimension / original dimension.
"""
with tf.name_scope('resize_and_crop_image_v2'):
image_size = tf.cast(tf.shape(image)[0:2], tf.float32)
scale_using_short_side = (
short_side / tf.math.minimum(image_size[0], image_size[1]))
scale_using_long_side = (
long_side / tf.math.maximum(image_size[0], image_size[1]))
scaled_size = tf.math.round(image_size * scale_using_short_side)
scaled_size = tf.where(
tf.math.greater(
tf.math.maximum(scaled_size[0], scaled_size[1]), long_side),
tf.math.round(image_size * scale_using_long_side),
scaled_size)
desired_size = scaled_size
random_jittering = (
isinstance(aug_scale_min, tf.Tensor)
or isinstance(aug_scale_max, tf.Tensor)
or not math.isclose(aug_scale_min, 1.0)
or not math.isclose(aug_scale_max, 1.0)
)
if random_jittering:
random_scale = tf.random.uniform(
[], aug_scale_min, aug_scale_max, seed=seed)
scaled_size = tf.math.round(random_scale * scaled_size)
# Computes 2D image_scale.
image_scale = scaled_size / image_size
# Selects non-zero random offset (x, y) if scaled image is larger than
# desired_size.
if random_jittering:
max_offset = scaled_size - desired_size
max_offset = tf.where(
tf.math.less(max_offset, 0), tf.zeros_like(max_offset), max_offset)
offset = max_offset * tf.random.uniform([2,], 0, 1, seed=seed)
offset = tf.cast(offset, tf.int32)
else:
offset = tf.zeros((2,), tf.int32)
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=method)
if random_jittering:
scaled_image = scaled_image[
offset[0]:offset[0] + desired_size[0],
offset[1]:offset[1] + desired_size[1], :]
output_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, padded_size[0], padded_size[1])
image_info = tf.stack([
image_size,
tf.cast(desired_size, dtype=tf.float32),
image_scale,
tf.cast(offset, tf.float32)])
return output_image, image_info
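# Illustrative usage sketch (not part of the original module); the Faster
# R-CNN style 800/1333 sizes are a common convention, used here as an
# assumption:
#
#   image = tf.zeros([480, 640, 3], tf.float32)
#   out, info = resize_and_crop_image_v2(
#       image, short_side=800, long_side=1333,
#       padded_size=compute_padded_size((800, 1333), 32))
#   # The short side 480 is scaled toward 800 unless the long side would
#   # exceed 1333; here scale = min(800 / 480, 1333 / 640).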
def resize_image(
image: tf.Tensor,
size: Union[Tuple[int, int], int],
max_size: Optional[int] = None,
method: tf.image.ResizeMethod = tf.image.ResizeMethod.BILINEAR):
"""Resize image with size and max_size.
Args:
image: the image to be resized.
    size: if a two-element list/tuple, resize the image to it. If a scalar,
      keep the aspect ratio and resize the short side to this value.
    max_size: only used when `size` is a scalar. If the longer side would
      exceed `max_size` after resizing with `size`, `max_size` is used for
      the longer side instead, keeping the aspect ratio.
method: the method argument passed to tf.image.resize.
Returns:
the resized image and image_info to be used for downstream processing.
image_info: a 2D `Tensor` that encodes the information of the image and the
applied preprocessing. It is in the format of
[[original_height, original_width], [resized_height, resized_width],
[y_scale, x_scale], [0, 0]], where [resized_height, resized_width]
is the actual scaled image size, and [y_scale, x_scale] is the
scaling factor, which is the ratio of
scaled dimension / original dimension.
"""
def get_size_with_aspect_ratio(image_size, size, max_size=None):
h = image_size[0]
w = image_size[1]
if max_size is not None:
min_original_size = tf.cast(tf.math.minimum(w, h), dtype=tf.float32)
max_original_size = tf.cast(tf.math.maximum(w, h), dtype=tf.float32)
if max_original_size / min_original_size * size > max_size:
size = tf.cast(
tf.math.floor(max_size * min_original_size / max_original_size),
dtype=tf.int32)
else:
size = tf.cast(size, tf.int32)
else:
size = tf.cast(size, tf.int32)
if (w <= h and w == size) or (h <= w and h == size):
return tf.stack([h, w])
if w < h:
ow = size
oh = tf.cast(
(tf.cast(size, dtype=tf.float32) * tf.cast(h, dtype=tf.float32) /
tf.cast(w, dtype=tf.float32)),
dtype=tf.int32)
else:
oh = size
ow = tf.cast(
(tf.cast(size, dtype=tf.float32) * tf.cast(w, dtype=tf.float32) /
tf.cast(h, dtype=tf.float32)),
dtype=tf.int32)
return tf.stack([oh, ow])
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
  original_size = tf.shape(image)[0:2]
  size = get_size(original_size, size, max_size)
  rescaled_image = tf.image.resize(
      image, tf.cast(size, tf.int32), method=method)
  image_scale = size / original_size
  image_info = tf.stack([
      tf.cast(original_size, dtype=tf.float32),
tf.cast(size, dtype=tf.float32),
tf.cast(image_scale, tf.float32),
tf.constant([0.0, 0.0], dtype=tf.float32)
])
return rescaled_image, image_info
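# Illustrative usage sketch (not part of the original module); the input shape
# is an assumption. With a scalar `size`, the short side is resized to `size`
# unless the long side would exceed `max_size`:
#
#   image = tf.zeros([480, 640, 3], tf.float32)
#   resized, info = resize_image(image, size=800, max_size=1333)
#   # Here 640 * (800 / 480) = 1066.7 <= 1333, so the output is
#   # approximately [800, 1066, 3]; info[3] is always [0, 0] (no crop).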
def center_crop_image(
image, center_crop_fraction: float = CENTER_CROP_FRACTION):
"""Center crop a square shape slice from the input image.
It crops a square shape slice from the image. The side of the actual crop
is 224 / 256 = 0.875 of the short side of the original image. References:
[1] Very Deep Convolutional Networks for Large-Scale Image Recognition
https://arxiv.org/abs/1409.1556
[2] Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385
Args:
image: a Tensor of shape [height, width, 3] representing the input image.
    center_crop_fraction: a float for the ratio between the side of the
      cropped image and the short side of the original image.
Returns:
cropped_image: a Tensor representing the center cropped image.
"""
with tf.name_scope('center_crop_image'):
image_size = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
crop_size = (
center_crop_fraction * tf.math.minimum(image_size[0], image_size[1]))
crop_offset = tf.cast((image_size - crop_size) / 2.0, dtype=tf.int32)
crop_size = tf.cast(crop_size, dtype=tf.int32)
cropped_image = image[
crop_offset[0]:crop_offset[0] + crop_size,
crop_offset[1]:crop_offset[1] + crop_size, :]
return cropped_image
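# Illustrative usage sketch (not part of the original module); the input shape
# is an assumption:
#
#   image = tf.zeros([300, 400, 3], dtype=tf.uint8)
#   cropped = center_crop_image(image)
#   # Square side = int(0.875 * 300) = 262, so cropped.shape == [262, 262, 3],
#   # taken from the center of the image.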
def center_crop_image_v2(
image_bytes, image_shape, center_crop_fraction: float = CENTER_CROP_FRACTION
):
"""Center crop a square shape slice from the input image.
It crops a square shape slice from the image. The side of the actual crop
is 224 / 256 = 0.875 of the short side of the original image. References:
[1] Very Deep Convolutional Networks for Large-Scale Image Recognition
https://arxiv.org/abs/1409.1556
[2] Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385
This is a faster version of `center_crop_image` which takes the original
image bytes and image size as the inputs, and partially decode the JPEG
bytes according to the center crop.
Args:
image_bytes: a Tensor of type string representing the raw image bytes.
image_shape: a Tensor specifying the shape of the raw image.
    center_crop_fraction: a float for the ratio between the side of the
      cropped image and the short side of the original image.
Returns:
cropped_image: a Tensor representing the center cropped image.
"""
with tf.name_scope('center_image_crop_v2'):
image_shape = tf.cast(image_shape, tf.float32)
crop_size = center_crop_fraction * tf.math.minimum(
image_shape[0], image_shape[1]
)
crop_offset = tf.cast((image_shape - crop_size) / 2.0, dtype=tf.int32)
crop_size = tf.cast(crop_size, dtype=tf.int32)
crop_window = tf.stack(
[crop_offset[0], crop_offset[1], crop_size, crop_size])
cropped_image = tf.image.decode_and_crop_jpeg(
image_bytes, crop_window, channels=3)
return cropped_image
def random_crop_image(image,
aspect_ratio_range=(3. / 4., 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
seed=1):
"""Randomly crop an arbitrary shaped slice from the input image.
Args:
image: a Tensor of shape [height, width, 3] representing the input image.
aspect_ratio_range: a list of floats. The cropped area of the image must
have an aspect ratio = width / height within this range.
    area_range: a list of floats. The cropped area of the image must contain
      a fraction of the input image within this range.
max_attempts: the number of attempts at generating a cropped region of the
image of the specified constraints. After max_attempts failures, return
the entire image.
seed: the seed of the random generator.
Returns:
cropped_image: a Tensor representing the random cropped image. Can be the
original image if max_attempts is exhausted.
"""
with tf.name_scope('random_crop_image'):
crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),
seed=seed,
min_object_covered=area_range[0],
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts)
cropped_image = tf.slice(image, crop_offset, crop_size)
return cropped_image
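# Illustrative usage sketch (not part of the original module); the parameters
# below are assumptions. A random crop covering at least half of the image
# area:
#
#   image = tf.zeros([300, 400, 3], dtype=tf.uint8)
#   cropped = random_crop_image(
#       image, aspect_ratio_range=(0.75, 1.33), area_range=(0.5, 1.0))
#   # cropped has a random shape; it equals `image` if no valid crop is
#   # found within `max_attempts`.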
def random_crop_image_v2(image_bytes,
image_shape,
aspect_ratio_range=(3. / 4., 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
seed=1):
"""Randomly crop an arbitrary shaped slice from the input image.
This is a faster version of `random_crop_image` which takes the original
image bytes and image size as the inputs, and partially decode the JPEG
bytes according to the generated crop.
Args:
image_bytes: a Tensor of type string representing the raw image bytes.
image_shape: a Tensor specifying the shape of the raw image.
aspect_ratio_range: a list of floats. The cropped area of the image must
have an aspect ratio = width / height within this range.
    area_range: a list of floats. The cropped area of the image must contain
      a fraction of the input image within this range.
max_attempts: the number of attempts at generating a cropped region of the
image of the specified constraints. After max_attempts failures, return
the entire image.
seed: the seed of the random generator.
Returns:
cropped_image: a Tensor representing the random cropped image. Can be the
original image if max_attempts is exhausted.
"""
with tf.name_scope('random_crop_image_v2'):
crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box(
image_shape,
tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),
seed=seed,
min_object_covered=area_range[0],
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts)
offset_y, offset_x, _ = tf.unstack(crop_offset)
crop_height, crop_width, _ = tf.unstack(crop_size)
crop_window = tf.stack([offset_y, offset_x, crop_height, crop_width])
cropped_image = tf.image.decode_and_crop_jpeg(
image_bytes, crop_window, channels=3)
return cropped_image
def resize_and_crop_boxes(boxes,
image_scale,
output_size,
offset):
"""Resizes boxes to output size with scale and offset.
Args:
boxes: `Tensor` of shape [N, 4] representing ground truth boxes.
image_scale: 2D float `Tensor` representing scale factors that apply to
[height, width] of input image.
output_size: 2D `Tensor` or `int` representing [height, width] of target
output image size.
offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled
boxes.
Returns:
boxes: `Tensor` of shape [N, 4] representing the scaled boxes.
"""
with tf.name_scope('resize_and_crop_boxes'):
# Adjusts box coordinates based on image_scale and offset.
boxes *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
boxes -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
# Clips the boxes.
boxes = box_ops.clip_boxes(boxes, output_size)
return boxes
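# Illustrative sketch (not part of the original module) of how this pairs with
# `resize_and_crop_image`; shapes are assumptions. Boxes are expected in
# absolute [ymin, xmin, ymax, xmax] coordinates of the original image:
#
#   out_image, info = resize_and_crop_image(image, (512, 512), (512, 512))
#   boxes = resize_and_crop_boxes(
#       boxes, image_scale=info[2, :], output_size=info[1, :],
#       offset=info[3, :])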
def resize_and_crop_masks(masks, image_scale, output_size, offset):
"""Resizes boxes to output size with scale and offset.
Args:
masks: `Tensor` of shape [N, H, W, C] representing ground truth masks.
image_scale: 2D float `Tensor` representing scale factors that apply to
[height, width] of input image.
output_size: 2D `Tensor` or `int` representing [height, width] of target
output image size.
offset: 2D `Tensor` representing top-left corner [y0, x0] to crop scaled
boxes.
Returns:
masks: `Tensor` of shape [N, H, W, C] representing the scaled masks.
"""
with tf.name_scope('resize_and_crop_masks'):
mask_size = tf.cast(tf.shape(masks)[1:3], tf.float32)
num_channels = tf.shape(masks)[3]
# Pad masks to avoid empty mask annotations.
masks = tf.concat([
tf.zeros([1, mask_size[0], mask_size[1], num_channels],
dtype=masks.dtype), masks
],
axis=0)
scaled_size = tf.cast(image_scale * mask_size, tf.int32)
scaled_masks = tf.image.resize(
masks, scaled_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
offset = tf.cast(offset, tf.int32)
scaled_masks = scaled_masks[
:,
offset[0]:offset[0] + output_size[0],
offset[1]:offset[1] + output_size[1],
:]
output_masks = tf.image.pad_to_bounding_box(
scaled_masks, 0, 0, output_size[0], output_size[1])
# Remove padding.
output_masks = output_masks[1::]
return output_masks
def horizontal_flip_image(image):
"""Flips image horizontally."""
return tf.image.flip_left_right(image)
def horizontal_flip_masks(masks):
"""Flips masks horizontally."""
return masks[:, :, ::-1]
def random_horizontal_flip(
image, normalized_boxes=None, masks=None, seed=1, prob=0.5
):
"""Randomly flips input image and bounding boxes horizontally."""
with tf.name_scope('random_horizontal_flip'):
do_flip = tf.less(tf.random.uniform([], seed=seed), prob)
image = tf.cond(
do_flip,
lambda: horizontal_flip_image(image),
lambda: image)
if normalized_boxes is not None:
normalized_boxes = tf.cond(
do_flip,
lambda: horizontal_flip_boxes(normalized_boxes),
lambda: normalized_boxes)
if masks is not None:
masks = tf.cond(
do_flip,
lambda: horizontal_flip_masks(masks),
lambda: masks)
return image, normalized_boxes, masks
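# Illustrative usage sketch (not part of the original module); shapes are
# assumptions. Boxes must be normalized to [0, 1] for the flip to be correct:
#
#   image = tf.zeros([512, 512, 3], tf.float32)
#   boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]])
#   image, boxes, _ = random_horizontal_flip(image, normalized_boxes=boxes)
#   # With probability `prob`, the image is mirrored and each box becomes
#   # [ymin, 1 - xmax, ymax, 1 - xmin].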
def random_horizontal_flip_with_roi(
image: tf.Tensor,
boxes: Optional[tf.Tensor] = None,
masks: Optional[tf.Tensor] = None,
roi_boxes: Optional[tf.Tensor] = None,
seed: int = 1
) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[tf.Tensor],
Optional[tf.Tensor]]:
"""Randomly flips input image and bounding boxes horizontally.
Extends preprocess_ops.random_horizontal_flip to also flip roi_boxes used
by ViLD.
Args:
image: `tf.Tensor`, the image to apply the random flip.
boxes: `tf.Tensor` or `None`, boxes corresponding to the image.
masks: `tf.Tensor` or `None`, masks corresponding to the image.
roi_boxes: `tf.Tensor` or `None`, RoIs corresponding to the image.
seed: Seed for Tensorflow's random number generator.
Returns:
image: `tf.Tensor`, flipped image.
boxes: `tf.Tensor` or `None`, flipped boxes corresponding to the image.
masks: `tf.Tensor` or `None`, flipped masks corresponding to the image.
roi_boxes: `tf.Tensor` or `None`, flipped RoIs corresponding to the image.
"""
with tf.name_scope('random_horizontal_flip'):
do_flip = tf.greater(tf.random.uniform([], seed=seed), 0.5)
image = tf.cond(do_flip, lambda: horizontal_flip_image(image),
lambda: image)
if boxes is not None:
boxes = tf.cond(do_flip, lambda: horizontal_flip_boxes(boxes),
lambda: boxes)
if masks is not None:
masks = tf.cond(do_flip, lambda: horizontal_flip_masks(masks),
lambda: masks)
if roi_boxes is not None:
roi_boxes = tf.cond(do_flip, lambda: horizontal_flip_boxes(roi_boxes),
lambda: roi_boxes)
return image, boxes, masks, roi_boxes
def random_vertical_flip(
image, normalized_boxes=None, masks=None, seed=1, prob=0.5
):
"""Randomly flips input image and bounding boxes vertically."""
with tf.name_scope('random_vertical_flip'):
do_flip = tf.less(tf.random.uniform([], seed=seed), prob)
image = tf.cond(
do_flip,
lambda: tf.image.flip_up_down(image),
lambda: image)
if normalized_boxes is not None:
normalized_boxes = tf.cond(
do_flip,
lambda: vertical_flip_boxes(normalized_boxes),
lambda: normalized_boxes)
if masks is not None:
masks = tf.cond(
do_flip,
lambda: tf.image.flip_up_down(masks[..., None])[..., 0],
lambda: masks)
return image, normalized_boxes, masks
def color_jitter(image: tf.Tensor,
brightness: Optional[float] = 0.,
contrast: Optional[float] = 0.,
saturation: Optional[float] = 0.,
seed: Optional[int] = None) -> tf.Tensor:
"""Applies color jitter to an image, similarly to torchvision`s ColorJitter.
Args:
image (tf.Tensor): Of shape [height, width, 3] and type uint8.
brightness (float, optional): Magnitude for brightness jitter. Defaults to
0.
contrast (float, optional): Magnitude for contrast jitter. Defaults to 0.
saturation (float, optional): Magnitude for saturation jitter. Defaults to
0.
seed (int, optional): Random seed. Defaults to None.
Returns:
tf.Tensor: The augmented `image` of type uint8.
"""
image = tf.cast(image, dtype=tf.uint8)
image = random_brightness(image, brightness, seed=seed)
image = random_contrast(image, contrast, seed=seed)
image = random_saturation(image, saturation, seed=seed)
return image
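# Illustrative usage sketch (not part of the original module); the magnitudes
# are assumptions. Each magnitude m samples a factor uniformly from
# [max(0, 1 - m), 1 + m], mirroring torchvision's ColorJitter:
#
#   image = tf.zeros([224, 224, 3], dtype=tf.uint8)
#   jittered = color_jitter(
#       image, brightness=0.4, contrast=0.4, saturation=0.4)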
def random_brightness(image: tf.Tensor,
brightness: float = 0.,
seed: Optional[int] = None) -> tf.Tensor:
"""Jitters brightness of an image.
Args:
image (tf.Tensor): Of shape [height, width, 3] and type uint8.
brightness (float, optional): Magnitude for brightness jitter. Defaults to
0.
seed (int, optional): Random seed. Defaults to None.
Returns:
tf.Tensor: The augmented `image` of type uint8.
"""
  assert brightness >= 0, '`brightness` must be non-negative'
brightness = tf.random.uniform([],
max(0, 1 - brightness),
1 + brightness,
seed=seed,
dtype=tf.float32)
return augment.brightness(image, brightness)
def random_contrast(image: tf.Tensor,
contrast: float = 0.,
seed: Optional[int] = None) -> tf.Tensor:
"""Jitters contrast of an image, similarly to torchvision`s ColorJitter.
Args:
image (tf.Tensor): Of shape [height, width, 3] and type uint8.
contrast (float, optional): Magnitude for contrast jitter. Defaults to 0.
seed (int, optional): Random seed. Defaults to None.
Returns:
tf.Tensor: The augmented `image` of type uint8.
"""
  assert contrast >= 0, '`contrast` must be non-negative'
contrast = tf.random.uniform([],
max(0, 1 - contrast),
1 + contrast,
seed=seed,
dtype=tf.float32)
return augment.contrast(image, contrast)
def random_saturation(image: tf.Tensor,
saturation: float = 0.,
seed: Optional[int] = None) -> tf.Tensor:
"""Jitters saturation of an image, similarly to torchvision`s ColorJitter.
Args:
image (tf.Tensor): Of shape [height, width, 3] and type uint8.
saturation (float, optional): Magnitude for saturation jitter. Defaults to
0.
seed (int, optional): Random seed. Defaults to None.
Returns:
tf.Tensor: The augmented `image` of type uint8.
"""
  assert saturation >= 0, '`saturation` must be non-negative'
saturation = tf.random.uniform([],
max(0, 1 - saturation),
1 + saturation,
seed=seed,
dtype=tf.float32)
return _saturation(image, saturation)
def _saturation(image: tf.Tensor,
saturation: Optional[float] = 0.) -> tf.Tensor:
return augment.blend(
tf.repeat(tf.image.rgb_to_grayscale(image), 3, axis=-1), image,
saturation)
def random_crop_image_with_boxes_and_labels(img, boxes, labels, min_scale,
aspect_ratio_range,
min_overlap_params, max_retry):
"""Crops a random slice from the input image.
The function will correspondingly recompute the bounding boxes and filter out
outside boxes and their labels.
References:
[1] End-to-End Object Detection with Transformers
https://arxiv.org/abs/2005.12872
The preprocessing steps:
1. Sample a minimum IoU overlap.
2. For each trial, sample the new image width, height, and top-left corner.
3. Compute the IoUs of bounding boxes with the cropped image and retry if
the maximum IoU is below the sampled threshold.
4. Find boxes whose centers are in the cropped image.
5. Compute new bounding boxes in the cropped region and only select those
boxes' labels.
Args:
img: a 'Tensor' of shape [height, width, 3] representing the input image.
boxes: a 'Tensor' of shape [N, 4] representing the ground-truth bounding
boxes with (ymin, xmin, ymax, xmax).
labels: a 'Tensor' of shape [N,] representing the class labels of the boxes.
min_scale: a 'float' in [0.0, 1.0) indicating the lower bound of the random
scale variable.
aspect_ratio_range: a list of two 'float' that specifies the lower and upper
bound of the random aspect ratio.
min_overlap_params: a list of four 'float' representing the min value, max
value, step size, and offset for the minimum overlap sample.
max_retry: an 'int' representing the number of trials for cropping. If it is
exhausted, no cropping will be performed.
Returns:
img: a Tensor representing the random cropped image. Can be the
original image if max_retry is exhausted.
boxes: a Tensor representing the bounding boxes in the cropped image.
labels: a Tensor representing the new bounding boxes' labels.
"""
shape = tf.shape(img)
original_h = shape[0]
original_w = shape[1]
minval, maxval, step, offset = min_overlap_params
min_overlap = tf.math.floordiv(
tf.random.uniform([], minval=minval, maxval=maxval), step) * step - offset
min_overlap = tf.clip_by_value(min_overlap, 0.0, 1.1)
if min_overlap > 1.0:
return img, boxes, labels
aspect_ratio_low = aspect_ratio_range[0]
aspect_ratio_high = aspect_ratio_range[1]
for _ in tf.range(max_retry):
scale_h = tf.random.uniform([], min_scale, 1.0)
scale_w = tf.random.uniform([], min_scale, 1.0)
new_h = tf.cast(
scale_h * tf.cast(original_h, dtype=tf.float32), dtype=tf.int32)
new_w = tf.cast(
scale_w * tf.cast(original_w, dtype=tf.float32), dtype=tf.int32)
# Aspect ratio has to be in the prespecified range
aspect_ratio = new_h / new_w
if aspect_ratio_low > aspect_ratio or aspect_ratio > aspect_ratio_high:
continue
left = tf.random.uniform([], 0, original_w - new_w, dtype=tf.int32)
right = left + new_w
top = tf.random.uniform([], 0, original_h - new_h, dtype=tf.int32)
bottom = top + new_h
normalized_left = tf.cast(
left, dtype=tf.float32) / tf.cast(
original_w, dtype=tf.float32)
normalized_right = tf.cast(
right, dtype=tf.float32) / tf.cast(
original_w, dtype=tf.float32)
normalized_top = tf.cast(
top, dtype=tf.float32) / tf.cast(
original_h, dtype=tf.float32)
normalized_bottom = tf.cast(
bottom, dtype=tf.float32) / tf.cast(
original_h, dtype=tf.float32)
cropped_box = tf.expand_dims(
tf.stack([
normalized_top,
normalized_left,
normalized_bottom,
normalized_right,
]),
axis=0)
iou = box_ops.bbox_overlap(
tf.expand_dims(cropped_box, axis=0),
tf.expand_dims(boxes, axis=0)) # (1, 1, n_ground_truth)
iou = tf.squeeze(iou, axis=[0, 1])
# If not a single bounding box has a Jaccard overlap of greater than
# the minimum, try again
if tf.reduce_max(iou) < min_overlap:
continue
centroids = box_ops.yxyx_to_cycxhw(boxes)
mask = tf.math.logical_and(
tf.math.logical_and(centroids[:, 0] > normalized_top,
centroids[:, 0] < normalized_bottom),
tf.math.logical_and(centroids[:, 1] > normalized_left,
centroids[:, 1] < normalized_right))
# If not a single bounding box has its center in the crop, try again.
if tf.reduce_sum(tf.cast(mask, dtype=tf.int32)) > 0:
indices = tf.squeeze(tf.where(mask), axis=1)
filtered_boxes = tf.gather(boxes, indices)
boxes = tf.clip_by_value(
(filtered_boxes[..., :] * tf.cast(
tf.stack([original_h, original_w, original_h, original_w]),
dtype=tf.float32) -
tf.cast(tf.stack([top, left, top, left]), dtype=tf.float32)) /
tf.cast(tf.stack([new_h, new_w, new_h, new_w]), dtype=tf.float32),
0.0, 1.0)
img = tf.image.crop_to_bounding_box(img, top, left, bottom - top,
right - left)
labels = tf.gather(labels, indices)
break
return img, boxes, labels
def random_crop(image,
boxes,
labels,
min_scale=0.3,
aspect_ratio_range=(0.5, 2.0),
min_overlap_params=(0.0, 1.4, 0.2, 0.1),
max_retry=50,
seed=None):
"""Randomly crop the image and boxes, filtering labels.
Args:
image: a 'Tensor' of shape [height, width, 3] representing the input image.
boxes: a 'Tensor' of shape [N, 4] representing the ground-truth bounding
boxes with (ymin, xmin, ymax, xmax).
labels: a 'Tensor' of shape [N,] representing the class labels of the boxes.
min_scale: a 'float' in [0.0, 1.0) indicating the lower bound of the random
scale variable.
aspect_ratio_range: a list of two 'float' that specifies the lower and upper
bound of the random aspect ratio.
min_overlap_params: a list of four 'float' representing the min value, max
value, step size, and offset for the minimum overlap sample.
max_retry: an 'int' representing the number of trials for cropping. If it is
exhausted, no cropping will be performed.
seed: the random number seed of int, but could be None.
Returns:
image: a Tensor representing the random cropped image. Can be the
original image if max_retry is exhausted.
boxes: a Tensor representing the bounding boxes in the cropped image.
labels: a Tensor representing the new bounding boxes' labels.
"""
with tf.name_scope('random_crop'):
do_crop = tf.greater(tf.random.uniform([], seed=seed), 0.5)
if do_crop:
return random_crop_image_with_boxes_and_labels(image, boxes, labels,
min_scale,
aspect_ratio_range,
min_overlap_params,
max_retry)
else:
return image, boxes, labels
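# Illustrative usage sketch (not part of the original module); shapes and
# values are assumptions. Boxes are normalized [ymin, xmin, ymax, xmax] and
# are recomputed relative to the crop:
#
#   image = tf.zeros([480, 640, 3], tf.float32)
#   boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]])
#   labels = tf.constant([3], tf.int32)
#   image, boxes, labels = random_crop(image, boxes, labels)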
# File: models-master/official/vision/ops/preprocess_ops_3d_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import itertools
import numpy as np
from PIL import Image
import tensorflow as tf
from official.vision.ops import preprocess_ops_3d
class ParserUtilsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# [[0, 1, ..., 119], [1, 2, ..., 120], ..., [119, 120, ..., 218]].
self._frames = tf.stack([tf.range(i, i + 120) for i in range(90)])
self._frames = tf.cast(self._frames, tf.uint8)
self._frames = self._frames[tf.newaxis, :, :, tf.newaxis]
self._frames = tf.broadcast_to(self._frames, (6, 90, 120, 3))
# Create an equivalent numpy array for assertions.
self._np_frames = np.array([range(i, i + 120) for i in range(90)])
self._np_frames = self._np_frames[np.newaxis, :, :, np.newaxis]
self._np_frames = np.broadcast_to(self._np_frames, (6, 90, 120, 3))
def test_sample_linspace_sequence(self):
sequence = tf.range(100)
sampled_seq_1 = preprocess_ops_3d.sample_linspace_sequence(
sequence, 10, 10, 1)
sampled_seq_2 = preprocess_ops_3d.sample_linspace_sequence(
sequence, 7, 10, 1)
sampled_seq_3 = preprocess_ops_3d.sample_linspace_sequence(
sequence, 7, 5, 2)
sampled_seq_4 = preprocess_ops_3d.sample_linspace_sequence(
sequence, 101, 1, 1)
self.assertAllEqual(sampled_seq_1, range(100))
# [0, 1, 2, 3, 4, ..., 8, 9, 15, 16, ..., 97, 98, 99]
self.assertAllEqual(
sampled_seq_2,
[15 * i + j for i, j in itertools.product(range(7), range(10))])
# [0, 2, 4, 6, 8, 15, 17, 19, ..., 96, 98]
self.assertAllEqual(
sampled_seq_3,
[15 * i + 2 * j for i, j in itertools.product(range(7), range(5))])
self.assertAllEqual(sampled_seq_4, [0] + list(range(100)))
def test_sample_sequence(self):
sequence = tf.range(100)
sampled_seq_1 = preprocess_ops_3d.sample_sequence(sequence, 10, False, 1)
sampled_seq_2 = preprocess_ops_3d.sample_sequence(sequence, 10, False, 2)
sampled_seq_3 = preprocess_ops_3d.sample_sequence(sequence, 10, True, 1)
self.assertAllEqual(sampled_seq_1, range(45, 55))
self.assertAllEqual(sampled_seq_2, range(40, 60, 2))
offset_3 = sampled_seq_3[0]
self.assertBetween(offset_3, 0, 99)
self.assertAllEqual(sampled_seq_3, range(offset_3, offset_3 + 10))
def test_sample_segment_sequence(self):
sequence = tf.range(100)
sampled_seq_1 = preprocess_ops_3d.sample_segment_sequence(
sequence, 10, False)
sampled_seq_2 = preprocess_ops_3d.sample_segment_sequence(
sequence, 10, True)
self.assertAllEqual(sampled_seq_1, [5 + i * 10 for i in range(10)])
for idx, v in enumerate(sampled_seq_2):
self.assertBetween(v - idx * 10, 0, 10)
def test_decode_jpeg(self):
# Create a random RGB JPEG image.
random_image = np.random.randint(0, 256, size=(263, 320, 3), dtype=np.uint8)
random_image = Image.fromarray(random_image)
with io.BytesIO() as buffer:
random_image.save(buffer, format='JPEG')
raw_image_bytes = buffer.getvalue()
raw_image = tf.constant([raw_image_bytes, raw_image_bytes])
decoded_image = preprocess_ops_3d.decode_jpeg(raw_image, 3)
self.assertEqual(decoded_image.shape.as_list()[3], 3)
self.assertAllEqual(decoded_image.shape, (2, 263, 320, 3))
def test_crop_image(self):
cropped_image_1 = preprocess_ops_3d.crop_image(self._frames, 50, 70)
cropped_image_2 = preprocess_ops_3d.crop_image(self._frames, 200, 200)
cropped_image_3 = preprocess_ops_3d.crop_image(self._frames, 50, 70, True)
cropped_image_4 = preprocess_ops_3d.crop_image(
self._frames, 90, 90, False, 3)
self.assertAllEqual(cropped_image_1.shape, (6, 50, 70, 3))
self.assertAllEqual(cropped_image_1, self._np_frames[:, 20:70, 25:95, :])
self.assertAllEqual(cropped_image_2.shape, (6, 200, 200, 3))
expected = np.pad(
self._np_frames, ((0, 0), (55, 55), (40, 40), (0, 0)), 'constant')
self.assertAllEqual(cropped_image_2, expected)
self.assertAllEqual(cropped_image_3.shape, (6, 50, 70, 3))
offset = cropped_image_3[0, 0, 0, 0]
expected = np.array([range(i, i + 70) for i in range(offset, offset + 50)])
expected = expected[np.newaxis, :, :, np.newaxis]
expected = np.broadcast_to(expected, (6, 50, 70, 3))
self.assertAllEqual(cropped_image_3, expected)
self.assertAllEqual(cropped_image_4.shape, (18, 90, 90, 3))
def test_resize_smallest(self):
resized_frames_1 = preprocess_ops_3d.resize_smallest(self._frames, 180)
resized_frames_2 = preprocess_ops_3d.resize_smallest(self._frames, 45)
resized_frames_3 = preprocess_ops_3d.resize_smallest(self._frames, 90)
resized_frames_4 = preprocess_ops_3d.resize_smallest(
tf.transpose(self._frames, (0, 2, 1, 3)), 45)
self.assertAllEqual(resized_frames_1.shape, (6, 180, 240, 3))
self.assertAllEqual(resized_frames_2.shape, (6, 45, 60, 3))
self.assertAllEqual(resized_frames_3.shape, (6, 90, 120, 3))
self.assertAllEqual(resized_frames_4.shape, (6, 60, 45, 3))
def test_random_crop_resize(self):
resized_frames_1 = preprocess_ops_3d.random_crop_resize(
self._frames, 256, 256, 6, 3, (0.5, 2), (0.3, 1))
resized_frames_2 = preprocess_ops_3d.random_crop_resize(
self._frames, 224, 224, 6, 3, (0.5, 2), (0.3, 1))
resized_frames_3 = preprocess_ops_3d.random_crop_resize(
self._frames, 256, 256, 6, 3, (0.8, 1.2), (0.3, 1))
resized_frames_4 = preprocess_ops_3d.random_crop_resize(
self._frames, 256, 256, 6, 3, (0.5, 2), (0.1, 1))
self.assertAllEqual(resized_frames_1.shape, (6, 256, 256, 3))
self.assertAllEqual(resized_frames_2.shape, (6, 224, 224, 3))
self.assertAllEqual(resized_frames_3.shape, (6, 256, 256, 3))
self.assertAllEqual(resized_frames_4.shape, (6, 256, 256, 3))
def test_random_flip_left_right(self):
flipped_frames = preprocess_ops_3d.random_flip_left_right(self._frames)
flipped = np.fliplr(self._np_frames[0, :, :, 0])
flipped = flipped[np.newaxis, :, :, np.newaxis]
flipped = np.broadcast_to(flipped, (6, 90, 120, 3))
self.assertTrue((flipped_frames == self._np_frames).numpy().all() or (
flipped_frames == flipped).numpy().all())
def test_normalize_image(self):
normalized_images_1 = preprocess_ops_3d.normalize_image(
self._frames, False, tf.float32)
normalized_images_2 = preprocess_ops_3d.normalize_image(
self._frames, True, tf.float32)
self.assertAllClose(normalized_images_1, self._np_frames / 255)
self.assertAllClose(normalized_images_2, self._np_frames * 2 / 255 - 1.0)
if __name__ == '__main__':
tf.test.main()
# File: models-master/official/vision/ops/box_matcher.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Box matcher implementation."""
from typing import List, Tuple
import tensorflow as tf
class BoxMatcher:
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
positive_threshold (upper threshold) and negative_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored, for example:
(1) thresholds=[negative_threshold, positive_threshold], and
indicators=[negative_value, ignore_value, positive_value]: The similarity
metrics below negative_threshold will be assigned with negative_value,
the metrics between negative_threshold and positive_threshold will be
assigned ignore_value, and the metrics above positive_threshold will be
assigned positive_value.
(2) thresholds=[negative_threshold, positive_threshold], and
indicators=[ignore_value, negative_value, positive_value]: The similarity
metric below negative_threshold will be assigned with ignore_value,
the metrics between negative_threshold and positive_threshold will be
assigned negative_value, and the metrics above positive_threshold will be
assigned positive_value.
"""
def __init__(self,
thresholds: List[float],
indicators: List[int],
force_match_for_each_col: bool = False):
"""Construct BoxMatcher.
Args:
thresholds: A list of thresholds to classify the matches into different
types (e.g. positive or negative or ignored match). The list needs to be
sorted, and will be prepended with -Inf and appended with +Inf.
indicators: A list of values representing match types (e.g. positive or
negative or ignored match). len(`indicators`) must equal to
len(`thresholds`) + 1.
force_match_for_each_col: If True, ensures that each column is matched to
at least one row (which is not guaranteed otherwise if the
positive_threshold is high). Defaults to False. If True, all force
matched row will be assigned to `indicators[-1]`.
Raises:
ValueError: If `threshold` not sorted,
or len(indicators) != len(threshold) + 1
"""
if not all([lo <= hi for (lo, hi) in zip(thresholds[:-1], thresholds[1:])]):
raise ValueError('`threshold` must be sorted, got {}'.format(thresholds))
self.indicators = indicators
if len(indicators) != len(thresholds) + 1:
raise ValueError('len(`indicators`) must be len(`thresholds`) + 1, got '
'indicators {}, thresholds {}'.format(
indicators, thresholds))
thresholds = thresholds[:]
thresholds.insert(0, -float('inf'))
thresholds.append(float('inf'))
self.thresholds = thresholds
self._force_match_for_each_col = force_match_for_each_col
def __call__(self,
similarity_matrix: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: A float tensor of shape [num_rows, num_cols] or
[batch_size, num_rows, num_cols] representing any similarity metric.
Returns:
matched_columns: An integer tensor of shape [num_rows] or [batch_size,
num_rows] storing the index of the matched column for each row.
match_indicators: An integer tensor of shape [num_rows] or [batch_size,
num_rows] storing the match type indicator (e.g. positive or negative or
ignored match).
"""
squeeze_result = False
if len(similarity_matrix.shape) == 2:
squeeze_result = True
similarity_matrix = tf.expand_dims(similarity_matrix, axis=0)
static_shape = similarity_matrix.shape.as_list()
num_rows = static_shape[1] or tf.shape(similarity_matrix)[1]
batch_size = static_shape[0] or tf.shape(similarity_matrix)[0]
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the rows do not match to any columns.
Returns:
matched_columns: An integer tensor of shape [num_rows] or [batch_size,
num_rows] storing the index of the matched column for each row.
match_indicators: An integer tensor of shape [num_rows] or [batch_size,
num_rows] storing the match type indicator (e.g. positive or negative
or ignored match).
"""
with tf.name_scope('empty_gt_boxes'):
matched_columns = tf.zeros([batch_size, num_rows], dtype=tf.int32)
match_indicators = -tf.ones([batch_size, num_rows], dtype=tf.int32)
return matched_columns, match_indicators
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matched_columns: An integer tensor of shape [num_rows] or [batch_size,
num_rows] storing the index of the matched column for each row.
match_indicators: An integer tensor of shape [num_rows] or [batch_size,
num_rows] storing the match type indicator (e.g. positive or negative
or ignored match).
"""
with tf.name_scope('non_empty_gt_boxes'):
matched_columns = tf.argmax(
similarity_matrix, axis=-1, output_type=tf.int32)
        # The best match value for each row, used below to bucket rows into
        # match types by threshold.
matched_vals = tf.reduce_max(similarity_matrix, axis=-1)
match_indicators = tf.zeros([batch_size, num_rows], tf.int32)
match_dtype = matched_vals.dtype
for (ind, low, high) in zip(self.indicators, self.thresholds[:-1],
self.thresholds[1:]):
low_threshold = tf.cast(low, match_dtype)
high_threshold = tf.cast(high, match_dtype)
mask = tf.logical_and(
tf.greater_equal(matched_vals, low_threshold),
tf.less(matched_vals, high_threshold))
match_indicators = self._set_values_using_indicator(
match_indicators, mask, ind)
if self._force_match_for_each_col:
# [batch_size, num_cols], for each column (groundtruth_box), find the
# best matching row (anchor).
matching_rows = tf.argmax(
input=similarity_matrix, axis=1, output_type=tf.int32)
# [batch_size, num_cols, num_rows], a transposed 0-1 mapping matrix M,
# where M[j, i] = 1 means column j is matched to row i.
column_to_row_match_mapping = tf.one_hot(
matching_rows, depth=num_rows)
# [batch_size, num_rows], for each row (anchor), find the matched
# column (groundtruth_box).
force_matched_columns = tf.argmax(
input=column_to_row_match_mapping, axis=1, output_type=tf.int32)
# [batch_size, num_rows]
force_matched_column_mask = tf.cast(
tf.reduce_max(column_to_row_match_mapping, axis=1), tf.bool)
# [batch_size, num_rows]
matched_columns = tf.where(force_matched_column_mask,
force_matched_columns, matched_columns)
match_indicators = tf.where(
force_matched_column_mask, self.indicators[-1] *
tf.ones([batch_size, num_rows], dtype=tf.int32), match_indicators)
return matched_columns, match_indicators
num_gt_boxes = similarity_matrix.shape.as_list()[-1] or tf.shape(
similarity_matrix)[-1]
matched_columns, match_indicators = tf.cond(
pred=tf.greater(num_gt_boxes, 0),
true_fn=_match_when_rows_are_non_empty,
false_fn=_match_when_rows_are_empty)
if squeeze_result:
matched_columns = tf.squeeze(matched_columns, axis=0)
match_indicators = tf.squeeze(match_indicators, axis=0)
return matched_columns, match_indicators
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
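# Illustrative usage sketch (not part of the original module); the thresholds
# and indicator values below follow a common RetinaNet-style convention and
# are assumptions for the example:
#
#   matcher = BoxMatcher(thresholds=[0.4, 0.5], indicators=[-1, -2, 1])
#   # similarity: e.g. IoU between anchors (rows) and ground-truth boxes
#   # (columns).
#   similarity = tf.random.uniform([100, 5])
#   matched_columns, match_indicators = matcher(similarity)
#   # matched_columns[i] is the best ground-truth index for anchor i;
#   # match_indicators[i] is 1 (positive), -2 (ignored) or -1 (negative).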
# File: models-master/official/vision/ops/augment.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmentation policies for enhanced image/video preprocessing.
AutoAugment Reference:
- AutoAugment Reference: https://arxiv.org/abs/1805.09501
- AutoAugment for Object Detection Reference: https://arxiv.org/abs/1906.11172
RandAugment Reference: https://arxiv.org/abs/1909.13719
RandomErasing Reference: https://arxiv.org/abs/1708.04896
MixupAndCutmix:
- Mixup: https://arxiv.org/abs/1710.09412
- Cutmix: https://arxiv.org/abs/1905.04899
RandomErasing, Mixup and Cutmix are inspired by
https://github.com/rwightman/pytorch-image-models
"""
import inspect
import math
from typing import Any, List, Iterable, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def to_4d(image: tf.Tensor) -> tf.Tensor:
"""Converts an input Tensor to 4 dimensions.
4D image => [N, H, W, C] or [N, C, H, W]
3D image => [1, H, W, C] or [1, C, H, W]
2D image => [1, H, W, 1]
Args:
image: The 2/3/4D input tensor.
Returns:
A 4D image tensor.
Raises:
`TypeError` if `image` is not a 2/3/4D tensor.
"""
shape = tf.shape(image)
original_rank = tf.rank(image)
left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)
right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)
new_shape = tf.concat(
[
tf.ones(shape=left_pad, dtype=tf.int32),
shape,
tf.ones(shape=right_pad, dtype=tf.int32),
],
axis=0,
)
return tf.reshape(image, new_shape)
def from_4d(image: tf.Tensor, ndims: tf.Tensor) -> tf.Tensor:
"""Converts a 4D image back to `ndims` rank."""
shape = tf.shape(image)
begin = tf.cast(tf.less_equal(ndims, 3), dtype=tf.int32)
end = 4 - tf.cast(tf.equal(ndims, 2), dtype=tf.int32)
new_shape = shape[begin:end]
return tf.reshape(image, new_shape)
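# Illustrative sketch (not part of the original module) of the round trip:
#
#   img = tf.zeros([32, 32])            # 2-D
#   img4 = to_4d(img)                   # shape [1, 32, 32, 1]
#   back = from_4d(img4, tf.rank(img))  # shape [32, 32] again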
def _pad(
image: tf.Tensor,
filter_shape: Union[List[int], Tuple[int, ...]],
mode: str = 'CONSTANT',
constant_values: Union[int, tf.Tensor] = 0,
) -> tf.Tensor:
"""Explicitly pads a 4-D image.
Equivalent to the implicit padding method offered in `tf.nn.conv2d` and
`tf.nn.depthwise_conv2d`, but supports non-zero, reflect and symmetric
padding mode. For the even-sized filter, it pads one more value to the
right or the bottom side.
Args:
image: A 4-D `Tensor` of shape `[batch_size, height, width, channels]`.
filter_shape: A `tuple`/`list` of 2 integers, specifying the height and
width of the 2-D filter.
mode: A `string`, one of "REFLECT", "CONSTANT", or "SYMMETRIC". The type of
padding algorithm to use, which is compatible with `mode` argument in
`tf.pad`. For more details, please refer to
https://www.tensorflow.org/api_docs/python/tf/pad.
constant_values: A `scalar`, the pad value to use in "CONSTANT" padding
mode.
Returns:
A padded image.
"""
if mode.upper() not in {'REFLECT', 'CONSTANT', 'SYMMETRIC'}:
raise ValueError(
'padding should be one of "REFLECT", "CONSTANT", or "SYMMETRIC".'
)
constant_values = tf.convert_to_tensor(constant_values, image.dtype)
filter_height, filter_width = filter_shape
pad_top = (filter_height - 1) // 2
pad_bottom = filter_height - 1 - pad_top
pad_left = (filter_width - 1) // 2
pad_right = filter_width - 1 - pad_left
paddings = [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]
return tf.pad(image, paddings, mode=mode, constant_values=constant_values)
def _get_gaussian_kernel(sigma, filter_shape):
"""Computes 1D Gaussian kernel."""
sigma = tf.convert_to_tensor(sigma)
x = tf.range(-filter_shape // 2 + 1, filter_shape // 2 + 1)
x = tf.cast(x**2, sigma.dtype)
x = tf.nn.softmax(-x / (2.0 * (sigma**2)))
return x
def _get_gaussian_kernel_2d(gaussian_filter_x, gaussian_filter_y):
"""Computes 2D Gaussian kernel given 1D kernels."""
gaussian_kernel = tf.matmul(gaussian_filter_x, gaussian_filter_y)
return gaussian_kernel
def _normalize_tuple(value, n, name):
"""Transforms an integer or iterable of integers into an integer tuple.
Args:
value: The value to validate and convert. Could an int, or any iterable of
ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
Returns:
A tuple of n integers.
Raises:
ValueError: If something else than an int/long or iterable thereof was
passed.
"""
if isinstance(value, int):
return (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError as exc:
raise TypeError(
f'The {name} argument must be a tuple of {n} integers. '
f'Received: {value}'
) from exc
if len(value_tuple) != n:
raise ValueError(
f'The {name} argument must be a tuple of {n} integers. '
f'Received: {value}'
)
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError) as exc:
raise ValueError(
f'The {name} argument must be a tuple of {n} integers. Received:'
f' {value} including element {single_value} of type'
f' {type(single_value)}.'
) from exc
return value_tuple
def gaussian_filter2d(
image: tf.Tensor,
filter_shape: Union[List[int], Tuple[int, ...], int],
sigma: Union[List[float], Tuple[float], float] = 1.0,
padding: str = 'REFLECT',
constant_values: Union[int, tf.Tensor] = 0,
name: Optional[str] = None,
) -> tf.Tensor:
"""Performs Gaussian blur on image(s).
Args:
image: Either a 2-D `Tensor` of shape `[height, width]`, a 3-D `Tensor` of
shape `[height, width, channels]`, or a 4-D `Tensor` of shape
`[batch_size, height, width, channels]`.
filter_shape: An `integer` or `tuple`/`list` of 2 integers, specifying the
height and width of the 2-D gaussian filter. Can be a single integer to
specify the same value for all spatial dimensions.
sigma: A `float` or `tuple`/`list` of 2 floats, specifying the standard
deviation in x and y direction the 2-D gaussian filter. Can be a single
float to specify the same value for all spatial dimensions.
padding: A `string`, one of "REFLECT", "CONSTANT", or "SYMMETRIC". The type
of padding algorithm to use, which is compatible with `mode` argument in
`tf.pad`. For more details, please refer to
https://www.tensorflow.org/api_docs/python/tf/pad.
constant_values: A `scalar`, the pad value to use in "CONSTANT" padding
mode.
name: A name for this operation (optional).
Returns:
2-D, 3-D or 4-D `Tensor` of the same dtype as input.
Raises:
ValueError: If `image` is not 2, 3 or 4-dimensional,
if `padding` is other than "REFLECT", "CONSTANT" or "SYMMETRIC",
if `filter_shape` is invalid,
or if `sigma` is invalid.
"""
with tf.name_scope(name or 'gaussian_filter2d'):
if isinstance(sigma, (list, tuple)):
if len(sigma) != 2:
raise ValueError('sigma should be a float or a tuple/list of 2 floats')
else:
sigma = (sigma,) * 2
if any(s < 0 for s in sigma):
raise ValueError('sigma should be greater than or equal to 0.')
image = tf.convert_to_tensor(image, name='image')
sigma = tf.convert_to_tensor(sigma, name='sigma')
original_ndims = tf.rank(image)
image = to_4d(image)
# Keep the precision if it's float;
# otherwise, convert to float32 for computing.
orig_dtype = image.dtype
if not image.dtype.is_floating:
image = tf.cast(image, tf.float32)
channels = tf.shape(image)[3]
filter_shape = _normalize_tuple(filter_shape, 2, 'filter_shape')
sigma = tf.cast(sigma, image.dtype)
gaussian_kernel_x = _get_gaussian_kernel(sigma[1], filter_shape[1])
gaussian_kernel_x = gaussian_kernel_x[tf.newaxis, :]
gaussian_kernel_y = _get_gaussian_kernel(sigma[0], filter_shape[0])
gaussian_kernel_y = gaussian_kernel_y[:, tf.newaxis]
gaussian_kernel_2d = _get_gaussian_kernel_2d(
gaussian_kernel_y, gaussian_kernel_x
)
gaussian_kernel_2d = gaussian_kernel_2d[:, :, tf.newaxis, tf.newaxis]
gaussian_kernel_2d = tf.tile(gaussian_kernel_2d, [1, 1, channels, 1])
image = _pad(
image, filter_shape, mode=padding, constant_values=constant_values
)
output = tf.nn.depthwise_conv2d(
input=image,
filter=gaussian_kernel_2d,
strides=(1, 1, 1, 1),
padding='VALID',
)
output = from_4d(output, original_ndims)
return tf.cast(output, orig_dtype)
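# Illustrative usage sketch for `gaussian_filter2d` (the tensor names are
# hypothetical; only `tf` from this module is assumed):
#   image = tf.cast(
#       tf.random.uniform([32, 32, 3], maxval=256, dtype=tf.int32), tf.uint8)
#   blurred = gaussian_filter2d(image, filter_shape=5, sigma=1.5)
#   # `blurred` has the same shape and dtype (uint8) as `image`.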
def _convert_translation_to_transform(translations: tf.Tensor) -> tf.Tensor:
"""Converts translations to a projective transform.
The translation matrix looks like this:
[[1 0 -dx]
[0 1 -dy]
[0 0 1]]
Args:
translations: The 2-element list representing [dx, dy], or a matrix of
2-element lists representing [dx dy] to translate for each image. The
shape must be static.
Returns:
The transformation matrix of shape (num_images, 8).
Raises:
`TypeError` if
- the shape of `translations` is not known or
- the shape of `translations` is not rank 1 or 2.
"""
translations = tf.convert_to_tensor(translations, dtype=tf.float32)
if translations.get_shape().ndims is None:
raise TypeError('translations rank must be statically known')
elif len(translations.get_shape()) == 1:
translations = translations[None]
elif len(translations.get_shape()) != 2:
raise TypeError('translations should have rank 1 or 2.')
num_translations = tf.shape(translations)[0]
return tf.concat(
values=[
tf.ones((num_translations, 1), tf.dtypes.float32),
tf.zeros((num_translations, 1), tf.dtypes.float32),
-translations[:, 0, None],
tf.zeros((num_translations, 1), tf.dtypes.float32),
tf.ones((num_translations, 1), tf.dtypes.float32),
-translations[:, 1, None],
tf.zeros((num_translations, 2), tf.dtypes.float32),
],
axis=1,
)
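# Worked example (the values follow directly from the concat above):
#   _convert_translation_to_transform([2.0, 3.0])
#   # -> [[1., 0., -2., 0., 1., -3., 0., 0.]]
# i.e. the flattened first two rows of the 3x3 translation matrix for
# dx=2, dy=3, followed by c0=0, c1=0.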
def _convert_angles_to_transform(angles: tf.Tensor, image_width: tf.Tensor,
image_height: tf.Tensor) -> tf.Tensor:
"""Converts an angle or angles to a projective transform.
Args:
    angles: A scalar angle to rotate all images by, or a vector of angles to
      rotate a batch of images by. Must have rank 0 or 1.
image_width: The width of the image(s) to be transformed.
image_height: The height of the image(s) to be transformed.
Returns:
A tensor of shape (num_images, 8).
Raises:
`TypeError` if `angles` is not rank 0 or 1.
"""
angles = tf.convert_to_tensor(angles, dtype=tf.float32)
if len(angles.get_shape()) == 0: # pylint:disable=g-explicit-length-test
angles = angles[None]
elif len(angles.get_shape()) != 1:
    raise TypeError('Angles should have rank 0 or 1.')
x_offset = ((image_width - 1) -
(tf.math.cos(angles) * (image_width - 1) - tf.math.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) -
(tf.math.sin(angles) * (image_width - 1) + tf.math.cos(angles) *
(image_height - 1))) / 2.0
num_angles = tf.shape(angles)[0]
return tf.concat(
values=[
tf.math.cos(angles)[:, None],
-tf.math.sin(angles)[:, None],
x_offset[:, None],
tf.math.sin(angles)[:, None],
tf.math.cos(angles)[:, None],
y_offset[:, None],
tf.zeros((num_angles, 2), tf.dtypes.float32),
],
axis=1,
)
def _apply_transform_to_images(
images,
transforms,
fill_mode='reflect',
fill_value=0.0,
interpolation='bilinear',
output_shape=None,
name=None,
):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape `(num_images, num_rows, num_columns,
num_channels)` (NHWC). The rank must be statically known (the shape is
not `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1,
b2, c0, c1], then it maps the *output* point `(x, y)` to a transformed
*input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) /
k)`, where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared
to the transform mapping input points to output points. Note that
gradients are not backpropagated into transformation parameters.
    fill_mode: Points outside the boundaries of the input are filled according
      to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
      Fill mode behavior for each valid value is as follows:
      - `"reflect"`: `(d c b a | a b c d | d c b a)` The input is extended by
        reflecting about the edge of the last pixel.
      - `"constant"`: `(k k k k | a b c d | k k k k)` The input is extended by
        filling all values beyond the edge with the same constant value k = 0.
      - `"wrap"`: `(a b c d | a b c d | a b c d)` The input is extended by
        wrapping around to the opposite edge.
      - `"nearest"`: `(a a a a | a b c d | d d d d)` The input is extended by
        the nearest pixel.
    fill_value: A float representing the value to be filled outside the
      boundaries when `fill_mode="constant"`.
    interpolation: Interpolation mode. Supported values: `"nearest"`,
      `"bilinear"`.
    output_shape: Output dimension after the transform, `[height, width]`. If
      `None`, output is the same size as input image.
    name: The name of the op.

  Both input and output are 4D tensors with shape
  `(samples, height, width, channels)`, in `"channels_last"` format.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
"""
with tf.name_scope(name or 'transform'):
if output_shape is None:
output_shape = tf.shape(images)[1:3]
if not tf.executing_eagerly():
output_shape_value = tf.get_static_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = tf.convert_to_tensor(
output_shape, tf.int32, name='output_shape'
)
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError(
'output_shape must be a 1-D Tensor of 2 elements: '
'new_height, new_width, instead got '
f'output_shape={output_shape}'
)
fill_value = tf.convert_to_tensor(fill_value, tf.float32, name='fill_value')
return tf.raw_ops.ImageProjectiveTransformV3(
images=images,
output_shape=output_shape,
fill_value=fill_value,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper(),
)
def transform(
image: tf.Tensor,
transforms: Any,
interpolation: str = 'nearest',
output_shape=None,
fill_mode: str = 'reflect',
fill_value: float = 0.0,
) -> tf.Tensor:
"""Transforms an image."""
original_ndims = tf.rank(image)
transforms = tf.convert_to_tensor(transforms, dtype=tf.float32)
if transforms.shape.rank == 1:
transforms = transforms[None]
image = to_4d(image)
image = _apply_transform_to_images(
images=image,
transforms=transforms,
interpolation=interpolation,
fill_mode=fill_mode,
fill_value=fill_value,
output_shape=output_shape,
)
return from_4d(image, original_ndims)
def translate(
image: tf.Tensor,
translations,
fill_value: float = 0.0,
fill_mode: str = 'reflect',
interpolation: str = 'nearest',
) -> tf.Tensor:
"""Translates image(s) by provided vectors.
Args:
image: An image Tensor of type uint8.
translations: A vector or matrix representing [dx dy].
    fill_value: A float representing the value to be filled outside the
      boundaries when `fill_mode="constant"`.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
Returns:
The translated version of the image.
"""
transforms = _convert_translation_to_transform(translations) # pytype: disable=wrong-arg-types # always-use-return-annotations
return transform(
image,
transforms=transforms,
interpolation=interpolation,
fill_value=fill_value,
fill_mode=fill_mode,
)
def rotate(image: tf.Tensor, degrees: float) -> tf.Tensor:
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it
      will be rotated counterclockwise.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = tf.cast(degrees * degrees_to_radians, tf.float32)
original_ndims = tf.rank(image)
image = to_4d(image)
image_height = tf.cast(tf.shape(image)[1], tf.float32)
image_width = tf.cast(tf.shape(image)[2], tf.float32)
transforms = _convert_angles_to_transform(
angles=radians, image_width=image_width, image_height=image_height)
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = transform(image, transforms=transforms)
return from_4d(image, original_ndims)
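# Illustrative usage sketch (hypothetical tensor name): rotating an image by
# 90 degrees clockwise about its center.
#   image = tf.zeros([64, 64, 3], dtype=tf.uint8)
#   rotated = rotate(image, degrees=90.0)  # same rank and shape as `image`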
def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor:
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.cast(image1, tf.float32) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
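# Worked example for `blend` (hedged): with a pixel of 100 in image1 and 200
# in image2,
#   factor=0.5 -> 100 + 0.5 * (200 - 100) = 150 (interpolation)
#   factor=2.0 -> 100 + 2.0 * (200 - 100) = 300 -> clipped to 255
#   (extrapolation)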
def cutout(image: tf.Tensor, pad_size: int, replace: int = 0) -> tf.Tensor:
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `image`. The pixel values filled in will be of the
value `replace`. The location where the mask will be applied is randomly
chosen uniformly over the whole image.
Args:
image: An image Tensor of type uint8.
    pad_size: Specifies the size of the zero mask to apply to the image. The
      mask will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has the
cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
if image.shape.rank not in [3, 4]:
raise ValueError('Bad image rank: {}'.format(image.shape.rank))
if image.shape.rank == 4:
return cutout_video(image, replace=replace)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=image_height, dtype=tf.int32)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=image_width, dtype=tf.int32)
image = _fill_rectangle(image, cutout_center_width, cutout_center_height,
pad_size, pad_size, replace)
return image
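# Illustrative usage sketch (hypothetical names): cutting out a gray square of
# up to 40x40 pixels (2*pad_size per side) at a random location.
#   image = tf.zeros([224, 224, 3], dtype=tf.uint8)
#   augmented = cutout(image, pad_size=20, replace=128)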
def _fill_rectangle(image,
center_width,
center_height,
half_width,
half_height,
replace=None):
"""Fills blank area."""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
lower_pad = tf.maximum(0, center_height - half_height)
upper_pad = tf.maximum(0, image_height - center_height - half_height)
left_pad = tf.maximum(0, center_width - half_width)
right_pad = tf.maximum(0, image_width - center_width - half_width)
cutout_shape = [
image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)
]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims,
constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
if replace is None:
fill = tf.random.normal(tf.shape(image), dtype=image.dtype)
elif isinstance(replace, tf.Tensor):
fill = replace
else:
fill = tf.ones_like(image, dtype=image.dtype) * replace
image = tf.where(tf.equal(mask, 0), fill, image)
return image
def _fill_rectangle_video(image,
center_width,
center_height,
half_width,
half_height,
replace=None):
"""Fills blank area for video."""
image_time = tf.shape(image)[0]
image_height = tf.shape(image)[1]
image_width = tf.shape(image)[2]
lower_pad = tf.maximum(0, center_height - half_height)
upper_pad = tf.maximum(0, image_height - center_height - half_height)
left_pad = tf.maximum(0, center_width - half_width)
right_pad = tf.maximum(0, image_width - center_width - half_width)
cutout_shape = [
image_time, image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)
]
padding_dims = [[0, 0], [lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims,
constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 1, 3])
if replace is None:
fill = tf.random.normal(tf.shape(image), dtype=image.dtype)
elif isinstance(replace, tf.Tensor):
fill = replace
else:
fill = tf.ones_like(image, dtype=image.dtype) * replace
image = tf.where(tf.equal(mask, 0), fill, image)
return image
def cutout_video(
video: tf.Tensor,
mask_shape: Optional[tf.Tensor] = None,
replace: int = 0,
) -> tf.Tensor:
"""Apply cutout (https://arxiv.org/abs/1708.04552) to a video.
  This operation applies a random size 3D mask of zeros to a random location
  within `video`. The pixel values filled in will be of the value `replace`.
  The location where the mask will be applied is randomly chosen uniformly
  over the whole video. If the size of the mask is not set, it is randomly
  sampled uniformly from [0.25*height, 0.5*height], [0.25*width, 0.5*width],
  and [1, 0.25*depth], which represent the height, width, and number of frames
  of the input video tensor respectively.
Args:
video: A video Tensor of shape [T, H, W, C].
    mask_shape: An optional integer tensor that specifies the depth, height,
      and width of the mask to cut. If it is not set, the shape is randomly
      sampled as described above. The shape dimensions should be divisible by
      2; otherwise they will be rounded down.
replace: What pixel value to fill in the image in the area that has the
cutout mask applied to it.
Returns:
A video Tensor with cutout applied.
"""
tf.debugging.assert_shapes([
(video, ('T', 'H', 'W', 'C')),
])
video_depth = tf.shape(video)[0]
video_height = tf.shape(video)[1]
video_width = tf.shape(video)[2]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=video_height, dtype=tf.int32
)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=video_width, dtype=tf.int32
)
cutout_center_depth = tf.random.uniform(
shape=[], minval=0, maxval=video_depth, dtype=tf.int32
)
if mask_shape is not None:
pad_shape = tf.maximum(1, mask_shape // 2)
pad_size_depth, pad_size_height, pad_size_width = (
pad_shape[0],
pad_shape[1],
pad_shape[2],
)
else:
pad_size_height = tf.random.uniform(
shape=[],
minval=tf.maximum(1, tf.cast(video_height / 4, tf.int32)),
maxval=tf.maximum(2, tf.cast(video_height / 2, tf.int32)),
dtype=tf.int32,
)
pad_size_width = tf.random.uniform(
shape=[],
minval=tf.maximum(1, tf.cast(video_width / 4, tf.int32)),
maxval=tf.maximum(2, tf.cast(video_width / 2, tf.int32)),
dtype=tf.int32,
)
pad_size_depth = tf.random.uniform(
shape=[],
minval=1,
maxval=tf.maximum(2, tf.cast(video_depth / 4, tf.int32)),
dtype=tf.int32,
)
lower_pad = tf.maximum(0, cutout_center_height - pad_size_height)
upper_pad = tf.maximum(
0, video_height - cutout_center_height - pad_size_height
)
left_pad = tf.maximum(0, cutout_center_width - pad_size_width)
right_pad = tf.maximum(0, video_width - cutout_center_width - pad_size_width)
back_pad = tf.maximum(0, cutout_center_depth - pad_size_depth)
forward_pad = tf.maximum(
0, video_depth - cutout_center_depth - pad_size_depth
)
cutout_shape = [
video_depth - (back_pad + forward_pad),
video_height - (lower_pad + upper_pad),
video_width - (left_pad + right_pad),
]
padding_dims = [[back_pad, forward_pad],
[lower_pad, upper_pad],
[left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=video.dtype), padding_dims, constant_values=1
)
mask = tf.expand_dims(mask, -1)
num_channels = tf.shape(video)[-1]
mask = tf.tile(mask, [1, 1, 1, num_channels])
video = tf.where(
tf.equal(mask, 0), tf.ones_like(video, dtype=video.dtype) * replace, video
)
return video
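# Illustrative usage sketch (hypothetical names): `mask_shape` is given as
# [depth, height, width] and is halved internally, so [8, 64, 64] cuts out at
# most 8 frames of a 64x64-pixel region.
#   video = tf.zeros([16, 128, 128, 3], dtype=tf.uint8)
#   augmented = cutout_video(video, mask_shape=tf.constant([8, 64, 64]))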
def gaussian_noise(
image: tf.Tensor, low: float = 0.1, high: float = 2.0) -> tf.Tensor:
"""Add Gaussian noise to image(s)."""
  augmented_image = gaussian_filter2d(
      image, filter_shape=[3, 3], sigma=np.random.uniform(low=low, high=high)
  )
return augmented_image
def solarize(image: tf.Tensor, threshold: int = 128) -> tf.Tensor:
"""Solarize the input image(s)."""
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image: tf.Tensor,
addition: int = 0,
threshold: int = 128) -> tf.Tensor:
"""Additive solarize the input image(s)."""
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def grayscale(image: tf.Tensor) -> tf.Tensor:
"""Convert image to grayscale."""
return tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
def color(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Color."""
degenerate = grayscale(image)
return blend(degenerate, image, factor)
def contrast(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image: tf.Tensor, bits: int) -> tf.Tensor:
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
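# Worked example: `posterize` keeps only the top `bits` bits of each pixel.
# With bits=4 (shift=4), the pixel value 173 (0b10101101) becomes
# 0b10100000 = 160.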
def wrapped_rotate(image: tf.Tensor, degrees: float, replace: int) -> tf.Tensor:
"""Applies rotation with wrap/unwrap."""
image = rotate(wrap(image), degrees=degrees)
return unwrap(image, replace)
def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
"""Equivalent of PIL Translate in X dimension."""
image = translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
"""Equivalent of PIL Translate in Y dimension."""
image = translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = transform(
image=wrap(image), transforms=[1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = transform(
image=wrap(image), transforms=[1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image: tf.Tensor) -> tf.Tensor:
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image: tf.Tensor) -> tf.Tensor:
"""Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
    # to compute mins and maxes.
lo = tf.cast(tf.reduce_min(image), tf.float32)
hi = tf.cast(tf.reduce_max(image), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[..., 0])
s2 = scale_channel(image[..., 1])
s3 = scale_channel(image[..., 2])
image = tf.stack([s1, s2, s3], -1)
return image
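# Worked example (hedged) of the per-channel scaling above: for a channel with
# min 50 and max 200, scale = 255 / (200 - 50) = 1.7 and
# offset = -50 * 1.7 = -85, so a pixel of 125 maps to 125 * 1.7 - 85 = 127.5
# (cast to 127), while the min maps to 0 and the max to 255.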
def sharpness(image: tf.Tensor, factor: float) -> tf.Tensor:
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
if orig_image.shape.rank == 3:
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
elif orig_image.shape.rank == 4:
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[1, 3, 3, 1, 1]) / 13.
strides = [1, 1, 1, 1, 1]
# Run the kernel across each channel
channels = tf.split(image, 3, axis=-1)
degenerates = [
tf.nn.conv3d(channel, kernel, strides, padding='VALID',
dilations=[1, 1, 1, 1, 1])
for channel in channels
]
degenerate = tf.concat(degenerates, -1)
  else:
    raise ValueError('Bad image rank: {}'.format(orig_image.shape.rank))
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
paddings = [[0, 0]] * (orig_image.shape.rank - 3)
padded_mask = tf.pad(mask, paddings + [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, paddings + [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image: tf.Tensor) -> tf.Tensor:
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[..., c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0), lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], -1)
return image
def invert(image: tf.Tensor) -> tf.Tensor:
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image: tf.Tensor) -> tf.Tensor:
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.expand_dims(tf.ones(shape[:-1], image.dtype), -1)
extended = tf.concat([image, extended_channel], axis=-1)
return extended
def unwrap(image: tf.Tensor, replace: int) -> tf.Tensor:
"""Unwraps an image produced by wrap.
  Where there is a 0 in the last channel for every spatial position,
  the rest of the three channels in that spatial position are filled with
  `replace` (typically 128, i.e. gray). Operations like translate and shear on
  a wrapped Tensor will leave 0s in empty locations. Some transformations look
  at the intensity of values to do preprocessing, and we want these
  empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[-1]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[..., 3], axis=-1)
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(
image,
[0] * image.shape.rank,
tf.concat([image_shape[:-1], [3]], -1))
return image
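# Illustrative sketch (hypothetical names) of the wrap/unwrap round trip used
# by the geometric ops below:
#   padded = wrap(image)  # [H, W, 4], extra channel of all 1s
#   moved = translate(padded, [10, 0], fill_mode='constant')
#   # Empty areas now have 0 in the extra channel; fill them with gray:
#   result = unwrap(moved, replace=tf.constant([128, 128, 128], image.dtype))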
def _scale_bbox_only_op_probability(prob):
"""Reduce the probability of the bbox-only operation.
  Probability is reduced so that we do not distort the content of too many
  bounding boxes that are close to each other. The divisor of 3.0 is a
  hyperparameter that was chosen when designing the autoaugment algorithm and
  was found empirically to work well.
Args:
prob: Float that is the probability of applying the bbox-only operation.
Returns:
Reduced probability.
"""
return prob / 3.0
def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
"""Applies augmentation_func to the subsection of image indicated by bbox.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
    A modified version of image, where the bbox location in the image will
    have `augmentation_func` applied to it.
"""
image_height = tf.cast(tf.shape(image)[0], tf.float32)
image_width = tf.cast(tf.shape(image)[1], tf.float32)
min_y = tf.cast(image_height * bbox[0], tf.int32)
min_x = tf.cast(image_width * bbox[1], tf.int32)
max_y = tf.cast(image_height * bbox[2], tf.int32)
max_x = tf.cast(image_width * bbox[3], tf.int32)
image_height = tf.cast(image_height, tf.int32)
image_width = tf.cast(image_width, tf.int32)
# Clip to be sure the max values do not fall out of range.
max_y = tf.minimum(max_y, image_height - 1)
max_x = tf.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = tf.pad(augmented_bbox_content,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]])
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = tf.zeros_like(bbox_content)
mask_tensor = tf.pad(mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
constant_values=1)
# Replace the old bbox content with the new augmented content.
image = image * mask_tensor + augmented_bbox_content
return image
def _concat_bbox(bbox, bboxes):
"""Helper function that concates bbox to bboxes along the first dimension."""
# Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
# we discard bboxes and start the bboxes Tensor with the current bbox.
bboxes_sum_check = tf.reduce_sum(bboxes)
bbox = tf.expand_dims(bbox, 0)
# This check will be true when it is an _INVALID_BOX
bboxes = tf.cond(tf.equal(bboxes_sum_check, -4.0),
lambda: bbox,
lambda: tf.concat([bboxes, bbox], 0))
return bboxes
def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
augmentation_func, func_changes_bbox,
*args):
"""Applies _apply_bbox_augmentation with probability prob.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
have been altered by aug_func. These will only be changed when
func_changes_bbox is set to true. Each bbox has 4 elements
(min_y, min_x, max_y, max_x) of type float that are the normalized
bbox coordinates between 0 and 1.
prob: Float that is the probability of applying _apply_bbox_augmentation.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
    A tuple. The first element is a modified version of image, where the bbox
location in the image will have augmentation_func applied to it if it is
chosen to be called with probability `prob`. The second element is a
Tensor of Tensors of length 4 that will contain the altered bbox after
applying augmentation_func.
"""
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
if func_changes_bbox:
augmented_image, bbox = tf.cond(
should_apply_op,
lambda: augmentation_func(image, bbox, *args),
lambda: (image, bbox))
else:
augmented_image = tf.cond(
should_apply_op,
lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args),
lambda: image)
new_bboxes = _concat_bbox(bbox, new_bboxes)
return augmented_image, new_bboxes
def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Checks to be sure num bboxes > 0 before calling inner function."""
num_bboxes = tf.shape(bboxes)[0]
image, bboxes = tf.cond(
tf.equal(num_bboxes, 0),
lambda: (image, bboxes),
# pylint:disable=g-long-lambda
lambda: _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
return image, bboxes
# Represents an invalid bounding box that is used for checking for padding
# lists of bounding box coordinates for a few augmentation operations.
_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Applies aug_func to the image for each bbox in bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
prob: Float that is the probability of applying aug_func to a specific
bounding box within the image.
aug_func: Augmentation function that will be applied to the
subsections of image indicated by the bbox values in bboxes.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where each bbox location in the image will
have augmentation_func applied to it if it is chosen to be called with
probability prob independently across all bboxes. Also the final
bboxes are returned that will be unchanged if func_changes_bbox is set to
false and if true, the new altered ones will be returned.
Raises:
ValueError if applied to video.
"""
if image.shape.rank == 4:
raise ValueError('Image rank 4 is not supported')
# Will keep track of the new altered bboxes after aug_func is repeatedly
# applied. The -1 values are a dummy value and this first Tensor will be
# removed upon appending the first real bbox.
new_bboxes = tf.constant(_INVALID_BOX)
# If the bboxes are empty, then just give it _INVALID_BOX. The result
# will be thrown away.
bboxes = tf.cond(tf.equal(tf.size(bboxes), 0),
lambda: tf.constant(_INVALID_BOX),
lambda: bboxes)
bboxes = tf.ensure_shape(bboxes, (None, 4))
# pylint:disable=g-long-lambda
wrapped_aug_func = (
lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper(
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
# Setup the while_loop.
num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes.
idx = tf.constant(0) # Counter for the while loop.
# Conditional function when to end the loop once we go over all bboxes
# images_and_bboxes contain (_image, _new_bboxes)
cond = lambda _idx, _images_and_bboxes: tf.less(_idx, num_bboxes)
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
loop_bboxes = tf.random.shuffle(bboxes)
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
# pylint:disable=g-long-lambda
body = lambda _idx, _images_and_bboxes: [
_idx + 1, wrapped_aug_func(_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
# pylint:enable=g-long-lambda
_, (image, new_bboxes) = tf.while_loop(
cond, body, [idx, (image, new_bboxes)],
shape_invariants=[idx.get_shape(),
(image.get_shape(), tf.TensorShape([None, 4]))])
# Either return the altered bboxes or the original ones depending on if
# we altered them in anyway.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
return image, final_bboxes
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
"""Adjusts bbox coordinates to make sure the area is > 0.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
delta: Float, this is used to create a gap of size 2 * delta between
bbox min/max coordinates that are the same on the boundary.
This prevents the bbox from having an area of zero.
Returns:
Tuple of new bbox coordinates between 0 and 1 that will now have a
guaranteed area > 0.
"""
height = max_y - min_y
width = max_x - min_x
def _adjust_bbox_boundaries(min_coord, max_coord):
# Make sure max is never 0 and min is never 1.
max_coord = tf.maximum(max_coord, 0.0 + delta)
min_coord = tf.minimum(min_coord, 1.0 - delta)
return min_coord, max_coord
min_y, max_y = tf.cond(tf.equal(height, 0.0),
lambda: _adjust_bbox_boundaries(min_y, max_y),
lambda: (min_y, max_y))
min_x, max_x = tf.cond(tf.equal(width, 0.0),
lambda: _adjust_bbox_boundaries(min_x, max_x),
lambda: (min_x, max_x))
return min_y, min_x, max_y, max_x
def _rotate_bbox(bbox, image_height, image_width, degrees):
"""Rotates the bbox coordinated by degrees.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
    image_height: Int, height of the image.
    image_width: Int, width of the image.
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it
      will be rotated counterclockwise.
Returns:
A tensor of the same shape as bbox, but now with the rotated coordinates.
"""
image_height, image_width = (
tf.cast(image_height, tf.float32), tf.cast(image_width, tf.float32))
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -tf.cast(image_height * (bbox[0] - 0.5), tf.int32)
min_x = tf.cast(image_width * (bbox[1] - 0.5), tf.int32)
max_y = -tf.cast(image_height * (bbox[2] - 0.5), tf.int32)
max_x = tf.cast(image_width * (bbox[3] - 0.5), tf.int32)
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
  # Rotate the coordinates according to the rotation matrix, clockwise if
  # radians is positive and counterclockwise otherwise.
rotation_matrix = tf.stack(
[[tf.cos(radians), tf.sin(radians)],
[-tf.sin(radians), tf.cos(radians)]])
new_coords = tf.cast(
tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(
tf.cast(tf.reduce_max(new_coords[0, :]), tf.float32) / image_height - 0.5)
min_x = tf.cast(tf.reduce_min(new_coords[1, :]),
tf.float32) / image_width + 0.5
max_y = -(
tf.cast(tf.reduce_min(new_coords[0, :]), tf.float32) / image_height - 0.5)
max_x = tf.cast(tf.reduce_max(new_coords[1, :]),
tf.float32) / image_width + 0.5
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def rotate_with_bboxes(image, bboxes, degrees, replace):
"""Equivalent of PIL Rotate that rotates the image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive, the image will be rotated clockwise; otherwise it
      will be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of rotating
image by degrees. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the rotated image.
Raises:
ValueError: If applied to video.
"""
if image.shape.rank == 4:
raise ValueError('Image rank 4 is not supported')
# Rotate the image.
image = wrapped_rotate(image, degrees, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_rotate_bbox = lambda bbox: _rotate_bbox(
bbox, image_height, image_width, degrees)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes)
return image, bboxes
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
"""Shifts the bbox according to how the image was sheared.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
level: Float. How much to shear the image.
shear_horizontal: If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
image_height, image_width = (
tf.cast(image_height, tf.float32), tf.cast(image_width, tf.float32))
# Change bbox coordinates to be pixels.
min_y = tf.cast(image_height * bbox[0], tf.int32)
min_x = tf.cast(image_width * bbox[1], tf.int32)
max_y = tf.cast(image_height * bbox[2], tf.int32)
max_x = tf.cast(image_width * bbox[3], tf.int32)
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = tf.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = tf.stack(
[[1, -level], [0, 1]])
translation_matrix = tf.cast(translation_matrix, tf.float32)
new_coords = tf.cast(
tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to floats.
min_y = tf.cast(tf.reduce_min(new_coords[0, :]), tf.float32) / image_height
min_x = tf.cast(tf.reduce_min(new_coords[1, :]), tf.float32) / image_width
max_y = tf.cast(tf.reduce_max(new_coords[0, :]), tf.float32) / image_height
max_x = tf.cast(tf.reduce_max(new_coords[1, :]), tf.float32) / image_width
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
"""Applies Shear Transformation to the image and shifts the bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    level: Float. How much to shear the image. This value will be between
      -0.3 and 0.3.
replace: A one or three value 1D tensor to fill empty pixels.
shear_horizontal: Boolean. If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of shearing
image by level. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the sheared image.
Raises:
ValueError: If applied to video.
"""
if image.shape.rank == 4:
raise ValueError('Image rank 4 is not supported')
if shear_horizontal:
image = shear_x(image, level, replace)
else:
image = shear_y(image, level, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shear_bbox = lambda bbox: _shear_bbox(
bbox, image_height, image_width, level, shear_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
return image, bboxes
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
"""Shifts the bbox coordinates by pixels.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
pixels: An int. How many pixels to shift the bbox.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
pixels = tf.cast(pixels, tf.int32)
# Convert bbox to integer pixel locations.
min_y = tf.cast(tf.cast(image_height, tf.float32) * bbox[0], tf.int32)
min_x = tf.cast(tf.cast(image_width, tf.float32) * bbox[1], tf.int32)
max_y = tf.cast(tf.cast(image_height, tf.float32) * bbox[2], tf.int32)
max_x = tf.cast(tf.cast(image_width, tf.float32) * bbox[3], tf.int32)
if shift_horizontal:
min_x = tf.maximum(0, min_x - pixels)
max_x = tf.minimum(image_width, max_x - pixels)
else:
min_y = tf.maximum(0, min_y - pixels)
max_y = tf.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = tf.cast(min_y, tf.float32) / tf.cast(image_height, tf.float32)
min_x = tf.cast(min_x, tf.float32) / tf.cast(image_width, tf.float32)
max_y = tf.cast(max_y, tf.float32) / tf.cast(image_height, tf.float32)
max_x = tf.cast(max_x, tf.float32) / tf.cast(image_width, tf.float32)
  # Clip the bboxes to be sure they fall between [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
"""Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    pixels: An int. How many pixels to shift the image and bboxes.
replace: A one or three value 1D tensor to fill empty pixels.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of translating
image by pixels. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the shifted image.
Raises:
ValueError if applied to video.
"""
if image.shape.rank == 4:
raise ValueError('Image rank 4 is not supported')
if shift_horizontal:
image = translate_x(image, pixels, replace)
else:
image = translate_y(image, pixels, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shift_bbox = lambda bbox: _shift_bbox(
bbox, image_height, image_width, pixels, shift_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
return image, bboxes
def translate_y_only_bboxes(
image: tf.Tensor, bboxes: tf.Tensor, prob: float, pixels: int, replace):
"""Apply translate_y to each bbox in the image with probability prob."""
if bboxes.shape.rank == 4:
raise ValueError('translate_y_only_bboxes does not support rank 4 boxes')
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level: float):
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level: float):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level: float):
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level: float):
level = (level / _MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level: float, translate_const: float):
level = (level / _MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _gaussian_noise_level_to_arg(level: float, translate_const: float):
low_std = (level / _MAX_LEVEL)
high_std = translate_const * low_std
return low_std, high_std
def _mult_to_arg(level: float, multiplier: float = 1.):
return (int((level / _MAX_LEVEL) * multiplier),)
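# Worked examples (hedged) of the level-to-argument mappings above, assuming
# `_MAX_LEVEL` is 10 as defined earlier in this file:
#   _mult_to_arg(5, 4)         # -> (2,), i.e. int(5 / 10 * 4)
#   _enhance_level_to_arg(5.)  # -> (1.0,), i.e. 5 / 10 * 1.8 + 0.1
#   _shear_level_to_arg(10.)   # -> (0.3,) or (-0.3,), the sign is picked at
#                              #    random (returned as a tf scalar)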
def _apply_func_with_prob(func: Any, image: tf.Tensor,
bboxes: Optional[tf.Tensor], args: Any, prob: float):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
assert inspect.getfullargspec(func)[0][1] == 'bboxes'
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image, augmented_bboxes = tf.cond(
should_apply_op,
lambda: func(image, bboxes, *args),
lambda: (image, bboxes))
return augmented_image, augmented_bboxes
def select_and_apply_random_policy(policies: Any,
image: tf.Tensor,
bboxes: Optional[tf.Tensor] = None):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image, bboxes = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image, bboxes),
lambda: (image, bboxes))
return image, bboxes
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': wrapped_rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
'Rotate_BBox': rotate_with_bboxes,
'Grayscale': grayscale,
'Gaussian_Noise': gaussian_noise,
# pylint:disable=g-long-lambda
'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=True),
'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=False),
'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=True),
'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=False),
# pylint:enable=g-long-lambda
'TranslateY_Only_BBoxes': translate_y_only_bboxes,
}
# Functions that require a `bboxes` parameter.
REQUIRE_BOXES_FUNCS = frozenset({
'Rotate_BBox',
'ShearX_BBox',
'ShearY_BBox',
'TranslateX_BBox',
'TranslateY_BBox',
'TranslateY_Only_BBoxes',
})
# Functions that have a 'prob' parameter
PROB_FUNCS = frozenset({
'TranslateY_Only_BBoxes',
})
# Functions that have a 'replace' parameter
REPLACE_FUNCS = frozenset({
'Rotate',
'TranslateX',
'ShearX',
'ShearY',
'TranslateY',
'Cutout',
'Rotate_BBox',
'ShearX_BBox',
'ShearY_BBox',
'TranslateX_BBox',
'TranslateY_BBox',
'TranslateY_Only_BBoxes',
})
def level_to_arg(cutout_const: float, translate_const: float):
"""Creates a dict mapping image operation names to their arguments."""
no_arg = lambda level: ()
posterize_arg = lambda level: _mult_to_arg(level, 4)
solarize_arg = lambda level: _mult_to_arg(level, 256)
solarize_add_arg = lambda level: _mult_to_arg(level, 110)
cutout_arg = lambda level: _mult_to_arg(level, cutout_const)
translate_arg = lambda level: _translate_level_to_arg(level, translate_const)
translate_bbox_arg = lambda level: _translate_level_to_arg(level, 120)
args = {
'AutoContrast': no_arg,
'Equalize': no_arg,
'Invert': no_arg,
'Rotate': _rotate_level_to_arg,
'Posterize': posterize_arg,
'Solarize': solarize_arg,
'SolarizeAdd': solarize_add_arg,
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'ShearX': _shear_level_to_arg,
'ShearY': _shear_level_to_arg,
'Cutout': cutout_arg,
'TranslateX': translate_arg,
'TranslateY': translate_arg,
'Rotate_BBox': _rotate_level_to_arg,
'ShearX_BBox': _shear_level_to_arg,
'ShearY_BBox': _shear_level_to_arg,
'Grayscale': no_arg,
# pylint:disable=g-long-lambda
'Gaussian_Noise': lambda level: _gaussian_noise_level_to_arg(
level, translate_const),
# pylint:disable=g-long-lambda
'TranslateX_BBox': lambda level: _translate_level_to_arg(
level, translate_const),
'TranslateY_BBox': lambda level: _translate_level_to_arg(
level, translate_const),
# pylint:enable=g-long-lambda
'TranslateY_Only_BBoxes': translate_bbox_arg,
}
return args
def bbox_wrapper(func):
"""Adds a bboxes function argument to func and returns unchanged bboxes."""
def wrapper(images, bboxes, *args, **kwargs):
return (func(images, *args, **kwargs), bboxes)
return wrapper
def _parse_policy_info(name: str,
prob: float,
level: float,
replace_value: List[int],
cutout_const: float,
translate_const: float,
level_std: float = 0.) -> Tuple[Any, float, Any]:
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
if level_std > 0:
    level += tf.random.normal([], stddev=level_std, dtype=tf.float32)
level = tf.clip_by_value(level, 0., _MAX_LEVEL)
args = level_to_arg(cutout_const, translate_const)[name](level)
if name in PROB_FUNCS:
# Add in the prob arg if it is required for the function that is called.
args = tuple([prob] + list(args))
if name in REPLACE_FUNCS:
# Add in replace arg if it is required for the function that is called.
args = tuple(list(args) + [replace_value])
# Add bboxes as the second positional argument for the function if it does
# not already exist.
if 'bboxes' not in inspect.getfullargspec(func)[0]:
func = bbox_wrapper(func)
return func, prob, args
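# Illustrative sketch (hypothetical values): resolving one policy entry.
#   func, prob, args = _parse_policy_info(
#       'TranslateX_BBox', prob=0.6, level=4, replace_value=[128] * 3,
#       cutout_const=100., translate_const=250.)
#   # 'TranslateX_BBox' is in REPLACE_FUNCS, so `args` ends with the replace
#   # value; `func` takes (image, bboxes, *args) and returns both.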
class ImageAugment(object):
"""Image augmentation class for applying image distortions."""
def distort(
self,
image: tf.Tensor
) -> tf.Tensor:
"""Given an image tensor, returns a distorted image with the same shape.
Args:
image: `Tensor` of shape [height, width, 3] or
[num_frames, height, width, 3] representing an image or image sequence.
Returns:
The augmented version of `image`.
"""
raise NotImplementedError()
def distort_with_boxes(
self,
image: tf.Tensor,
bboxes: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Distorts the image and bounding boxes.
Args:
image: `Tensor` of shape [height, width, 3] or
[num_frames, height, width, 3] representing an image or image sequence.
bboxes: `Tensor` of shape [num_boxes, 4] or [num_frames, num_boxes, 4]
representing bounding boxes for an image or image sequence.
Returns:
The augmented version of `image` and `bboxes`.
"""
raise NotImplementedError
class AutoAugment(ImageAugment):
"""Applies the AutoAugment policy to images.
AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.
"""
def __init__(self,
augmentation_name: str = 'v0',
policies: Optional[Iterable[Iterable[Tuple[str, float,
float]]]] = None,
cutout_const: float = 100,
translate_const: float = 250):
"""Applies the AutoAugment policy to images.
Args:
      augmentation_name: The name of the AutoAugment policy to use. The
        available options are `v0`, `detection_v0`, `test`, `simple`,
        `reduced_cifar10`, `svhn`, `reduced_imagenet`,
        `panoptic_deeplab_policy`, `vit` and `deit3_three_augment`. `v0` is
        the policy used for all of the results in the paper and was found to
        achieve the best results on the COCO dataset. Make sure to set
        `policies` to `None` (the default) if you want to set options using
        `augmentation_name`.
policies: list of lists of tuples in the form `(func, prob, level)`,
`func` is a string name of the augmentation function, `prob` is the
probability of applying the `func` operation, `level` (or magnitude) is
the input argument for `func`. For example:
```
[[('Equalize', 0.9, 3), ('Color', 0.7, 8)],
[('Invert', 0.6, 5), ('Rotate', 0.2, 9), ('ShearX', 0.1, 2)], ...]
```
        The nested structure must have shape `(:, :, 3)`. The number of
        operations in a sub-policy can vary from one sub-policy to another.
        If you provide `policies` as input, any option set with
        `augmentation_name` will get overridden as they are mutually
        exclusive.
cutout_const: multiplier for applying cutout.
translate_const: multiplier for applying translation.
Raises:
ValueError if `augmentation_name` is unsupported.
"""
super(AutoAugment, self).__init__()
self.augmentation_name = augmentation_name
self.cutout_const = float(cutout_const)
self.translate_const = float(translate_const)
self.available_policies = {
'detection_v0': self.detection_policy_v0(),
'v0': self.policy_v0(),
'test': self.policy_test(),
'simple': self.policy_simple(),
'reduced_cifar10': self.policy_reduced_cifar10(),
'svhn': self.policy_svhn(),
'reduced_imagenet': self.policy_reduced_imagenet(),
'panoptic_deeplab_policy': self.panoptic_deeplab_policy(),
'vit': self.vit(),
'deit3_three_augment': self.deit3_three_augment(),
}
if not policies:
if augmentation_name not in self.available_policies:
raise ValueError(
'Invalid augmentation_name: {}'.format(augmentation_name))
self.policies = self.available_policies[augmentation_name]
else:
self._check_policy_shape(policies)
self.policies = policies
def _check_policy_shape(self, policies):
"""Checks dimension and shape of the custom policy.
Args:
policies: List of list of tuples in the form `(func, prob, level)`. Must
have shape of `(:, :, 3)`.
Raises:
ValueError if the shape of `policies` is unexpected.
"""
in_shape = np.array(policies).shape
if len(in_shape) != 3 or in_shape[-1:] != (3,):
raise ValueError('Wrong shape detected for custom policy. Expected '
'(:, :, 3) but got {}.'.format(in_shape))
def _make_tf_policies(self):
"""Prepares the TF functions for augmentations based on the policies."""
replace_value = [128] * 3
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter
# associated with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in self.policies:
tf_policy = []
assert_ranges = []
# Link string name to the correct python function and make sure the
# correct argument is passed into that function.
for policy_info in policy:
_, prob, level = policy_info
assert_ranges.append(tf.Assert(tf.less_equal(prob, 1.), [prob]))
assert_ranges.append(
tf.Assert(tf.less_equal(level, int(_MAX_LEVEL)), [level]))
policy_info = list(policy_info) + [
replace_value, self.cutout_const, self.translate_const
]
tf_policy.append(_parse_policy_info(*policy_info))
      # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_, bboxes_):
for func, prob, args in tf_policy_:
image_, bboxes_ = _apply_func_with_prob(func, image_, bboxes_, args,
prob)
return image_, bboxes_
return final_policy
with tf.control_dependencies(assert_ranges):
tf_policies.append(make_final_policy(tf_policy))
return tf_policies
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""See base class."""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
tf_policies = self._make_tf_policies()
image, _ = select_and_apply_random_policy(tf_policies, image, bboxes=None)
image = tf.cast(image, dtype=input_image_type)
return image
def distort_with_boxes(self, image: tf.Tensor,
bboxes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""See base class."""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
tf_policies = self._make_tf_policies()
image, bboxes = select_and_apply_random_policy(tf_policies, image, bboxes)
return image, bboxes
@staticmethod
def detection_policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper for Detection.
https://arxiv.org/pdf/1906.11172
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in policy is a
sub-policy that will be applied sequentially on the image.
Returns:
the policy.
"""
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
@staticmethod
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Paper.
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in policy is a
sub-policy that will be applied sequentially on the image.
Returns:
the policy.
"""
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
return policy
@staticmethod
def policy_reduced_cifar10():
"""Autoaugment policy for reduced CIFAR-10 dataset.
Result is from the AutoAugment paper: https://arxiv.org/abs/1805.09501.
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in policy is a
sub-policy that will be applied sequentially on the image.
Returns:
the policy.
"""
policy = [
[('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
[('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)],
[('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
[('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)],
[('ShearY', 0.2, 7), ('Posterize', 0.3, 7)],
[('Color', 0.4, 3), ('Brightness', 0.6, 7)],
[('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
[('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
[('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)],
[('Color', 0.7, 7), ('TranslateX', 0.5, 8)],
[('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
[('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)],
[('Brightness', 0.9, 6), ('Color', 0.2, 8)],
[('Solarize', 0.5, 2), ('Invert', 0.0, 3)],
[('Equalize', 0.2, 0), ('AutoContrast', 0.6, 0)],
[('Equalize', 0.2, 8), ('Equalize', 0.6, 4)],
[('Color', 0.9, 9), ('Equalize', 0.6, 6)],
[('AutoContrast', 0.8, 4), ('Solarize', 0.2, 8)],
[('Brightness', 0.1, 3), ('Color', 0.7, 0)],
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
[('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
[('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
[('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)],
]
return policy
@staticmethod
def policy_svhn():
"""Autoaugment policy for SVHN dataset.
Result is from the AutoAugment paper: https://arxiv.org/abs/1805.09501.
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in policy is a
sub-policy that will be applied sequentially on the image.
Returns:
the policy.
"""
policy = [
[('ShearX', 0.9, 4), ('Invert', 0.2, 3)],
[('ShearY', 0.9, 8), ('Invert', 0.7, 5)],
[('Equalize', 0.6, 5), ('Solarize', 0.6, 6)],
[('Invert', 0.9, 3), ('Equalize', 0.6, 3)],
[('Equalize', 0.6, 1), ('Rotate', 0.9, 3)],
[('ShearX', 0.9, 4), ('AutoContrast', 0.8, 3)],
[('ShearY', 0.9, 8), ('Invert', 0.4, 5)],
[('ShearY', 0.9, 5), ('Solarize', 0.2, 6)],
[('Invert', 0.9, 6), ('AutoContrast', 0.8, 1)],
[('Equalize', 0.6, 3), ('Rotate', 0.9, 3)],
[('ShearX', 0.9, 4), ('Solarize', 0.3, 3)],
[('ShearY', 0.8, 8), ('Invert', 0.7, 4)],
[('Equalize', 0.9, 5), ('TranslateY', 0.6, 6)],
[('Invert', 0.9, 4), ('Equalize', 0.6, 7)],
[('Contrast', 0.3, 3), ('Rotate', 0.8, 4)],
[('Invert', 0.8, 5), ('TranslateY', 0.0, 2)],
[('ShearY', 0.7, 6), ('Solarize', 0.4, 8)],
[('Invert', 0.6, 4), ('Rotate', 0.8, 4)],
[('ShearY', 0.3, 7), ('TranslateX', 0.9, 3)],
[('ShearX', 0.1, 6), ('Invert', 0.6, 5)],
[('Solarize', 0.7, 2), ('TranslateY', 0.6, 7)],
[('ShearY', 0.8, 4), ('Invert', 0.8, 8)],
[('ShearX', 0.7, 9), ('TranslateY', 0.8, 3)],
[('ShearY', 0.8, 5), ('AutoContrast', 0.7, 3)],
[('ShearX', 0.7, 2), ('Invert', 0.1, 5)],
]
return policy
@staticmethod
def policy_reduced_imagenet():
"""Autoaugment policy for reduced ImageNet dataset.
Result is from the AutoAugment paper: https://arxiv.org/abs/1805.09501.
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in policy is a
sub-policy that will be applied sequentially on the image.
Returns:
the policy.
"""
policy = [
[('Posterize', 0.4, 8), ('Rotate', 0.6, 9)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
[('Posterize', 0.6, 7), ('Posterize', 0.6, 6)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
[('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
[('Posterize', 0.8, 5), ('Equalize', 1.0, 2)],
[('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
[('Equalize', 0.6, 8), ('Posterize', 0.4, 6)],
[('Rotate', 0.8, 8), ('Color', 0.4, 0)],
[('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Rotate', 0.8, 8), ('Color', 1.0, 2)],
[('Color', 0.8, 8), ('Solarize', 0.8, 7)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
[('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
[('Color', 0.4, 0), ('Equalize', 0.6, 3)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)]
]
return policy
@staticmethod
def policy_simple():
"""Same as `policy_v0`, except with custom ops removed."""
policy = [
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
]
return policy
@staticmethod
def panoptic_deeplab_policy():
policy = [
[('Sharpness', 0.4, 1.4), ('Brightness', 0.2, 2.0)],
[('Equalize', 0.0, 1.8), ('Contrast', 0.2, 2.0)],
[('Sharpness', 0.2, 1.8), ('Color', 0.2, 1.8)],
[('Solarize', 0.2, 1.4), ('Equalize', 0.6, 1.8)],
[('Sharpness', 0.2, 0.2), ('Equalize', 0.2, 1.4)]]
return policy
@staticmethod
def vit():
"""Autoaugment policy for a generic ViT."""
policy = [
[('Sharpness', 0.4, 1.4), ('Brightness', 0.2, 2.0), ('Cutout', 0.8, 8)],
[('Equalize', 0.0, 1.8), ('Contrast', 0.2, 2.0), ('Cutout', 0.8, 8)],
[('Sharpness', 0.2, 1.8), ('Color', 0.2, 1.8), ('Cutout', 0.8, 8)],
[('Solarize', 0.2, 1.4), ('Equalize', 0.6, 1.8), ('Cutout', 0.8, 8)],
[('Sharpness', 0.2, 0.2), ('Equalize', 0.2, 1.4), ('Cutout', 0.8, 8)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8), ('Cutout', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8), ('Cutout', 0.8, 8)],
[('Posterize', 0.6, 7), ('Posterize', 0.6, 6), ('Cutout', 0.8, 8)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5), ('Cutout', 0.8, 8)],
]
return policy
@staticmethod
def deit3_three_augment():
"""Autoaugment policy for three augmentations.
Proposed in paper: https://arxiv.org/abs/2204.07118.
Each tuple is an augmentation operation of the form
(operation, probability, magnitude). Each element in policy is a
    sub-policy that will be applied on the image. One of the three
    augmentations is chosen at random to apply to the image.
Returns:
the policy.
"""
policy = [
[('Grayscale', 1.0, 0)],
[('Solarize', 1.0, 5)], # to have threshold as 128
[('Gaussian_Noise', 1.0, 1)], # to have low_std as 0.1
]
return policy
@staticmethod
def policy_test():
"""Autoaugment test policy for debugging."""
policy = [
[('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def _maybe_identity(x: Optional[tf.Tensor]) -> Optional[tf.Tensor]:
return tf.identity(x) if x is not None else None
class RandAugment(ImageAugment):
"""Applies the RandAugment policy to images.
RandAugment is from the paper https://arxiv.org/abs/1909.13719.
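  Illustrative usage (a minimal sketch; the tensor shape is a placeholder):
  ```
  augmenter = RandAugment(num_layers=2, magnitude=10.)
  image = tf.zeros([224, 224, 3], dtype=tf.uint8)
  aug_image = augmenter.distort(image)
  ```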
"""
def __init__(self,
num_layers: int = 2,
magnitude: float = 10.,
cutout_const: float = 40.,
translate_const: float = 100.,
magnitude_std: float = 0.0,
prob_to_apply: Optional[float] = None,
exclude_ops: Optional[List[str]] = None):
"""Applies the RandAugment policy to images.
Args:
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
      magnitude: Float, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 10].
cutout_const: multiplier for applying cutout.
translate_const: multiplier for applying translation.
magnitude_std: randomness of the severity as proposed by the authors of
the timm library.
prob_to_apply: The probability to apply the selected augmentation at each
layer.
exclude_ops: exclude selected operations.
"""
super(RandAugment, self).__init__()
self.num_layers = num_layers
self.magnitude = float(magnitude)
self.cutout_const = float(cutout_const)
self.translate_const = float(translate_const)
self.prob_to_apply = (
float(prob_to_apply) if prob_to_apply is not None else None)
self.available_ops = [
'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize',
'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY',
'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd'
]
self.magnitude_std = magnitude_std
if exclude_ops:
self.available_ops = [
op for op in self.available_ops if op not in exclude_ops
]
@classmethod
def build_for_detection(cls,
num_layers: int = 2,
magnitude: float = 10.,
cutout_const: float = 40.,
translate_const: float = 100.,
magnitude_std: float = 0.0,
prob_to_apply: Optional[float] = None,
exclude_ops: Optional[List[str]] = None):
"""Builds a RandAugment that modifies bboxes for geometric transforms."""
augmenter = cls(
num_layers=num_layers,
magnitude=magnitude,
cutout_const=cutout_const,
translate_const=translate_const,
magnitude_std=magnitude_std,
prob_to_apply=prob_to_apply,
exclude_ops=exclude_ops)
box_aware_ops_by_base_name = {
'Rotate': 'Rotate_BBox',
'ShearX': 'ShearX_BBox',
'ShearY': 'ShearY_BBox',
'TranslateX': 'TranslateX_BBox',
'TranslateY': 'TranslateY_BBox',
}
augmenter.available_ops = [
box_aware_ops_by_base_name.get(op_name) or op_name
for op_name in augmenter.available_ops
]
return augmenter
def _distort_common(
self,
image: tf.Tensor,
bboxes: Optional[tf.Tensor] = None
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Distorts the image and optionally bounding boxes."""
input_image_type = image.dtype
if input_image_type != tf.uint8:
image = tf.clip_by_value(image, 0.0, 255.0)
image = tf.cast(image, dtype=tf.uint8)
replace_value = [128] * 3
min_prob, max_prob = 0.2, 0.8
aug_image = image
aug_bboxes = bboxes
for _ in range(self.num_layers):
op_to_select = tf.random.uniform([],
maxval=len(self.available_ops) + 1,
dtype=tf.int32)
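      # Note: the sampled index may equal len(self.available_ops); that value
      # has no matching branch below, so tf.switch_case falls through to the
      # `default` (identity) branch.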
branch_fns = []
for (i, op_name) in enumerate(self.available_ops):
prob = tf.random.uniform([],
minval=min_prob,
maxval=max_prob,
dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, self.magnitude,
replace_value, self.cutout_const,
self.translate_const,
self.magnitude_std)
branch_fns.append((
i,
# pylint:disable=g-long-lambda
lambda selected_func=func, selected_args=args: selected_func(
image, bboxes, *selected_args)))
# pylint:enable=g-long-lambda
aug_image, aug_bboxes = tf.switch_case(
branch_index=op_to_select,
branch_fns=branch_fns,
default=lambda: (tf.identity(image), _maybe_identity(bboxes))) # pylint: disable=cell-var-from-loop
if self.prob_to_apply is not None:
aug_image, aug_bboxes = tf.cond(
tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply,
lambda: (tf.identity(aug_image), _maybe_identity(aug_bboxes)),
lambda: (tf.identity(image), _maybe_identity(bboxes)))
image = aug_image
bboxes = aug_bboxes
image = tf.cast(image, dtype=input_image_type)
return image, bboxes
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""See base class."""
image, _ = self._distort_common(image)
return image
def distort_with_boxes(self, image: tf.Tensor,
bboxes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""See base class."""
image, bboxes = self._distort_common(image, bboxes)
return image, bboxes
class RandomErasing(ImageAugment):
"""Applies RandomErasing to a single image.
Reference: https://arxiv.org/abs/1708.04896
Implementation is inspired by
https://github.com/rwightman/pytorch-image-models.
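  Illustrative usage (a minimal sketch):
  ```
  augmenter = RandomErasing(probability=0.25)
  aug_image = augmenter.distort(tf.zeros([224, 224, 3], dtype=tf.float32))
  ```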
"""
def __init__(self,
probability: float = 0.25,
min_area: float = 0.02,
max_area: float = 1 / 3,
min_aspect: float = 0.3,
max_aspect: Optional[float] = None,
               min_count: int = 1,
               max_count: int = 1,
               trials: int = 10):
"""Applies RandomErasing to a single image.
Args:
probability: Probability of augmenting the image. Defaults to `0.25`.
min_area: Minimum area of the random erasing rectangle. Defaults to
`0.02`.
max_area: Maximum area of the random erasing rectangle. Defaults to `1/3`.
      min_aspect: Minimum aspect ratio of the random erasing rectangle.
        Defaults to `0.3`.
      max_aspect: Maximum aspect ratio of the random erasing rectangle.
        Defaults to `None`.
min_count: Minimum number of erased rectangles. Defaults to `1`.
max_count: Maximum number of erased rectangles. Defaults to `1`.
trials: Maximum number of trials to randomly sample a rectangle that
        fulfills the constraints. Defaults to `10`.
"""
self._probability = probability
self._min_area = float(min_area)
self._max_area = float(max_area)
self._min_log_aspect = math.log(min_aspect)
self._max_log_aspect = math.log(max_aspect or 1 / min_aspect)
self._min_count = min_count
self._max_count = max_count
self._trials = trials
def distort(self, image: tf.Tensor) -> tf.Tensor:
"""Applies RandomErasing to single `image`.
Args:
image (tf.Tensor): Of shape [height, width, 3] representing an image.
Returns:
tf.Tensor: The augmented version of `image`.
"""
uniform_random = tf.random.uniform(shape=[], minval=0., maxval=1.0)
mirror_cond = tf.less(uniform_random, self._probability)
image = tf.cond(mirror_cond, lambda: self._erase(image), lambda: image)
return image
@tf.function
def _erase(self, image: tf.Tensor) -> tf.Tensor:
"""Erase an area."""
if self._min_count == self._max_count:
count = self._min_count
else:
count = tf.random.uniform(
shape=[],
minval=int(self._min_count),
          maxval=int(self._max_count) + 1,
dtype=tf.int32)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
area = tf.cast(image_width * image_height, tf.float32)
for _ in range(count):
      # Work around the lack of `break` support inside tf.function.
      is_trial_successful = False
      for _ in range(self._trials):
        if not is_trial_successful:
erase_area = tf.random.uniform(
shape=[],
minval=area * self._min_area,
maxval=area * self._max_area)
aspect_ratio = tf.math.exp(
tf.random.uniform(
shape=[],
minval=self._min_log_aspect,
maxval=self._max_log_aspect))
half_height = tf.cast(
tf.math.round(tf.math.sqrt(erase_area * aspect_ratio) / 2),
dtype=tf.int32)
half_width = tf.cast(
tf.math.round(tf.math.sqrt(erase_area / aspect_ratio) / 2),
dtype=tf.int32)
if 2 * half_height < image_height and 2 * half_width < image_width:
center_height = tf.random.uniform(
shape=[],
minval=0,
maxval=int(image_height - 2 * half_height),
dtype=tf.int32)
center_width = tf.random.uniform(
shape=[],
minval=0,
maxval=int(image_width - 2 * half_width),
dtype=tf.int32)
image = _fill_rectangle(
image,
center_width,
center_height,
half_width,
half_height,
replace=None)
            is_trial_successful = True
return image
class MixupAndCutmix:
"""Applies Mixup and/or Cutmix to a batch of images.
- Mixup: https://arxiv.org/abs/1710.09412
- Cutmix: https://arxiv.org/abs/1905.04899
  Implementation is inspired by https://github.com/rwightman/pytorch-image-models.
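  Illustrative usage (a minimal sketch; batch size and number of classes are
  placeholders):
  ```
  augmenter = MixupAndCutmix(num_classes=1000)
  images = tf.random.normal([8, 224, 224, 3])
  labels = tf.range(8)
  aug_images, aug_labels = augmenter.distort(images, labels)
  ```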
"""
def __init__(self,
mixup_alpha: float = .8,
cutmix_alpha: float = 1.,
prob: float = 1.0,
switch_prob: float = 0.5,
label_smoothing: float = 0.1,
num_classes: int = 1001):
"""Applies Mixup and/or Cutmix to a batch of images.
Args:
      mixup_alpha (float, optional): Parameter for drawing a random lambda
        (`lam`) from a beta distribution (for each image). If zero, Mixup is
        deactivated. Defaults to 0.8.
      cutmix_alpha (float, optional): Parameter for drawing a random lambda
        (`lam`) from a beta distribution (for each image). If zero, Cutmix is
        deactivated. Defaults to 1.0.
      prob (float, optional): Probability of augmenting the batch. Defaults
        to 1.0.
switch_prob (float, optional): Probability of applying Cutmix for the
batch. Defaults to 0.5.
label_smoothing (float, optional): Constant for label smoothing. Defaults
to 0.1.
num_classes (int, optional): Number of classes. Defaults to 1001.
"""
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.mix_prob = prob
self.switch_prob = switch_prob
self.label_smoothing = label_smoothing
self.num_classes = num_classes
self.mode = 'batch'
self.mixup_enabled = True
if self.mixup_alpha and not self.cutmix_alpha:
self.switch_prob = -1
elif not self.mixup_alpha and self.cutmix_alpha:
self.switch_prob = 1
def __call__(self, images: tf.Tensor,
labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
return self.distort(images, labels)
def distort(self, images: tf.Tensor,
labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Applies Mixup and/or Cutmix to batch of images and transforms labels.
Args:
      images (tf.Tensor): Of shape [batch_size, height, width, 3] representing
        a batch of images, or [batch_size, time, height, width, 3]
        representing a batch of videos.
labels (tf.Tensor): Of shape [batch_size, ] representing the class id for
each image of the batch.
Returns:
Tuple[tf.Tensor, tf.Tensor]: The augmented version of `image` and
`labels`.
"""
labels = tf.reshape(labels, [-1])
augment_cond = tf.less(
tf.random.uniform(shape=[], minval=0., maxval=1.0), self.mix_prob)
# pylint: disable=g-long-lambda
augment_a = lambda: self._update_labels(*tf.cond(
tf.less(
tf.random.uniform(shape=[], minval=0., maxval=1.0), self.switch_prob
), lambda: self._cutmix(images, labels), lambda: self._mixup(
images, labels)))
augment_b = lambda: (images, self._smooth_labels(labels))
# pylint: enable=g-long-lambda
return tf.cond(augment_cond, augment_a, augment_b)
@staticmethod
def _sample_from_beta(alpha, beta, shape):
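    # tf.random has no native Beta sampler, so the mixing weight is drawn as a
    # normalized ratio of two Gamma samples, which always lies in [0, 1].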
sample_alpha = tf.random.gamma(shape, 1., beta=alpha)
sample_beta = tf.random.gamma(shape, 1., beta=beta)
return sample_alpha / (sample_alpha + sample_beta)
def _cutmix(self, images: tf.Tensor,
labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Applies cutmix."""
lam = MixupAndCutmix._sample_from_beta(self.cutmix_alpha, self.cutmix_alpha,
tf.shape(labels))
ratio = tf.math.sqrt(1 - lam)
batch_size = tf.shape(images)[0]
if images.shape.rank == 4:
image_height, image_width = tf.shape(images)[1], tf.shape(images)[2]
fill_fn = _fill_rectangle
elif images.shape.rank == 5:
image_height, image_width = tf.shape(images)[2], tf.shape(images)[3]
fill_fn = _fill_rectangle_video
else:
raise ValueError('Bad image rank: {}'.format(images.shape.rank))
cut_height = tf.cast(
ratio * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32)
    cut_width = tf.cast(
        ratio * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32)
random_center_height = tf.random.uniform(
shape=[batch_size], minval=0, maxval=image_height, dtype=tf.int32)
random_center_width = tf.random.uniform(
shape=[batch_size], minval=0, maxval=image_width, dtype=tf.int32)
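    # Recompute `lam` from the sampled cut size so the label mixing weight
    # tracks the fraction of the canvas kept from the original image.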
bbox_area = cut_height * cut_width
lam = 1. - bbox_area / (image_height * image_width)
lam = tf.cast(lam, dtype=tf.float32)
images = tf.map_fn(
lambda x: fill_fn(*x),
(images, random_center_width, random_center_height, cut_width // 2,
cut_height // 2, tf.reverse(images, [0])),
dtype=(
images.dtype, tf.int32, tf.int32, tf.int32, tf.int32, images.dtype),
fn_output_signature=tf.TensorSpec(images.shape[1:], dtype=images.dtype))
return images, labels, lam
def _mixup(self, images: tf.Tensor,
labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Applies mixup."""
lam = MixupAndCutmix._sample_from_beta(self.mixup_alpha, self.mixup_alpha,
tf.shape(labels))
if images.shape.rank == 4:
lam = tf.reshape(lam, [-1, 1, 1, 1])
elif images.shape.rank == 5:
lam = tf.reshape(lam, [-1, 1, 1, 1, 1])
else:
raise ValueError('Bad image rank: {}'.format(images.shape.rank))
lam_cast = tf.cast(lam, dtype=images.dtype)
images = lam_cast * images + (1. - lam_cast) * tf.reverse(images, [0])
return images, labels, tf.squeeze(lam)
def _smooth_labels(self, labels: tf.Tensor) -> tf.Tensor:
off_value = self.label_smoothing / self.num_classes
on_value = 1. - self.label_smoothing + off_value
smooth_labels = tf.one_hot(
labels, self.num_classes, on_value=on_value, off_value=off_value)
return smooth_labels
def _update_labels(self, images: tf.Tensor, labels: tf.Tensor,
lam: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
labels_1 = self._smooth_labels(labels)
labels_2 = tf.reverse(labels_1, [0])
lam = tf.reshape(lam, [-1, 1])
labels = lam * labels_1 + (1. - lam) * labels_2
return images, labels
| 102,890 | 36.064481 | 130 | py |
models | models-master/official/vision/ops/mask_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for segmentations."""
import math
from typing import List, Tuple
# Import libraries
import cv2
import numpy as np
import tensorflow as tf
from official.vision.ops import spatial_transform_ops
def paste_instance_masks(masks: np.ndarray, detected_boxes: np.ndarray,
image_height: int, image_width: int) -> np.ndarray:
"""Paste instance masks to generate the image segmentation results.
Args:
masks: a numpy array of shape [N, mask_height, mask_width] representing the
instance masks w.r.t. the `detected_boxes`.
    detected_boxes: a numpy array of shape [N, 4] representing the reference
      bounding boxes in [xmin, ymin, width, height] form.
image_height: an integer representing the height of the image.
image_width: an integer representing the width of the image.
Returns:
segms: a numpy array of shape [N, image_height, image_width] representing
the instance masks *pasted* on the image canvas.
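  Illustrative usage (a minimal sketch; shapes and box values are
  placeholders):
  ```
  masks = np.random.rand(2, 28, 28)
  boxes = np.array([[10., 20., 50., 40.],   # [xmin, ymin, width, height]
                    [30., 30., 60., 80.]])
  segms = paste_instance_masks(masks, boxes, image_height=480,
                               image_width=640)
  # segms.shape == (2, 480, 640)
  ```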
"""
def expand_boxes(boxes: np.ndarray, scale: float) -> np.ndarray:
"""Expands an array of boxes by a given scale."""
# Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227 # pylint: disable=line-too-long
# The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,
# whereas `boxes` here is in [x1, y1, w, h] form
w_half = boxes[:, 2] * 0.5
h_half = boxes[:, 3] * 0.5
x_c = boxes[:, 0] + w_half
y_c = boxes[:, 1] + h_half
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
# Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812 # pylint: disable=line-too-long
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
_, mask_height, mask_width = masks.shape
scale = max((mask_width + 2.0) / mask_width,
(mask_height + 2.0) / mask_height)
ref_boxes = expand_boxes(detected_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)
segms = []
for mask_ind, mask in enumerate(masks):
im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
# Process mask inside bounding boxes.
padded_mask[1:-1, 1:-1] = mask[:, :]
ref_box = ref_boxes[mask_ind, :]
w = ref_box[2] - ref_box[0] + 1
h = ref_box[3] - ref_box[1] + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h))
mask = np.array(mask > 0.5, dtype=np.uint8)
x_0 = min(max(ref_box[0], 0), image_width)
x_1 = min(max(ref_box[2] + 1, 0), image_width)
y_0 = min(max(ref_box[1], 0), image_height)
y_1 = min(max(ref_box[3] + 1, 0), image_height)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - ref_box[1]):(y_1 - ref_box[1]),
(x_0 - ref_box[0]):(x_1 - ref_box[0])
]
segms.append(im_mask)
segms = np.array(segms)
assert masks.shape[0] == segms.shape[0]
return segms
def paste_instance_masks_v2(masks: np.ndarray, detected_boxes: np.ndarray,
image_height: int, image_width: int) -> np.ndarray:
"""Paste instance masks to generate the image segmentation (v2).
Args:
masks: a numpy array of shape [N, mask_height, mask_width] representing the
instance masks w.r.t. the `detected_boxes`.
    detected_boxes: a numpy array of shape [N, 4] representing the reference
      bounding boxes in [xmin, ymin, width, height] form.
image_height: an integer representing the height of the image.
image_width: an integer representing the width of the image.
Returns:
segms: a numpy array of shape [N, image_height, image_width] representing
the instance masks *pasted* on the image canvas.
"""
_, mask_height, mask_width = masks.shape
segms = []
for i, mask in enumerate(masks):
box = detected_boxes[i, :]
xmin = box[0]
ymin = box[1]
xmax = xmin + box[2]
ymax = ymin + box[3]
# Sample points of the cropped mask w.r.t. the image grid.
# Note that these coordinates may fall beyond the image.
# Pixel clipping will happen after warping.
xmin_int = int(math.floor(xmin))
xmax_int = int(math.ceil(xmax))
ymin_int = int(math.floor(ymin))
ymax_int = int(math.ceil(ymax))
alpha = box[2] / (1.0 * mask_width)
beta = box[3] / (1.0 * mask_height)
# pylint: disable=invalid-name
# Transformation from mask pixel indices to image coordinate.
M_mask_to_image = np.array(
[[alpha, 0, xmin],
[0, beta, ymin],
[0, 0, 1]],
dtype=np.float32)
# Transformation from image to cropped mask coordinate.
M_image_to_crop = np.array(
[[1, 0, -xmin_int],
[0, 1, -ymin_int],
[0, 0, 1]],
dtype=np.float32)
M = np.dot(M_image_to_crop, M_mask_to_image)
# Compensate the half pixel offset that OpenCV has in the
# warpPerspective implementation: the top-left pixel is sampled
# at (0,0), but we want it to be at (0.5, 0.5).
M = np.dot(
np.dot(
np.array([[1, 0, -0.5],
[0, 1, -0.5],
[0, 0, 1]], np.float32),
M),
np.array([[1, 0, 0.5],
[0, 1, 0.5],
[0, 0, 1]], np.float32))
# pylint: enable=invalid-name
cropped_mask = cv2.warpPerspective(
mask.astype(np.float32), M,
(xmax_int - xmin_int, ymax_int - ymin_int))
cropped_mask = np.array(cropped_mask > 0.5, dtype=np.uint8)
img_mask = np.zeros((image_height, image_width))
x0 = max(min(xmin_int, image_width), 0)
x1 = max(min(xmax_int, image_width), 0)
y0 = max(min(ymin_int, image_height), 0)
y1 = max(min(ymax_int, image_height), 0)
img_mask[y0:y1, x0:x1] = cropped_mask[
(y0 - ymin_int):(y1 - ymin_int),
(x0 - xmin_int):(x1 - xmin_int)]
segms.append(img_mask)
segms = np.array(segms)
return segms
def instance_masks_overlap(
boxes: tf.Tensor,
masks: tf.Tensor,
gt_boxes: tf.Tensor,
gt_masks: tf.Tensor,
output_size: List[int],
mask_binarize_threshold: float = 0.5,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Calculates the IoUs and IoAs between the detection masks and the ground truth masks.
IoU: intersection over union.
IoA: intersection over the area of the detection masks.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. The last dimension is
the pixel coordinates in [ymin, xmin, ymax, xmax] form.
masks: a float tensor with a shape of [batch_size, N, mask_height,
mask_width] representing the instance masks w.r.t. the `boxes`.
gt_boxes: a tensor with a shape of [batch_size, M, 4]. The last dimension is
the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_masks: a float tensor with a shape of [batch_size, M, gt_mask_height,
gt_mask_width] representing the instance masks w.r.t. the `gt_boxes`.
output_size: two integers that represent the height and width of the output
masks.
mask_binarize_threshold: a float representing the threshold for binarizing
mask values. Default value is 0.5.
Returns:
    ious: a tensor with a shape of [batch_size, N, M] holding the mask IoUs.
    ioas: a tensor with a shape of [batch_size, N, M] holding the mask IoAs
      (intersection over the detection mask area).
"""
_, num_detections, mask_height, mask_width = masks.get_shape().as_list()
_, num_gts, gt_mask_height, gt_mask_width = gt_masks.get_shape().as_list()
output_height, output_width = output_size
masks = tf.where(masks < 0, tf.zeros_like(masks), masks)
gt_masks = tf.where(gt_masks < 0, tf.zeros_like(gt_masks), gt_masks)
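  # Resize and paste both mask sets onto a shared [output_height,
  # output_width] canvas so intersections and areas can be computed pixelwise.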
pasted_masks = tf.reshape(
spatial_transform_ops.bilinear_resize_to_bbox(
tf.reshape(masks, [-1, mask_height, mask_width]),
tf.reshape(boxes, [-1, 4]),
output_size,
),
shape=[-1, num_detections, output_height, output_width],
)
pasted_gt_masks = tf.reshape(
spatial_transform_ops.bilinear_resize_to_bbox(
tf.reshape(gt_masks, [-1, gt_mask_height, gt_mask_width]),
tf.reshape(gt_boxes, [-1, 4]),
output_size,
),
shape=[-1, num_gts, output_height, output_width],
)
# (batch_size, num_detections, output_height * output_width)
flattened_binary_masks = tf.reshape(
pasted_masks > mask_binarize_threshold,
[-1, num_detections, output_height * output_width],
)
# (batch_size, num_gts, output_height * output_width)
flattened_gt_binary_masks = tf.reshape(
pasted_gt_masks > mask_binarize_threshold,
[-1, num_gts, output_height * output_width],
)
# (batch_size, output_height * output_width, num_gts)
flattened_gt_binary_masks = tf.transpose(flattened_gt_binary_masks, [0, 2, 1])
flattened_binary_masks = tf.cast(flattened_binary_masks, tf.float32)
flattened_gt_binary_masks = tf.cast(flattened_gt_binary_masks, tf.float32)
# (batch_size, num_detections, num_gts)
intersection = tf.matmul(flattened_binary_masks, flattened_gt_binary_masks)
detection_area = tf.reduce_sum(flattened_binary_masks, axis=-1, keepdims=True)
gt_area = tf.reduce_sum(flattened_gt_binary_masks, axis=-2, keepdims=True)
union = detection_area + gt_area - intersection
return tf.math.divide_no_nan(intersection, union), tf.math.divide_no_nan(
intersection, detection_area
)
| 10,260 | 37.003704 | 137 | py |
models | models-master/official/vision/ops/augment_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for autoaugment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.ops import augment
def get_dtype_test_cases():
return [
('uint8', tf.uint8),
('int32', tf.int32),
('float16', tf.float16),
('float32', tf.float32),
]
@parameterized.named_parameters(get_dtype_test_cases())
class TransformsTest(parameterized.TestCase, tf.test.TestCase):
"""Basic tests for fundamental transformations."""
def test_to_from_4d(self, dtype):
for shape in [(10, 10), (10, 10, 10), (10, 10, 10, 10)]:
original_ndims = len(shape)
image = tf.zeros(shape, dtype=dtype)
image_4d = augment.to_4d(image)
self.assertEqual(4, tf.rank(image_4d))
self.assertAllEqual(image, augment.from_4d(image_4d, original_ndims))
def test_transform(self, dtype):
image = tf.constant([[1, 2], [3, 4]], dtype=dtype)
self.assertAllEqual(
augment.transform(image, transforms=[1] * 8), [[4, 4], [4, 4]])
def test_translate(self, dtype):
image = tf.constant(
[[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=dtype)
translations = [-1, -1]
translated = augment.translate(image=image, translations=translations)
expected = [[1, 0, 1, 1], [0, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 1]]
self.assertAllEqual(translated, expected)
def test_translate_shapes(self, dtype):
translation = [0, 0]
for shape in [(3, 3), (5, 5), (224, 224, 3)]:
image = tf.zeros(shape, dtype=dtype)
self.assertAllEqual(image, augment.translate(image, translation))
def test_translate_invalid_translation(self, dtype):
image = tf.zeros((1, 1), dtype=dtype)
invalid_translation = [[[1, 1]]]
with self.assertRaisesRegex(TypeError, 'rank 1 or 2'):
_ = augment.translate(image, invalid_translation)
def test_rotate(self, dtype):
image = tf.reshape(tf.cast(tf.range(9), dtype), (3, 3))
rotation = 90.
transformed = augment.rotate(image=image, degrees=rotation)
expected = [[2, 5, 8], [1, 4, 7], [0, 3, 6]]
self.assertAllEqual(transformed, expected)
def test_rotate_shapes(self, dtype):
degrees = 0.
for shape in [(3, 3), (5, 5), (224, 224, 3)]:
image = tf.zeros(shape, dtype=dtype)
self.assertAllEqual(image, augment.rotate(image, degrees))
def test_random_cutout_video(self, dtype):
for num_channels in (1, 2, 3):
video = tf.ones((2, 2, 2, num_channels), dtype=dtype)
video = augment.cutout_video(video)
num_zeros = np.sum(video == 0)
self.assertGreater(num_zeros, 0)
def test_cutout_video_with_fixed_shape(self, dtype):
tf.random.set_seed(0)
video = tf.ones((10, 10, 10, 1), dtype=dtype)
video = augment.cutout_video(video, mask_shape=tf.constant([2, 2, 2]))
num_zeros = np.sum(video == 0)
self.assertEqual(num_zeros, 8)
class AutoaugmentTest(tf.test.TestCase, parameterized.TestCase):
AVAILABLE_POLICIES = [
'v0',
'test',
'simple',
'reduced_cifar10',
'svhn',
'reduced_imagenet',
'detection_v0',
'vit',
]
def test_autoaugment(self):
"""Smoke test to be sure there are no syntax errors."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
for policy in self.AVAILABLE_POLICIES:
augmenter = augment.AutoAugment(augmentation_name=policy)
aug_image = augmenter.distort(image)
self.assertEqual((224, 224, 3), aug_image.shape)
def test_autoaugment_with_bboxes(self):
"""Smoke test to be sure there are no syntax errors with bboxes."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
bboxes = tf.ones((2, 4), dtype=tf.float32)
for policy in self.AVAILABLE_POLICIES:
augmenter = augment.AutoAugment(augmentation_name=policy)
aug_image, aug_bboxes = augmenter.distort_with_boxes(image, bboxes)
self.assertEqual((224, 224, 3), aug_image.shape)
self.assertEqual((2, 4), aug_bboxes.shape)
def test_randaug(self):
"""Smoke test to be sure there are no syntax errors."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
augmenter = augment.RandAugment()
aug_image = augmenter.distort(image)
self.assertEqual((224, 224, 3), aug_image.shape)
def test_randaug_with_bboxes(self):
"""Smoke test to be sure there are no syntax errors with bboxes."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
bboxes = tf.ones((2, 4), dtype=tf.float32)
augmenter = augment.RandAugment()
aug_image, aug_bboxes = augmenter.distort_with_boxes(image, bboxes)
self.assertEqual((224, 224, 3), aug_image.shape)
self.assertEqual((2, 4), aug_bboxes.shape)
def test_randaug_build_for_detection(self):
"""Smoke test to be sure there are no syntax errors built for detection."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
bboxes = tf.ones((2, 4), dtype=tf.float32)
augmenter = augment.RandAugment.build_for_detection()
self.assertCountEqual(augmenter.available_ops, [
'AutoContrast', 'Equalize', 'Invert', 'Posterize', 'Solarize', 'Color',
'Contrast', 'Brightness', 'Sharpness', 'Cutout', 'SolarizeAdd',
'Rotate_BBox', 'ShearX_BBox', 'ShearY_BBox', 'TranslateX_BBox',
'TranslateY_BBox'
])
aug_image, aug_bboxes = augmenter.distort_with_boxes(image, bboxes)
self.assertEqual((224, 224, 3), aug_image.shape)
self.assertEqual((2, 4), aug_bboxes.shape)
def test_all_policy_ops(self):
"""Smoke test to be sure all augmentation functions can execute."""
prob = 1
magnitude = 10
replace_value = [128] * 3
cutout_const = 100
translate_const = 250
image = tf.ones((224, 224, 3), dtype=tf.uint8)
bboxes = None
for op_name in augment.NAME_TO_FUNC.keys() - augment.REQUIRE_BOXES_FUNCS:
func, _, args = augment._parse_policy_info(op_name, prob, magnitude,
replace_value, cutout_const,
translate_const)
image, bboxes = func(image, bboxes, *args)
self.assertEqual((224, 224, 3), image.shape)
self.assertIsNone(bboxes)
def test_all_policy_ops_with_bboxes(self):
"""Smoke test to be sure all augmentation functions can execute."""
prob = 1
magnitude = 10
replace_value = [128] * 3
cutout_const = 100
translate_const = 250
image = tf.ones((224, 224, 3), dtype=tf.uint8)
bboxes = tf.ones((2, 4), dtype=tf.float32)
for op_name in augment.NAME_TO_FUNC:
func, _, args = augment._parse_policy_info(op_name, prob, magnitude,
replace_value, cutout_const,
translate_const)
image, bboxes = func(image, bboxes, *args)
self.assertEqual((224, 224, 3), image.shape)
self.assertEqual((2, 4), bboxes.shape)
def test_autoaugment_video(self):
"""Smoke test with video to be sure there are no syntax errors."""
image = tf.zeros((2, 224, 224, 3), dtype=tf.uint8)
for policy in self.AVAILABLE_POLICIES:
augmenter = augment.AutoAugment(augmentation_name=policy)
aug_image = augmenter.distort(image)
self.assertEqual((2, 224, 224, 3), aug_image.shape)
def test_autoaugment_video_with_boxes(self):
"""Smoke test with video to be sure there are no syntax errors."""
image = tf.zeros((2, 224, 224, 3), dtype=tf.uint8)
bboxes = tf.ones((2, 2, 4), dtype=tf.float32)
for policy in self.AVAILABLE_POLICIES:
augmenter = augment.AutoAugment(augmentation_name=policy)
aug_image, aug_bboxes = augmenter.distort_with_boxes(image, bboxes)
self.assertEqual((2, 224, 224, 3), aug_image.shape)
self.assertEqual((2, 2, 4), aug_bboxes.shape)
def test_randaug_video(self):
"""Smoke test with video to be sure there are no syntax errors."""
image = tf.zeros((2, 224, 224, 3), dtype=tf.uint8)
augmenter = augment.RandAugment()
aug_image = augmenter.distort(image)
self.assertEqual((2, 224, 224, 3), aug_image.shape)
def test_all_policy_ops_video(self):
"""Smoke test to be sure all video augmentation functions can execute."""
prob = 1
magnitude = 10
replace_value = [128] * 3
cutout_const = 100
translate_const = 250
image = tf.ones((2, 224, 224, 3), dtype=tf.uint8)
bboxes = None
for op_name in augment.NAME_TO_FUNC.keys() - augment.REQUIRE_BOXES_FUNCS:
func, _, args = augment._parse_policy_info(op_name, prob, magnitude,
replace_value, cutout_const,
translate_const)
image, bboxes = func(image, bboxes, *args)
self.assertEqual((2, 224, 224, 3), image.shape)
self.assertIsNone(bboxes)
def test_all_policy_ops_video_with_bboxes(self):
"""Smoke test to be sure all video augmentation functions can execute."""
prob = 1
magnitude = 10
replace_value = [128] * 3
cutout_const = 100
translate_const = 250
image = tf.ones((2, 224, 224, 3), dtype=tf.uint8)
bboxes = tf.ones((2, 2, 4), dtype=tf.float32)
for op_name in augment.NAME_TO_FUNC:
func, _, args = augment._parse_policy_info(op_name, prob, magnitude,
replace_value, cutout_const,
translate_const)
if op_name in {
'Rotate_BBox',
'ShearX_BBox',
'ShearY_BBox',
'TranslateX_BBox',
'TranslateY_BBox',
'TranslateY_Only_BBoxes',
}:
with self.assertRaises(ValueError):
func(image, bboxes, *args)
else:
image, bboxes = func(image, bboxes, *args)
self.assertEqual((2, 224, 224, 3), image.shape)
self.assertEqual((2, 2, 4), bboxes.shape)
def _generate_test_policy(self):
"""Generate a test policy at random."""
op_list = list(augment.NAME_TO_FUNC.keys())
size = 6
prob = [round(random.uniform(0., 1.), 1) for _ in range(size)]
mag = [round(random.uniform(0, 10)) for _ in range(size)]
policy = []
for i in range(0, size, 2):
policy.append([(op_list[i], prob[i], mag[i]),
(op_list[i + 1], prob[i + 1], mag[i + 1])])
return policy
def test_custom_policy(self):
"""Test autoaugment with a custom policy."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
augmenter = augment.AutoAugment(policies=self._generate_test_policy())
aug_image = augmenter.distort(image)
self.assertEqual((224, 224, 3), aug_image.shape)
def test_autoaugment_three_augment(self):
"""Test three augmentation."""
image = tf.random.normal(shape=(224, 224, 3), dtype=tf.float32)
augmenter = augment.AutoAugment(augmentation_name='deit3_three_augment')
aug_image = augmenter.distort(image)
self.assertEqual((224, 224, 3), aug_image.shape)
self.assertFalse(tf.math.reduce_all(image == aug_image))
@parameterized.named_parameters(
{'testcase_name': '_OutOfRangeProb',
'sub_policy': ('Equalize', 1.1, 3), 'value': '1.1'},
{'testcase_name': '_OutOfRangeMag',
'sub_policy': ('Equalize', 0.9, 11), 'value': '11'},
)
def test_invalid_custom_sub_policy(self, sub_policy, value):
"""Test autoaugment with out-of-range values in the custom policy."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
policy = self._generate_test_policy()
policy[0][0] = sub_policy
augmenter = augment.AutoAugment(policies=policy)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
r'Expected \'tf.Tensor\(False, shape=\(\), dtype=bool\)\' to be true. '
r'Summarized data: ({})'.format(value)):
augmenter.distort(image)
def test_invalid_custom_policy_ndim(self):
"""Test autoaugment with wrong dimension in the custom policy."""
policy = [[('Equalize', 0.8, 1), ('Shear', 0.8, 4)],
[('TranslateY', 0.6, 3), ('Rotate', 0.9, 3)]]
policy = [[policy]]
with self.assertRaisesRegex(
ValueError,
r'Expected \(:, :, 3\) but got \(1, 1, 2, 2, 3\).'):
augment.AutoAugment(policies=policy)
def test_invalid_custom_policy_shape(self):
"""Test autoaugment with wrong shape in the custom policy."""
policy = [[('Equalize', 0.8, 1, 1), ('Shear', 0.8, 4, 1)],
[('TranslateY', 0.6, 3, 1), ('Rotate', 0.9, 3, 1)]]
with self.assertRaisesRegex(
ValueError,
r'Expected \(:, :, 3\) but got \(2, 2, 4\)'):
augment.AutoAugment(policies=policy)
def test_invalid_custom_policy_key(self):
"""Test autoaugment with invalid key in the custom policy."""
image = tf.zeros((224, 224, 3), dtype=tf.uint8)
policy = [[('AAAAA', 0.8, 1), ('Shear', 0.8, 4)],
[('TranslateY', 0.6, 3), ('Rotate', 0.9, 3)]]
augmenter = augment.AutoAugment(policies=policy)
with self.assertRaisesRegex(KeyError, '\'AAAAA\''):
augmenter.distort(image)
class RandomErasingTest(tf.test.TestCase, parameterized.TestCase):
def test_random_erase_replaces_some_pixels(self):
image = tf.zeros((224, 224, 3), dtype=tf.float32)
augmenter = augment.RandomErasing(probability=1., max_count=10)
aug_image = augmenter.distort(image)
self.assertEqual((224, 224, 3), aug_image.shape)
self.assertNotEqual(0, tf.reduce_max(aug_image))
class MixupAndCutmixTest(tf.test.TestCase, parameterized.TestCase):
def test_mixup_and_cutmix_smoothes_labels(self):
batch_size = 12
num_classes = 1000
label_smoothing = 0.1
images = tf.random.normal((batch_size, 224, 224, 3), dtype=tf.float32)
labels = tf.range(batch_size)
augmenter = augment.MixupAndCutmix(
num_classes=num_classes, label_smoothing=label_smoothing)
aug_images, aug_labels = augmenter.distort(images, labels)
self.assertEqual(images.shape, aug_images.shape)
self.assertEqual(images.dtype, aug_images.dtype)
self.assertEqual([batch_size, num_classes], aug_labels.shape)
self.assertAllLessEqual(aug_labels, 1. - label_smoothing +
2. / num_classes) # With tolerance
self.assertAllGreaterEqual(aug_labels, label_smoothing / num_classes -
                               1e-4)  # With tolerance
def test_mixup_changes_image(self):
batch_size = 12
num_classes = 1000
label_smoothing = 0.1
images = tf.random.normal((batch_size, 224, 224, 3), dtype=tf.float32)
labels = tf.range(batch_size)
augmenter = augment.MixupAndCutmix(
mixup_alpha=1., cutmix_alpha=0., num_classes=num_classes)
aug_images, aug_labels = augmenter.distort(images, labels)
self.assertEqual(images.shape, aug_images.shape)
self.assertEqual(images.dtype, aug_images.dtype)
self.assertEqual([batch_size, num_classes], aug_labels.shape)
self.assertAllLessEqual(aug_labels, 1. - label_smoothing +
2. / num_classes) # With tolerance
self.assertAllGreaterEqual(aug_labels, label_smoothing / num_classes -
                               1e-4)  # With tolerance
self.assertFalse(tf.math.reduce_all(images == aug_images))
def test_cutmix_changes_image(self):
batch_size = 12
num_classes = 1000
label_smoothing = 0.1
images = tf.random.normal((batch_size, 224, 224, 3), dtype=tf.float32)
labels = tf.range(batch_size)
augmenter = augment.MixupAndCutmix(
mixup_alpha=0., cutmix_alpha=1., num_classes=num_classes)
aug_images, aug_labels = augmenter.distort(images, labels)
self.assertEqual(images.shape, aug_images.shape)
self.assertEqual(images.dtype, aug_images.dtype)
self.assertEqual([batch_size, num_classes], aug_labels.shape)
self.assertAllLessEqual(aug_labels, 1. - label_smoothing +
2. / num_classes) # With tolerance
self.assertAllGreaterEqual(aug_labels, label_smoothing / num_classes -
                               1e-4)  # With tolerance
self.assertFalse(tf.math.reduce_all(images == aug_images))
def test_mixup_and_cutmix_smoothes_labels_with_videos(self):
batch_size = 12
num_classes = 1000
label_smoothing = 0.1
images = tf.random.normal((batch_size, 8, 224, 224, 3), dtype=tf.float32)
labels = tf.range(batch_size)
augmenter = augment.MixupAndCutmix(
num_classes=num_classes, label_smoothing=label_smoothing)
aug_images, aug_labels = augmenter.distort(images, labels)
self.assertEqual(images.shape, aug_images.shape)
self.assertEqual(images.dtype, aug_images.dtype)
self.assertEqual([batch_size, num_classes], aug_labels.shape)
self.assertAllLessEqual(aug_labels, 1. - label_smoothing +
2. / num_classes) # With tolerance
self.assertAllGreaterEqual(aug_labels, label_smoothing / num_classes -
                               1e-4)  # With tolerance
def test_mixup_changes_video(self):
batch_size = 12
num_classes = 1000
label_smoothing = 0.1
images = tf.random.normal((batch_size, 8, 224, 224, 3), dtype=tf.float32)
labels = tf.range(batch_size)
augmenter = augment.MixupAndCutmix(
mixup_alpha=1., cutmix_alpha=0., num_classes=num_classes)
aug_images, aug_labels = augmenter.distort(images, labels)
self.assertEqual(images.shape, aug_images.shape)
self.assertEqual(images.dtype, aug_images.dtype)
self.assertEqual([batch_size, num_classes], aug_labels.shape)
self.assertAllLessEqual(aug_labels, 1. - label_smoothing +
2. / num_classes) # With tolerance
self.assertAllGreaterEqual(aug_labels, label_smoothing / num_classes -
                               1e-4)  # With tolerance
self.assertFalse(tf.math.reduce_all(images == aug_images))
def test_cutmix_changes_video(self):
batch_size = 12
num_classes = 1000
label_smoothing = 0.1
images = tf.random.normal((batch_size, 8, 224, 224, 3), dtype=tf.float32)
labels = tf.range(batch_size)
augmenter = augment.MixupAndCutmix(
mixup_alpha=0., cutmix_alpha=1., num_classes=num_classes)
aug_images, aug_labels = augmenter.distort(images, labels)
self.assertEqual(images.shape, aug_images.shape)
self.assertEqual(images.dtype, aug_images.dtype)
self.assertEqual([batch_size, num_classes], aug_labels.shape)
self.assertAllLessEqual(aug_labels, 1. - label_smoothing +
2. / num_classes) # With tolerance
self.assertAllGreaterEqual(aug_labels, label_smoothing / num_classes -
                               1e-4)  # With tolerance
self.assertFalse(tf.math.reduce_all(images == aug_images))
if __name__ == '__main__':
tf.test.main()
| 19,453 | 36.055238 | 79 | py |
models | models-master/official/vision/ops/box_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Box related ops."""
# Import libraries
import numpy as np
import tensorflow as tf
EPSILON = 1e-8
BBOX_XFORM_CLIP = np.log(1000. / 16.)
def yxyx_to_xywh(boxes):
"""Converts boxes from ymin, xmin, ymax, xmax to xmin, ymin, width, height.
Args:
boxes: a numpy array whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
Returns:
boxes: a numpy array whose shape is the same as `boxes` in new format.
Raises:
ValueError: If the last dimension of boxes is not 4.
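  Example (illustrative):
  ```
  yxyx_to_xywh(np.array([10., 20., 50., 80.]))  # -> [20., 10., 60., 40.]
  ```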
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
boxes_ymin = boxes[..., 0]
boxes_xmin = boxes[..., 1]
boxes_width = boxes[..., 3] - boxes[..., 1]
boxes_height = boxes[..., 2] - boxes[..., 0]
new_boxes = np.stack(
[boxes_xmin, boxes_ymin, boxes_width, boxes_height], axis=-1)
return new_boxes
def yxyx_to_cycxhw(boxes):
"""Converts box corner coordinates to center plus height and width terms.
Args:
boxes: a `Tensor` with last dimension of 4, representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
Returns:
    boxes: a `Tensor` with the same shape as the input boxes, in the format
      of cy, cx, height, width.
Raises:
ValueError: if the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError('Last dimension of boxes must be 4 but is {:d}'.format(
boxes.shape[-1]))
boxes_ycenter = (boxes[..., 0] + boxes[..., 2]) / 2
boxes_xcenter = (boxes[..., 1] + boxes[..., 3]) / 2
boxes_height = boxes[..., 2] - boxes[..., 0]
boxes_width = boxes[..., 3] - boxes[..., 1]
new_boxes = tf.stack(
[boxes_ycenter, boxes_xcenter, boxes_height, boxes_width], axis=-1)
return new_boxes
def cycxhw_to_yxyx(boxes):
"""Converts box center coordinates plus height and width terms to corner.
Args:
boxes: a numpy array whose last dimension is 4 representing the coordinates
of boxes in cy, cx, height, width order.
Returns:
boxes: a numpy array whose shape is the same as `boxes` in new format.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
boxes_ymin = boxes[..., 0] - boxes[..., 2] / 2
boxes_xmin = boxes[..., 1] - boxes[..., 3] / 2
boxes_ymax = boxes[..., 0] + boxes[..., 2] / 2
boxes_xmax = boxes[..., 1] + boxes[..., 3] / 2
new_boxes = tf.stack([
boxes_ymin, boxes_xmin, boxes_ymax, boxes_xmax], axis=-1)
return new_boxes
def jitter_boxes(boxes, noise_scale=0.025):
"""Jitters the box coordinates by some noise distribution.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
noise_scale: a python float which specifies the magnitude of noise. The rule
of thumb is to set this between (0, 0.1]. The default value is found to
mimic the noisy detections best empirically.
Returns:
jittered_boxes: a tensor whose shape is the same as `boxes` representing
the jittered boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('jitter_boxes'):
bbox_jitters = tf.random.normal(tf.shape(boxes), stddev=noise_scale)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
width = xmax - xmin
height = ymax - ymin
new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[..., 0:1] * width
new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[..., 1:2] * height
new_width = width * tf.math.exp(bbox_jitters[..., 2:3])
new_height = height * tf.math.exp(bbox_jitters[..., 3:4])
jittered_boxes = tf.concat(
[new_center_y - new_height * 0.5, new_center_x - new_width * 0.5,
new_center_y + new_height * 0.5, new_center_x + new_width * 0.5],
axis=-1)
return jittered_boxes
def normalize_boxes(boxes, image_shape):
"""Converts boxes to the normalized coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
normalized_boxes: a tensor whose shape is the same as `boxes` representing
the normalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('normalize_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0:1]
width = image_shape[..., 1:2]
ymin = boxes[..., 0:1] / height
xmin = boxes[..., 1:2] / width
ymax = boxes[..., 2:3] / height
xmax = boxes[..., 3:4] / width
normalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return normalized_boxes
def denormalize_boxes(boxes, image_shape):
"""Converts boxes normalized by [height, width] to pixel coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
denormalized_boxes: a tensor whose shape is the same as `boxes` representing
the denormalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
with tf.name_scope('denormalize_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.split(image_shape, 2, axis=-1)
ymin, xmin, ymax, xmax = tf.split(boxes, 4, axis=-1)
ymin = ymin * height
xmin = xmin * width
ymax = ymax * height
xmax = xmax * width
denormalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return denormalized_boxes
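# Illustrative sketch (not part of the original file): normalize/denormalize
# round trip using a plain (height, width) tuple for `image_shape`.
def _example_normalize_round_trip():
  boxes = tf.constant([[10.0, 20.0, 50.0, 80.0]])
  normalized = normalize_boxes(boxes, (100, 200))  # [[0.1, 0.1, 0.5, 0.4]]
  # Denormalizing with the same shape recovers the pixel coordinates.
  return denormalize_boxes(normalized, (100, 200))  # [[10., 20., 50., 80.]]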
def horizontal_flip_boxes(normalized_boxes):
"""Flips normalized boxes horizontally.
Args:
    normalized_boxes: the boxes in normalized coordinates.
Returns:
horizontally flipped boxes.
"""
if normalized_boxes.shape[-1] != 4:
raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format(
normalized_boxes.shape[-1]))
with tf.name_scope('horizontal_flip_boxes'):
ymin, xmin, ymax, xmax = tf.split(
value=normalized_boxes, num_or_size_splits=4, axis=-1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], axis=-1)
return flipped_boxes
def vertical_flip_boxes(normalized_boxes):
"""Flips normalized boxes vertically.
Args:
    normalized_boxes: the boxes in normalized coordinates.
Returns:
vertically flipped boxes.
"""
if normalized_boxes.shape[-1] != 4:
raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format(
normalized_boxes.shape[-1]))
with tf.name_scope('vertical_flip_boxes'):
ymin, xmin, ymax, xmax = tf.split(
value=normalized_boxes, num_or_size_splits=4, axis=-1)
flipped_ymin = tf.subtract(1.0, ymax)
flipped_ymax = tf.subtract(1.0, ymin)
flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], axis=-1)
return flipped_boxes
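# Illustrative sketch (not part of the original file): flipping twice is the
# identity, since each horizontal flip maps x to 1 - x in normalized space.
def _example_flip_round_trip():
  boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]])
  flipped = horizontal_flip_boxes(boxes)  # [[0.1, 0.4, 0.5, 0.8]]
  return horizontal_flip_boxes(flipped)  # [[0.1, 0.2, 0.5, 0.6]]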
def clip_boxes(boxes, image_shape):
"""Clips boxes to image boundaries.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
clipped_boxes: a tensor whose shape is the same as `boxes` representing the
clipped boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('clip_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
max_length = [height, width, height, width]
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.unstack(image_shape, axis=-1)
max_length = tf.stack([height, width, height, width], axis=-1)
clipped_boxes = tf.math.maximum(tf.math.minimum(boxes, max_length), 0.0)
return clipped_boxes
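# Illustrative sketch (not part of the original file): clipping a box that
# extends past the image boundary on several sides.
def _example_clip_boxes():
  boxes = tf.constant([[-5.0, -5.0, 120.0, 90.0]])
  # With a 100x100 image, the result is [[0., 0., 100., 90.]].
  return clip_boxes(boxes, (100, 100))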
def compute_outer_boxes(boxes, image_shape, scale=1.0):
"""Computes outer box encloses an object with a margin.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
scale: a float number specifying the scale of output outer boxes to input
`boxes`.
Returns:
outer_boxes: a tensor whose shape is the same as `boxes` representing the
outer boxes.
"""
if scale < 1.0:
raise ValueError(
        'scale is {}, but outer box scale must be no less than 1.0.'.format(
            scale))
if scale == 1.0:
return boxes
centers_y = (boxes[..., 0] + boxes[..., 2]) / 2.0
centers_x = (boxes[..., 1] + boxes[..., 3]) / 2.0
box_height = (boxes[..., 2] - boxes[..., 0]) * scale
box_width = (boxes[..., 3] - boxes[..., 1]) * scale
outer_boxes = tf.stack(
[centers_y - box_height / 2.0, centers_x - box_width / 2.0,
centers_y + box_height / 2.0, centers_x + box_width / 2.0],
axis=-1)
outer_boxes = clip_boxes(outer_boxes, image_shape)
return outer_boxes
def encode_boxes(boxes, anchors, weights=None):
"""Encodes boxes to targets.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
encoded box targets.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('encode_boxes'):
boxes = tf.cast(boxes, dtype=anchors.dtype)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
box_h = ymax - ymin
box_w = xmax - xmin
box_yc = ymin + 0.5 * box_h
box_xc = xmin + 0.5 * box_w
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
encoded_dy = (box_yc - anchor_yc) / anchor_h
encoded_dx = (box_xc - anchor_xc) / anchor_w
encoded_dh = tf.math.log(box_h / anchor_h)
encoded_dw = tf.math.log(box_w / anchor_w)
if weights:
encoded_dy *= weights[0]
encoded_dx *= weights[1]
encoded_dh *= weights[2]
encoded_dw *= weights[3]
encoded_boxes = tf.concat(
[encoded_dy, encoded_dx, encoded_dh, encoded_dw], axis=-1)
return encoded_boxes
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decodes boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4 representing the
      coordinates of encoded boxes in dy, dx, dh, dw order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
    decoded_boxes: a tensor whose shape is the same as `encoded_boxes`
      representing the decoded box targets.
"""
if encoded_boxes.shape[-1] != 4:
raise ValueError(
'encoded_boxes.shape[-1] is {:d}, but must be 4.'
.format(encoded_boxes.shape[-1]))
with tf.name_scope('decode_boxes'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy, dx, dh, dw = tf.split(encoded_boxes, 4, -1)
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.math.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.math.minimum(dw, BBOX_XFORM_CLIP)
anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = tf.split(
anchors, 4, -1)
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.math.exp(dh) * anchor_h
decoded_boxes_w = tf.math.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h
decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w
decoded_boxes = tf.concat(
[decoded_boxes_ymin, decoded_boxes_xmin,
decoded_boxes_ymax, decoded_boxes_xmax],
axis=-1)
return decoded_boxes
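# Illustrative sketch (not part of the original file): encode_boxes and
# decode_boxes are inverse operations given the same anchors and weights.
def _example_encode_decode_round_trip():
  anchors = tf.constant([[0.0, 0.0, 10.0, 10.0]])
  boxes = tf.constant([[1.0, 2.0, 9.0, 8.0]])
  encoded = encode_boxes(boxes, anchors)  # dy=0, dx=0, dh=log(.8), dw=log(.6)
  # Decoding recovers `boxes` up to floating-point error.
  return decode_boxes(encoded, anchors)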
def filter_boxes(boxes, scores, image_shape, min_size_threshold):
"""Filters and remove boxes that are too small or fall outside the image.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
image_shape: a tensor whose shape is the same as, or `broadcastable` to
`boxes` except the last dimension, which is 2, representing [height,
width] of the scaled image.
min_size_threshold: a float representing the minimal box size in each side
(w.r.t. the scaled image). Boxes whose sides are smaller than it will be
filtered out.
Returns:
    filtered_boxes: a tensor whose shape is the same as `boxes` but with
      the positions of the filtered boxes filled with 0.
    filtered_scores: a tensor whose shape is the same as `scores` but with
      the positions of the filtered boxes filled with 0.
"""
if boxes.shape[-1] != 4:
raise ValueError(
        'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('filter_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0]
width = image_shape[..., 1]
ymin = boxes[..., 0]
xmin = boxes[..., 1]
ymax = boxes[..., 2]
xmax = boxes[..., 3]
h = ymax - ymin
w = xmax - xmin
yc = ymin + 0.5 * h
xc = xmin + 0.5 * w
min_size = tf.cast(
tf.math.maximum(min_size_threshold, 0.0), dtype=boxes.dtype)
filtered_size_mask = tf.math.logical_and(
tf.math.greater(h, min_size), tf.math.greater(w, min_size))
filtered_center_mask = tf.logical_and(
tf.math.logical_and(tf.math.greater(yc, 0.0), tf.math.less(yc, height)),
tf.math.logical_and(tf.math.greater(xc, 0.0), tf.math.less(xc, width)))
filtered_mask = tf.math.logical_and(
filtered_size_mask, filtered_center_mask)
filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def filter_boxes_by_scores(boxes, scores, min_score_threshold):
"""Filters and remove boxes whose scores are smaller than the threshold.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
min_score_threshold: a float representing the minimal box score threshold.
Boxes whose score are smaller than it will be filtered out.
Returns:
    filtered_boxes: a tensor whose shape is the same as `boxes` but with
      the positions of the filtered boxes filled with 0.
    filtered_scores: a tensor whose shape is the same as `scores` but with
      the positions of the filtered boxes filled with -1.
"""
if boxes.shape[-1] != 4:
    raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format(
boxes.shape[-1]))
with tf.name_scope('filter_boxes_by_scores'):
filtered_mask = tf.math.greater(scores, min_score_threshold)
filtered_scores = tf.where(filtered_mask, scores, -tf.ones_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def gather_instances(selected_indices, instances, *aux_instances):
"""Gathers instances by indices.
Args:
selected_indices: a Tensor of shape [batch, K] which indicates the selected
indices in instance dimension (2nd dimension).
instances: a Tensor of shape [batch, N, ...] where the 2nd dimension is
the instance dimension to be selected from.
*aux_instances: the additional Tensors whose shapes are in [batch, N, ...]
which are the tensors to be selected from using the `selected_indices`.
Returns:
selected_instances: the tensor of shape [batch, K, ...] which corresponds to
the selected instances of the `instances` tensor.
selected_aux_instances: the additional tensors of shape [batch, K, ...]
      which correspond to the selected instances of the `aux_instances`
      tensors.
"""
batch_size = instances.shape[0]
if batch_size == 1:
selected_instances = tf.squeeze(
tf.gather(instances, selected_indices, axis=1), axis=1)
if aux_instances:
selected_aux_instances = [
tf.squeeze(
tf.gather(a, selected_indices, axis=1), axis=1)
for a in aux_instances
]
return tuple([selected_instances] + selected_aux_instances)
else:
return selected_instances
else:
indices_shape = tf.shape(selected_indices)
batch_indices = (
tf.expand_dims(tf.range(indices_shape[0]), axis=-1) *
tf.ones([1, indices_shape[-1]], dtype=tf.int32))
gather_nd_indices = tf.stack(
[batch_indices, selected_indices], axis=-1)
selected_instances = tf.gather_nd(instances, gather_nd_indices)
if aux_instances:
selected_aux_instances = [
tf.gather_nd(a, gather_nd_indices) for a in aux_instances
]
return tuple([selected_instances] + selected_aux_instances)
else:
return selected_instances
def top_k_boxes(boxes, scores, k):
"""Sorts and select top k boxes according to the scores.
Args:
boxes: a tensor of shape [batch_size, N, 4] representing the coordinate of
the boxes. N is the number of boxes per image.
    scores: a tensor of shape [batch_size, N] representing the score of the
boxes.
k: an integer or a tensor indicating the top k number.
Returns:
selected_boxes: a tensor of shape [batch_size, k, 4] representing the
selected top k box coordinates.
selected_scores: a tensor of shape [batch_size, k] representing the selected
top k box scores.
"""
with tf.name_scope('top_k_boxes'):
selected_scores, top_k_indices = tf.nn.top_k(scores, k=k, sorted=True)
selected_boxes = gather_instances(top_k_indices, boxes)
return selected_boxes, selected_scores
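# Illustrative sketch (not part of the original file): selecting the two
# highest-scoring boxes from a batch of one image with three candidates.
def _example_top_k_boxes():
  boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0],
                        [0.0, 0.0, 2.0, 2.0],
                        [0.0, 0.0, 3.0, 3.0]]])
  scores = tf.constant([[0.3, 0.9, 0.5]])
  # Returns the boxes at indices 1 and 2 along with scores [[0.9, 0.5]].
  return top_k_boxes(boxes, scores, k=2)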
def get_non_empty_box_indices(boxes):
"""Gets indices for non-empty boxes."""
  # Selects indices where both box height and width are greater than 0.
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
indices = tf.where(tf.logical_and(tf.greater(height, 0),
tf.greater(width, 0)))
return indices[:, 0]
def bbox_overlap(boxes, gt_boxes):
"""Calculates the overlap between proposal and ground truth boxes.
Some `boxes` or `gt_boxes` may have been padded. The returned `iou` tensor
for these boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
    iou: a tensor with a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('bbox_overlap'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
i_xmin = tf.math.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))
i_xmax = tf.math.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))
i_ymin = tf.math.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))
i_ymax = tf.math.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))
i_area = (
tf.math.maximum((i_xmax - i_xmin), 0) *
tf.math.maximum((i_ymax - i_ymin), 0))
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8
# Calculates IoU.
iou = i_area / u_area
# Fills -1 for IoU entries between the padded ground truth boxes.
gt_invalid_mask = tf.less(
tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0)
padding_mask = tf.logical_or(
tf.zeros_like(bb_x_min, dtype=tf.bool),
tf.transpose(gt_invalid_mask, [0, 2, 1]))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
# Fills -1 for invalid (-1) boxes.
boxes_invalid_mask = tf.less(
tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)
iou = tf.where(boxes_invalid_mask, -tf.ones_like(iou), iou)
return iou
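# Illustrative sketch (not part of the original file): IoU against one real
# ground truth box and one padded (all -1) box, which yields -1.
def _example_bbox_overlap():
  boxes = tf.constant([[[0.0, 0.0, 10.0, 10.0]]])
  gt_boxes = tf.constant([[[0.0, 0.0, 5.0, 5.0],
                           [-1.0, -1.0, -1.0, -1.0]]])
  # Result is approximately [[[0.25, -1.0]]].
  return bbox_overlap(boxes, gt_boxes)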
def bbox_generalized_overlap(boxes, gt_boxes):
"""Calculates the GIOU between proposal and ground truth boxes.
  The generalized intersection over union is an adjustment of the traditional
  IoU metric which provides continuous updates even for predictions with no
  overlap.
This metric is defined in https://giou.stanford.edu/GIoU.pdf. Note, some
`gt_boxes` may have been padded. The returned `giou` tensor for these boxes
will be -1.
Args:
boxes: a `Tensor` with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a `Tensor` with a shape of [batch_size, max_num_instances, 4].
This tensor may have paddings with a negative value and will also be in
the [ymin, xmin, ymax, xmax] format.
Returns:
    giou: a `Tensor` with a shape of [batch_size, N, max_num_instances].
"""
with tf.name_scope('bbox_generalized_overlap'):
assert boxes.shape.as_list(
)[-1] == 4, 'Boxes must be defined by 4 coordinates.'
assert gt_boxes.shape.as_list(
)[-1] == 4, 'Groundtruth boxes must be defined by 4 coordinates.'
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the hull area for each pair of boxes, with one from
# boxes and the other from gt_boxes.
# Outputs for coordinates are of shape [batch_size, N, max_num_instances]
h_xmin = tf.minimum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))
h_xmax = tf.maximum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))
h_ymin = tf.minimum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))
h_ymax = tf.maximum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))
h_area = tf.maximum((h_xmax - h_xmin), 0) * tf.maximum((h_ymax - h_ymin), 0)
# Add a small epsilon to avoid divide-by-zero.
h_area = h_area + 1e-8
# Calculates the intersection area.
i_xmin = tf.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))
i_xmax = tf.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))
i_ymin = tf.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))
i_ymax = tf.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))
i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0)
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8
# Calculates IoU.
iou = i_area / u_area
# Calculates GIoU.
giou = iou - (h_area - u_area) / h_area
# Fills -1 for GIoU entries between the padded ground truth boxes.
gt_invalid_mask = tf.less(
tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0)
padding_mask = tf.broadcast_to(
tf.transpose(gt_invalid_mask, [0, 2, 1]), tf.shape(giou))
giou = tf.where(padding_mask, -tf.ones_like(giou), giou)
return giou
def bbox_intersection_over_area(boxes, gt_boxes):
"""Calculates IoAs (intersection over area) between proposal and ground truth boxes.
Some `boxes` or `gt_boxes` may have been padded. The returned `iou` tensor
for these boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, M, 4]. This tensor might
have paddings with a negative value.
Returns:
    ioa: a tensor with a shape of [batch_size, N, M].
"""
  with tf.name_scope('bbox_intersection_over_area'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2
)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2
)
# Calculates the intersection area.
i_xmin = tf.math.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))
i_xmax = tf.math.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))
i_ymin = tf.math.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))
i_ymax = tf.math.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))
i_area = tf.math.maximum((i_xmax - i_xmin), 0) * tf.math.maximum(
(i_ymax - i_ymin), 0
)
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
ioa = tf.math.divide_no_nan(i_area, bb_area)
# Fills -1 for IoA entries between the padded ground truth boxes.
gt_invalid_mask = tf.less(
tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0
)
padding_mask = tf.logical_or(
tf.zeros_like(bb_x_min, dtype=tf.bool),
tf.transpose(gt_invalid_mask, [0, 2, 1]),
)
ioa = tf.where(padding_mask, -1., ioa)
# Fills -1 for invalid (-1) boxes.
boxes_invalid_mask = tf.less(
tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0
)
ioa = tf.where(boxes_invalid_mask, -1., ioa)
return ioa
def box_matching(boxes, gt_boxes, gt_classes):
"""Matches boxes to groundtruth boxes.
Given the proposal boxes and the groundtruth boxes and classes, perform the
groundtruth matching by taking the argmax of the IoU between boxes and
groundtruth boxes.
Args:
boxes: a tensor of shape of [batch_size, N, 4] representing the box
      coordinates to be matched to groundtruth boxes.
gt_boxes: a tensor of shape of [batch_size, MAX_INSTANCES, 4] representing
the groundtruth box coordinates. It is padded with -1s to indicate the
invalid boxes.
gt_classes: [batch_size, MAX_INSTANCES] representing the groundtruth box
classes. It is padded with -1s to indicate the invalid classes.
Returns:
matched_gt_boxes: a tensor of shape of [batch_size, N, 4], representing
the matched groundtruth box coordinates for each input box. If the box
does not overlap with any groundtruth boxes, the matched boxes of it
will be set to all 0s.
matched_gt_classes: a tensor of shape of [batch_size, N], representing
the matched groundtruth classes for each input box. If the box does not
overlap with any groundtruth boxes, the matched box classes of it will
be set to 0, which corresponds to the background class.
matched_gt_indices: a tensor of shape of [batch_size, N], representing
the indices of the matched groundtruth boxes in the original gt_boxes
tensor. If the box does not overlap with any groundtruth boxes, the
index of the matched groundtruth will be set to -1.
matched_iou: a tensor of shape of [batch_size, N], representing the IoU
between the box and its matched groundtruth box. The matched IoU is the
maximum IoU of the box and all the groundtruth boxes.
iou: a tensor of shape of [batch_size, N, K], representing the IoU matrix
between boxes and the groundtruth boxes. The IoU between a box and the
invalid groundtruth boxes whose coordinates are [-1, -1, -1, -1] is -1.
"""
# Compute IoU between boxes and gt_boxes.
# iou <- [batch_size, N, K]
iou = bbox_overlap(boxes, gt_boxes)
# max_iou <- [batch_size, N]
# 0.0 -> no match to gt, or -1.0 match to no gt
matched_iou = tf.reduce_max(iou, axis=-1)
# background_box_mask <- bool, [batch_size, N]
background_box_mask = tf.less_equal(matched_iou, 0.0)
argmax_iou_indices = tf.argmax(iou, axis=-1, output_type=tf.int32)
matched_gt_boxes, matched_gt_classes = gather_instances(
argmax_iou_indices, gt_boxes, gt_classes)
matched_gt_boxes = tf.where(
tf.tile(tf.expand_dims(background_box_mask, axis=-1), [1, 1, 4]),
tf.zeros_like(matched_gt_boxes, dtype=matched_gt_boxes.dtype),
matched_gt_boxes)
matched_gt_classes = tf.where(
background_box_mask,
tf.zeros_like(matched_gt_classes),
matched_gt_classes)
matched_gt_indices = tf.where(
background_box_mask,
-tf.ones_like(argmax_iou_indices),
argmax_iou_indices)
return (matched_gt_boxes, matched_gt_classes, matched_gt_indices,
matched_iou, iou)
def bbox2mask(bbox: tf.Tensor,
*,
image_height: int,
image_width: int,
dtype: tf.DType = tf.bool) -> tf.Tensor:
"""Converts bounding boxes to bitmasks.
Args:
bbox: A tensor in shape (..., 4) with arbitrary numbers of batch dimensions,
representing the absolute coordinates (ymin, xmin, ymax, xmax) for each
bounding box.
image_height: an integer representing the height of the image.
image_width: an integer representing the width of the image.
dtype: DType of the output bitmasks.
Returns:
A tensor in shape (..., height, width) which stores the bitmasks created
from the bounding boxes. For example:
>>> bbox2mask(tf.constant([[1,2,4,4]]),
image_height=5,
image_width=5,
dtype=tf.int32)
<tf.Tensor: shape=(1, 5, 5), dtype=int32, numpy=
array([[[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]]], dtype=int32)>
"""
bbox_shape = bbox.get_shape().as_list()
if bbox_shape[-1] != 4:
raise ValueError(
'Expected the last dimension of `bbox` has size == 4, but the shape '
'of `bbox` was: %s' % bbox_shape)
# (..., 1)
ymin = bbox[..., 0:1]
xmin = bbox[..., 1:2]
ymax = bbox[..., 2:3]
xmax = bbox[..., 3:4]
# (..., 1, width)
ymin = tf.expand_dims(tf.repeat(ymin, repeats=image_width, axis=-1), axis=-2)
# (..., height, 1)
xmin = tf.expand_dims(tf.repeat(xmin, repeats=image_height, axis=-1), axis=-1)
# (..., 1, width)
ymax = tf.expand_dims(tf.repeat(ymax, repeats=image_width, axis=-1), axis=-2)
# (..., height, 1)
xmax = tf.expand_dims(tf.repeat(xmax, repeats=image_height, axis=-1), axis=-1)
# (height, 1)
y_grid = tf.expand_dims(tf.range(image_height, dtype=bbox.dtype), axis=-1)
# (1, width)
x_grid = tf.expand_dims(tf.range(image_width, dtype=bbox.dtype), axis=-2)
# (..., height, width)
ymin_mask = y_grid >= ymin
xmin_mask = x_grid >= xmin
ymax_mask = y_grid < ymax
xmax_mask = x_grid < xmax
return tf.cast(ymin_mask & xmin_mask & ymax_mask & xmax_mask, dtype)
| 34,487 | 36.365114 | 86 | py |
models | models-master/official/vision/ops/box_matcher_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for box_matcher.py."""
import tensorflow as tf
from official.vision.ops import box_matcher
class BoxMatcherTest(tf.test.TestCase):
def test_box_matcher_unbatched(self):
sim_matrix = tf.constant(
[[0.04, 0, 0, 0],
[0, 0, 1., 0]],
dtype=tf.float32)
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = box_matcher.BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
indicators=[-3, -2, -1, 1])
match_indices, match_indicators = matcher(sim_matrix)
positive_matches = tf.greater_equal(match_indicators, 0)
negative_matches = tf.equal(match_indicators, -2)
self.assertAllEqual(
positive_matches.numpy(), [False, True])
self.assertAllEqual(
negative_matches.numpy(), [True, False])
self.assertAllEqual(
match_indices.numpy(), [0, 2])
self.assertAllEqual(
match_indicators.numpy(), [-2, 1])
def test_box_matcher_batched(self):
sim_matrix = tf.constant(
[[[0.04, 0, 0, 0],
[0, 0, 1., 0]]],
dtype=tf.float32)
fg_threshold = 0.5
bg_thresh_hi = 0.2
bg_thresh_lo = 0.0
matcher = box_matcher.BoxMatcher(
thresholds=[bg_thresh_lo, bg_thresh_hi, fg_threshold],
indicators=[-3, -2, -1, 1])
match_indices, match_indicators = matcher(sim_matrix)
positive_matches = tf.greater_equal(match_indicators, 0)
negative_matches = tf.equal(match_indicators, -2)
self.assertAllEqual(
positive_matches.numpy(), [[False, True]])
self.assertAllEqual(
negative_matches.numpy(), [[True, False]])
self.assertAllEqual(
match_indices.numpy(), [[0, 2]])
self.assertAllEqual(
match_indicators.numpy(), [[-2, 1]])
if __name__ == '__main__':
tf.test.main()
| 2,428 | 29.746835 | 74 | py |
models | models-master/official/vision/ops/anchor.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Anchor box and labeler definition."""
import collections
from typing import Dict, Optional, Tuple
# Import libraries
import tensorflow as tf
from official.vision.ops import anchor_generator
from official.vision.ops import box_matcher
from official.vision.ops import iou_similarity
from official.vision.ops import target_gather
from official.vision.utils.object_detection import balanced_positive_negative_sampler
from official.vision.utils.object_detection import box_list
from official.vision.utils.object_detection import faster_rcnn_box_coder
class Anchor(object):
"""Anchor class for anchor-based object detectors."""
def __init__(
self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size,
):
"""Constructs multi-scale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added on each
        level. For instance, num_scales=2 adds one additional intermediate
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
        For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing [height,
        width] of the input image size. The image_size should be divisible by
        the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self) -> tf.Tensor:
"""Generates multi-scale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2**level
          intermediate_scale = 2 ** (scale / float(self.num_scales))
          base_anchor_size = self.anchor_size * stride * intermediate_scale
aspect_x = aspect_ratio**0.5
aspect_y = aspect_ratio**-0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack(
[
yv - half_anchor_size_y,
xv - half_anchor_size_x,
yv + half_anchor_size_y,
xv + half_anchor_size_x,
],
axis=1,
)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels: tf.Tensor) -> Dict[str, tf.Tensor]:
"""Unpacks an array of labels into multi-scales labels."""
unpacked_labels = collections.OrderedDict()
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2**level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2**level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
unpacked_labels[str(level)] = tf.reshape(
labels[count : count + steps], [feat_size_y, feat_size_x, -1]
)
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes)
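# Illustrative sketch (not part of the original class): constructing anchors
# for a small 64x64 image. All values below are made up for demonstration.
def _example_anchor_usage():
  anchors = Anchor(min_level=3, max_level=4, num_scales=2,
                   aspect_ratios=[1.0], anchor_size=4.0, image_size=(64, 64))
  # `boxes` is [160, 4]: 8*8*2 anchors at level 3 plus 4*4*2 at level 4.
  # `multilevel_boxes` holds tensors of shape [8, 8, 8] and [4, 4, 8] under
  # keys '3' and '4' respectively.
  return anchors.boxes, anchors.multilevel_boxes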
class AnchorLabeler(object):
"""Labeler for dense object detector."""
def __init__(self, match_threshold=0.5, unmatched_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
match_threshold: a float number between 0 and 1 representing the
lower-bound threshold to assign positive labels for anchors. An anchor
with a score over the threshold is labeled positive.
unmatched_threshold: a float number between 0 and 1 representing the
upper-bound threshold to assign negative labels for anchors. An anchor
with a score below the threshold is labeled negative.
"""
self.similarity_calc = iou_similarity.IouSimilarity()
self.target_gather = target_gather.TargetGather()
self.matcher = box_matcher.BoxMatcher(
thresholds=[unmatched_threshold, match_threshold],
indicators=[-1, -2, 1],
force_match_for_each_col=True,
)
self.box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
def label_anchors(
self,
anchor_boxes: Dict[str, tf.Tensor],
gt_boxes: tf.Tensor,
gt_labels: tf.Tensor,
gt_attributes: Optional[Dict[str, tf.Tensor]] = None,
gt_weights: Optional[tf.Tensor] = None,
) -> Tuple[
Dict[str, tf.Tensor],
Dict[str, tf.Tensor],
Dict[str, Dict[str, tf.Tensor]],
tf.Tensor,
tf.Tensor,
]:
"""Labels anchors with ground truth inputs.
Args:
anchor_boxes: An ordered dictionary with keys [min_level, min_level+1,
..., max_level]. The values are tensor with shape [height_l, width_l,
num_anchors_per_location * 4]. The height_l and width_l represent the
dimension of the feature pyramid at l-th level. For each anchor box, the
tensor stores [y0, x0, y1, x1] for the four corners.
gt_boxes: A float tensor with shape [N, 4] representing ground-truth
boxes. For each row, it stores [y0, x0, y1, x1] for four corners of a
box.
      gt_labels: An integer tensor with shape [N, 1] representing ground-truth
classes.
gt_attributes: If not None, a dict of (name, gt_attribute) pairs.
`gt_attribute` is a float tensor with shape [N, attribute_size]
representing ground-truth attributes.
gt_weights: If not None, a float tensor with shape [N] representing
ground-truth weights.
Returns:
cls_targets_dict: An ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
box_targets_dict: An ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors_per_location * 4]. The height_l
and width_l represent the dimension of bounding box regression output at
l-th level.
attribute_targets_dict: A dict with (name, attribute_targets) pairs. Each
`attribute_targets` represents an ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors_per_location * attribute_size].
The height_l and width_l represent the dimension of attribute prediction
output at l-th level.
cls_weights: A flattened Tensor with shape [num_anchors], that serves as
masking / sample weight for classification loss. Its value is 1.0 for
positive and negative matched anchors, and 0.0 for ignored anchors.
box_weights: A flattened Tensor with shape [num_anchors], that serves as
masking / sample weight for regression loss. Its value is 1.0 for
positive matched anchors, and 0.0 for negative and ignored anchors.
"""
flattened_anchor_boxes = []
for anchors in anchor_boxes.values():
flattened_anchor_boxes.append(tf.reshape(anchors, [-1, 4]))
flattened_anchor_boxes = tf.concat(flattened_anchor_boxes, axis=0)
similarity_matrix = self.similarity_calc(flattened_anchor_boxes, gt_boxes)
match_indices, match_indicators = self.matcher(similarity_matrix)
mask = tf.less_equal(match_indicators, 0)
cls_mask = tf.expand_dims(mask, -1)
cls_targets = self.target_gather(gt_labels, match_indices, cls_mask, -1)
box_mask = tf.tile(cls_mask, [1, 4])
box_targets = self.target_gather(gt_boxes, match_indices, box_mask)
att_targets = {}
if gt_attributes:
for k, v in gt_attributes.items():
att_size = v.get_shape().as_list()[-1]
att_mask = tf.tile(cls_mask, [1, att_size])
att_targets[k] = self.target_gather(v, match_indices, att_mask, 0.0)
# When there is no ground truth labels, we force the weight to be 1 so that
# negative matched anchors get non-zero weights.
num_gt_labels = tf.shape(gt_labels)[0]
weights = tf.cond(
tf.greater(num_gt_labels, 0),
lambda: tf.squeeze(tf.ones_like(gt_labels, dtype=tf.float32), -1),
lambda: tf.ones([1], dtype=tf.float32),
)
if gt_weights is not None:
weights = tf.cond(
tf.greater(num_gt_labels, 0),
lambda: tf.math.multiply(weights, gt_weights),
lambda: weights,
)
box_weights = self.target_gather(weights, match_indices, mask)
ignore_mask = tf.equal(match_indicators, -2)
cls_weights = self.target_gather(weights, match_indices, ignore_mask)
box_targets = box_list.BoxList(box_targets)
anchor_box = box_list.BoxList(flattened_anchor_boxes)
box_targets = self.box_coder.encode(box_targets, anchor_box)
# Unpacks labels into multi-level representations.
cls_targets = unpack_targets(cls_targets, anchor_boxes)
box_targets = unpack_targets(box_targets, anchor_boxes)
attribute_targets = {
k: unpack_targets(v, anchor_boxes) for k, v in att_targets.items()
}
return (
cls_targets,
box_targets,
attribute_targets,
cls_weights,
box_weights,
)
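# Illustrative sketch (not part of the original class): labeling the anchors
# from the `Anchor` example above against a single ground truth box.
def _example_label_anchors():
  anchors = Anchor(min_level=3, max_level=4, num_scales=2,
                   aspect_ratios=[1.0], anchor_size=4.0, image_size=(64, 64))
  gt_boxes = tf.constant([[10.0, 10.0, 40.0, 40.0]])
  gt_labels = tf.constant([[1]], dtype=tf.int32)
  labeler = AnchorLabeler()
  # cls_targets['3'] has shape [8, 8, 2]; box_targets['3'] has shape [8, 8, 8].
  return labeler.label_anchors(anchors.multilevel_boxes, gt_boxes, gt_labels)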
class RpnAnchorLabeler(AnchorLabeler):
"""Labeler for Region Proposal Network."""
def __init__(
self,
match_threshold=0.7,
unmatched_threshold=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5,
):
AnchorLabeler.__init__(
self,
match_threshold=match_threshold,
unmatched_threshold=unmatched_threshold,
)
self._rpn_batch_size_per_im = rpn_batch_size_per_im
self._rpn_fg_fraction = rpn_fg_fraction
def _get_rpn_samples(self, match_results):
"""Computes anchor labels.
This function performs subsampling for foreground (fg) and background (bg)
anchors.
Args:
      match_results: An integer tensor with shape [N] representing the matching
results of anchors. (1) match_results[i]>=0, meaning that column i is
matched with row match_results[i]. (2) match_results[i]=-1, meaning that
column i is not matched. (3) match_results[i]=-2, meaning that column i
is ignored.
Returns:
      score_targets: an integer tensor with a shape of [N].
        (1) score_targets[i]=1, the anchor is a positive sample.
        (2) score_targets[i]=0, negative. (3) score_targets[i]=-1, the anchor is
        don't care (ignored).
"""
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
positive_fraction=self._rpn_fg_fraction, is_static=False
)
)
# indicator includes both positive and negative labels.
# labels includes only positives labels.
# positives = indicator & labels.
# negatives = indicator & !labels.
# ignore = !indicator.
indicator = tf.greater(match_results, -2)
labels = tf.greater(match_results, -1)
samples = sampler.subsample(indicator, self._rpn_batch_size_per_im, labels)
positive_labels = tf.where(
tf.logical_and(samples, labels),
tf.constant(2, dtype=tf.int32, shape=match_results.shape),
tf.constant(0, dtype=tf.int32, shape=match_results.shape),
)
negative_labels = tf.where(
tf.logical_and(samples, tf.logical_not(labels)),
tf.constant(1, dtype=tf.int32, shape=match_results.shape),
tf.constant(0, dtype=tf.int32, shape=match_results.shape),
)
ignore_labels = tf.fill(match_results.shape, -1)
return (
ignore_labels + positive_labels + negative_labels,
positive_labels,
negative_labels,
)
def label_anchors( # pytype: disable=signature-mismatch # overriding-parameter-count-checks
self,
anchor_boxes: Dict[str, tf.Tensor],
gt_boxes: tf.Tensor,
gt_labels: tf.Tensor,
) -> Tuple[Dict[str, tf.Tensor], Dict[str, tf.Tensor]]:
"""Labels anchors with ground truth inputs.
Args:
anchor_boxes: An ordered dictionary with keys [min_level, min_level+1,
..., max_level]. The values are tensor with shape [height_l, width_l,
num_anchors_per_location * 4]. The height_l and width_l represent the
dimension of the feature pyramid at l-th level. For each anchor box, the
tensor stores [y0, x0, y1, x1] for the four corners.
gt_boxes: A float tensor with shape [N, 4] representing ground-truth
boxes. For each row, it stores [y0, x0, y1, x1] for four corners of a
box.
      gt_labels: An integer tensor with shape [N, 1] representing ground-truth
classes.
Returns:
score_targets_dict: An ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
box_targets_dict: An ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors_per_location * 4]. The height_l
and width_l represent the dimension of bounding box regression output at
l-th level.
"""
flattened_anchor_boxes = []
for anchors in anchor_boxes.values():
flattened_anchor_boxes.append(tf.reshape(anchors, [-1, 4]))
flattened_anchor_boxes = tf.concat(flattened_anchor_boxes, axis=0)
similarity_matrix = self.similarity_calc(flattened_anchor_boxes, gt_boxes)
match_indices, match_indicators = self.matcher(similarity_matrix)
box_mask = tf.tile(
tf.expand_dims(tf.less_equal(match_indicators, 0), -1), [1, 4]
)
box_targets = self.target_gather(gt_boxes, match_indices, box_mask)
box_targets_list = box_list.BoxList(box_targets)
anchor_box_list = box_list.BoxList(flattened_anchor_boxes)
box_targets = self.box_coder.encode(box_targets_list, anchor_box_list)
# Zero out the unmatched and ignored regression targets.
num_matches = match_indices.shape.as_list()[0] or tf.shape(match_indices)[0]
unmatched_ignored_box_targets = tf.zeros([num_matches, 4], dtype=tf.float32)
matched_anchors_mask = tf.greater_equal(match_indicators, 0)
# To broadcast matched_anchors_mask to the same shape as
# matched_reg_targets.
matched_anchors_mask = tf.tile(
tf.expand_dims(matched_anchors_mask, 1), [1, tf.shape(box_targets)[1]]
)
box_targets = tf.where(
matched_anchors_mask, box_targets, unmatched_ignored_box_targets
)
# score_targets contains the subsampled positive and negative anchors.
score_targets, _, _ = self._get_rpn_samples(match_indicators)
# Unpacks labels.
score_targets_dict = unpack_targets(score_targets, anchor_boxes)
box_targets_dict = unpack_targets(box_targets, anchor_boxes)
return score_targets_dict, box_targets_dict
def build_anchor_generator(
min_level, max_level, num_scales, aspect_ratios, anchor_size
):
"""Build anchor generator from levels."""
anchor_sizes = collections.OrderedDict()
strides = collections.OrderedDict()
scales = []
for scale in range(num_scales):
scales.append(2 ** (scale / float(num_scales)))
for level in range(min_level, max_level + 1):
stride = 2**level
strides[str(level)] = stride
anchor_sizes[str(level)] = anchor_size * stride
anchor_gen = anchor_generator.AnchorGenerator(
anchor_sizes=anchor_sizes,
scales=scales,
aspect_ratios=aspect_ratios,
strides=strides,
)
return anchor_gen
def unpack_targets(
targets: tf.Tensor, anchor_boxes_dict: Dict[str, tf.Tensor]
) -> Dict[str, tf.Tensor]:
"""Unpacks an array of labels into multi-scales labels.
Args:
targets: A tensor with shape [num_anchors, M] representing the packed
targets with M values stored for each anchor.
anchor_boxes_dict: An ordered dictionary with keys [min_level, min_level+1,
..., max_level]. The values are tensor with shape [height_l, width_l,
num_anchors_per_location * 4]. The height_l and width_l represent the
dimension of the feature pyramid at l-th level. For each anchor box, the
tensor stores [y0, x0, y1, x1] for the four corners.
Returns:
unpacked_targets: An ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with shape
[height_l, width_l, num_anchors_per_location * M]. The height_l and
width_l represent the dimension of the feature pyramid at l-th level. M is
the number of values stored for each anchor.
"""
unpacked_targets = collections.OrderedDict()
count = 0
for level, anchor_boxes in anchor_boxes_dict.items():
feat_size_shape = anchor_boxes.shape.as_list()
feat_size_y = feat_size_shape[0]
feat_size_x = feat_size_shape[1]
anchors_per_location = int(feat_size_shape[2] / 4)
steps = feat_size_y * feat_size_x * anchors_per_location
unpacked_targets[level] = tf.reshape(
targets[count : count + steps], [feat_size_y, feat_size_x, -1]
)
count += steps
return unpacked_targets
| 19,085 | 40.311688 | 95 | py |
models | models-master/official/vision/ops/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/vision/ops/target_gather.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of target gather, which gathers targets from indices."""
import tensorflow as tf
class TargetGather:
"""Targer gather for dense object detector."""
def __call__(self, labels, match_indices, mask=None, mask_val=0.0):
"""Labels anchors with ground truth inputs.
B: batch_size
N: number of groundtruth boxes.
Args:
labels: An integer tensor with shape [N, dims] or [B, N, ...] representing
groundtruth labels.
match_indices: An integer tensor with shape [M] or [B, M] representing
match label index.
    mask: A boolean tensor with shape [M, dims] or [B, M, ...] representing
match labels.
mask_val: An integer to fill in for mask.
Returns:
      target: An integer Tensor with shape [M, dims] or [B, M, ...].
Raises:
ValueError: If `labels` is higher than rank 3.
"""
if len(labels.shape) <= 2:
return self._gather_unbatched(labels, match_indices, mask, mask_val)
elif len(labels.shape) == 3:
return self._gather_batched(labels, match_indices, mask, mask_val)
else:
raise ValueError("`TargetGather` does not support `labels` with rank "
"larger than 3, got {}".format(len(labels.shape)))
def _gather_unbatched(self, labels, match_indices, mask, mask_val):
"""Gather based on unbatched labels and boxes."""
num_gt_boxes = tf.shape(labels)[0]
def _assign_when_rows_empty():
if len(labels.shape) > 1:
mask_shape = [match_indices.shape[0], labels.shape[-1]]
else:
mask_shape = [match_indices.shape[0]]
return tf.cast(mask_val, labels.dtype) * tf.ones(
mask_shape, dtype=labels.dtype)
def _assign_when_rows_not_empty():
targets = tf.gather(labels, match_indices)
if mask is None:
return targets
else:
masked_targets = tf.cast(mask_val, labels.dtype) * tf.ones_like(
mask, dtype=labels.dtype)
return tf.where(mask, masked_targets, targets)
return tf.cond(tf.greater(num_gt_boxes, 0),
_assign_when_rows_not_empty,
_assign_when_rows_empty)
def _gather_batched(self, labels, match_indices, mask, mask_val):
"""Gather based on batched labels."""
batch_size = labels.shape[0]
if batch_size == 1:
if mask is not None:
result = self._gather_unbatched(
tf.squeeze(labels, axis=0), tf.squeeze(match_indices, axis=0),
tf.squeeze(mask, axis=0), mask_val)
else:
result = self._gather_unbatched(
tf.squeeze(labels, axis=0), tf.squeeze(match_indices, axis=0),
None, mask_val)
return tf.expand_dims(result, axis=0)
else:
indices_shape = tf.shape(match_indices)
indices_dtype = match_indices.dtype
batch_indices = (tf.expand_dims(
tf.range(indices_shape[0], dtype=indices_dtype), axis=-1) *
tf.ones([1, indices_shape[-1]], dtype=indices_dtype))
gather_nd_indices = tf.stack(
[batch_indices, match_indices], axis=-1)
targets = tf.gather_nd(labels, gather_nd_indices)
if mask is None:
return targets
else:
masked_targets = tf.cast(mask_val, labels.dtype) * tf.ones_like(
mask, dtype=labels.dtype)
return tf.where(mask, masked_targets, targets)
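# Illustrative sketch (not part of the original file): gathering class labels
# for matched anchors and filling masked entries with `mask_val`.
def _example_target_gather():
  labels = tf.constant([[1], [2], [3]], dtype=tf.int32)  # [N=3, 1]
  match_indices = tf.constant([0, 2, 1, 0], dtype=tf.int32)  # [M=4]
  mask = tf.constant([[False], [False], [True], [False]])  # [M, 1]
  gatherer = TargetGather()
  # Returns [[1], [3], [-1], [1]]: the masked entry is replaced by -1.
  return gatherer(labels, match_indices, mask, mask_val=-1)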
| 3,955 | 37.038462 | 80 | py |
models | models-master/official/vision/ops/iou_similarity_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iou_similarity.py."""
import tensorflow as tf
from official.vision.ops import iou_similarity
class IouSimilarityTest(tf.test.TestCase):
def test_similarity_unbatched(self):
boxes = tf.constant(
[
[0, 0, 1, 1],
[5, 0, 10, 5],
],
dtype=tf.float32)
gt_boxes = tf.constant(
[
[0, 0, 5, 5],
[0, 5, 5, 10],
[5, 0, 10, 5],
[5, 5, 10, 10],
],
dtype=tf.float32)
sim_calc = iou_similarity.IouSimilarity()
sim_matrix = sim_calc(boxes, gt_boxes)
self.assertAllClose(
sim_matrix.numpy(),
[[0.04, 0, 0, 0],
[0, 0, 1., 0]])
def test_similarity_batched(self):
boxes = tf.constant(
[[
[0, 0, 1, 1],
[5, 0, 10, 5],
]],
dtype=tf.float32)
gt_boxes = tf.constant(
[[
[0, 0, 5, 5],
[0, 5, 5, 10],
[5, 0, 10, 5],
[5, 5, 10, 10],
]],
dtype=tf.float32)
sim_calc = iou_similarity.IouSimilarity()
sim_matrix = sim_calc(boxes, gt_boxes)
self.assertAllClose(
sim_matrix.numpy(),
[[[0.04, 0, 0, 0],
[0, 0, 1., 0]]])
if __name__ == '__main__':
tf.test.main()
| 1,898 | 23.662338 | 74 | py |
models | models-master/official/vision/ops/preprocess_ops_3d.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for processing video dataset features."""
from typing import Optional, Tuple
import tensorflow as tf
def _sample_or_pad_sequence_indices(sequence: tf.Tensor, num_steps: int,
stride: int,
offset: tf.Tensor) -> tf.Tensor:
"""Returns indices to take for sampling or padding sequences to fixed size."""
sequence_length = tf.shape(sequence)[0]
sel_idx = tf.range(sequence_length)
# Repeats sequence until num_steps are available in total.
max_length = num_steps * stride + offset
num_repeats = tf.math.floordiv(max_length + sequence_length - 1,
sequence_length)
sel_idx = tf.tile(sel_idx, [num_repeats])
steps = tf.range(offset, offset + num_steps * stride, stride)
return tf.gather(sel_idx, steps)
def sample_linspace_sequence(sequence: tf.Tensor, num_windows: int,
num_steps: int, stride: int) -> tf.Tensor:
"""Samples `num_windows` segments from sequence with linearly spaced offsets.
The samples are concatenated in a single `tf.Tensor` in order to have the same
format structure per timestep (e.g. a single frame). If `num_steps` * `stride`
is bigger than the number of timesteps, the sequence is repeated. This
function can be used in evaluation in order to extract enough segments to span
the entire sequence.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_windows: Number of windows retrieved from the sequence.
num_steps: Number of steps (e.g. frames) to take.
stride: Distance to sample between timesteps.
Returns:
A single `tf.Tensor` with first dimension `num_windows` * `num_steps`. The
  tensor contains the concatenated list of `num_windows` tensors whose offsets
  have been linearly spaced across the input.
"""
sequence_length = tf.shape(sequence)[0]
max_offset = tf.maximum(0, sequence_length - num_steps * stride)
offsets = tf.linspace(0.0, tf.cast(max_offset, tf.float32), num_windows)
offsets = tf.cast(offsets, tf.int32)
all_indices = []
for i in range(num_windows):
all_indices.append(
_sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
stride=stride,
offset=offsets[i]))
indices = tf.concat(all_indices, axis=0)
indices.set_shape((num_windows * num_steps,))
return tf.gather(sequence, indices)
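# Illustrative sketch (not part of the original file): two windows of three
# frames drawn from a ten-frame sequence with linearly spaced offsets.
def _example_sample_linspace_sequence():
  frames = tf.range(10)
  # Offsets are linspace(0, 7, 2) = [0, 7], so the result is
  # [0, 1, 2, 7, 8, 9].
  return sample_linspace_sequence(frames, num_windows=2, num_steps=3, stride=1)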
def sample_sequence(sequence: tf.Tensor,
num_steps: int,
random: bool,
stride: int,
seed: Optional[int] = None) -> tf.Tensor:
"""Samples a single segment of size `num_steps` from a given sequence.
If `random` is not `True`, this function will simply sample the central window
of the sequence. Otherwise, a random offset will be chosen in a way that the
desired `num_steps` might be extracted from the sequence.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_steps: Number of steps (e.g. frames) to take.
random: A boolean indicating whether to random sample the single window. If
`True`, the offset is randomized. If `False`, the middle frame minus half
of `num_steps` is the first frame.
stride: Distance to sample between timesteps.
seed: A deterministic seed to use when sampling.
Returns:
A single `tf.Tensor` with first dimension `num_steps` with the sampled
segment.
"""
sequence_length = tf.shape(sequence)[0]
if random:
sequence_length = tf.cast(sequence_length, tf.float32)
frame_stride = tf.cast(stride, tf.float32)
max_offset = tf.cond(
sequence_length > (num_steps - 1) * frame_stride,
lambda: sequence_length - (num_steps - 1) * frame_stride,
lambda: sequence_length)
offset = tf.random.uniform((),
maxval=tf.cast(max_offset, dtype=tf.int32),
dtype=tf.int32,
seed=seed)
else:
offset = (sequence_length - num_steps * stride) // 2
offset = tf.maximum(0, offset)
indices = _sample_or_pad_sequence_indices(
sequence=sequence, num_steps=num_steps, stride=stride, offset=offset)
indices.set_shape((num_steps,))
return tf.gather(sequence, indices)
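# Illustrative sketch (not part of the original file): deterministic central
# sampling of four frames with stride 2 from a ten-frame sequence.
def _example_sample_sequence():
  frames = tf.range(10)
  # offset = (10 - 4 * 2) // 2 = 1, so the result is [1, 3, 5, 7].
  return sample_sequence(frames, num_steps=4, random=False, stride=2)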
def sample_segment_sequence(sequence: tf.Tensor,
num_frames: int,
is_training: bool,
seed: Optional[int] = None) -> tf.Tensor:
"""Samples a single segment of size `num_frames` from a given sequence.
This function follows the temporal segment network sampling style
(https://arxiv.org/abs/1608.00859). The video sequence would be divided into
  `num_frames` non-overlapping segments of the same length. If `is_training` is
  `True`, one frame is randomly sampled from each segment, and when
  `is_training` is `False`, only the center frame of each segment is sampled.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_frames: Number of frames to take.
is_training: A boolean indicating sampling in training or evaluation mode.
seed: A deterministic seed to use when sampling.
Returns:
A single `tf.Tensor` with first dimension `num_steps` with the sampled
segment.
"""
sequence_length = tf.shape(sequence)[0]
sequence_length = tf.cast(sequence_length, tf.float32)
segment_length = tf.cast(sequence_length // num_frames, tf.float32)
segment_indices = tf.linspace(0.0, sequence_length, num_frames + 1)
segment_indices = tf.cast(segment_indices, tf.int32)
if is_training:
segment_length = tf.cast(segment_length, tf.int32)
# pylint:disable=g-long-lambda
segment_offsets = tf.cond(
segment_length == 0,
lambda: tf.zeros(shape=(num_frames,), dtype=tf.int32),
lambda: tf.random.uniform(
shape=(num_frames,),
minval=0,
maxval=segment_length,
dtype=tf.int32,
seed=seed))
    # pylint:enable=g-long-lambda
else:
    # Only sample the central frame during inference to be deterministic.
segment_offsets = tf.ones(
shape=(num_frames,), dtype=tf.int32) * tf.cast(
segment_length // 2, dtype=tf.int32)
indices = segment_indices[:-1] + segment_offsets
indices.set_shape((num_frames,))
return tf.gather(sequence, indices)
def decode_jpeg(image_string: tf.Tensor, channels: int = 0) -> tf.Tensor:
"""Decodes JPEG raw bytes string into a RGB uint8 Tensor.
Args:
image_string: A `tf.Tensor` of type strings with the raw JPEG bytes where
the first dimension is timesteps.
channels: Number of channels of the JPEG image. Allowed values are 0, 1 and
3. If 0, the number of channels will be calculated at runtime and no
static shape is set.
Returns:
A Tensor of shape [T, H, W, C] of type uint8 with the decoded images.
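  Example (an illustrative sketch; the input is built with tf.io.encode_jpeg):
    images = tf.zeros([4, 8, 8, 3], dtype=tf.uint8)
    raw = tf.map_fn(tf.io.encode_jpeg, images, fn_output_signature=tf.string)
    decoded = decode_jpeg(raw, channels=3)
    # decoded has shape [4, 8, 8, 3] and dtype tf.uint8.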
"""
  return tf.map_fn(
      lambda x: tf.image.decode_jpeg(x, channels=channels),
      image_string,
      fn_output_signature=tf.uint8)
def crop_image(frames: tf.Tensor,
target_height: int,
target_width: int,
random: bool = False,
num_crops: int = 1,
seed: Optional[int] = None) -> tf.Tensor:
"""Crops the image sequence of images.
  If the requested size is bigger than the image size, the image is padded with
  zeros. When not random cropping, a central crop is performed if `num_crops`
  is 1, and a three-crop if `num_crops` is 3.
Args:
frames: A Tensor of dimension [timesteps, in_height, in_width, channels].
target_height: Target cropped image height.
target_width: Target cropped image width.
random: A boolean indicating if crop should be randomized.
num_crops: Number of crops (support 1 for central crop and 3 for 3-crop).
seed: A deterministic seed to use when random cropping.
Returns:
    A Tensor of shape [timesteps, out_height, out_width, channels] of the same
    type as `frames`, with the cropped images.
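  Example (an illustrative sketch of the three-crop mode):
    frames = tf.zeros([8, 256, 320, 3], dtype=tf.uint8)
    crops = crop_image(frames, 256, 256, random=False, num_crops=3)
    # crops has shape [24, 256, 256, 3]: the left, center and right crops
    # stacked along the first (time) axis.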
"""
if random:
# Random spatial crop.
shape = tf.shape(frames)
    # If a static shape is available (e.g. when this method is called from the
    # `add_image` method), it is used to produce an output tensor with a static
    # shape.
static_shape = frames.shape.as_list()
seq_len = shape[0] if static_shape[0] is None else static_shape[0]
channels = shape[3] if static_shape[3] is None else static_shape[3]
frames = tf.image.random_crop(
frames, (seq_len, target_height, target_width, channels), seed)
else:
if num_crops == 1:
# Central crop or pad.
frames = tf.image.resize_with_crop_or_pad(frames, target_height,
target_width)
elif num_crops == 3:
# Three-crop evaluation.
shape = tf.shape(frames)
static_shape = frames.shape.as_list()
seq_len = shape[0] if static_shape[0] is None else static_shape[0]
height = shape[1] if static_shape[1] is None else static_shape[1]
width = shape[2] if static_shape[2] is None else static_shape[2]
channels = shape[3] if static_shape[3] is None else static_shape[3]
size = tf.convert_to_tensor(
(seq_len, target_height, target_width, channels))
offset_1 = tf.broadcast_to([0, 0, 0, 0], [4])
# pylint:disable=g-long-lambda
offset_2 = tf.cond(
tf.greater_equal(height, width),
true_fn=lambda: tf.broadcast_to([
0, tf.cast(height, tf.float32) / 2 - target_height // 2, 0, 0
], [4]),
false_fn=lambda: tf.broadcast_to([
0, 0, tf.cast(width, tf.float32) / 2 - target_width // 2, 0
], [4]))
offset_3 = tf.cond(
tf.greater_equal(height, width),
true_fn=lambda: tf.broadcast_to(
[0, tf.cast(height, tf.float32) - target_height, 0, 0], [4]),
false_fn=lambda: tf.broadcast_to(
[0, 0, tf.cast(width, tf.float32) - target_width, 0], [4]))
      # pylint:enable=g-long-lambda
crops = []
for offset in [offset_1, offset_2, offset_3]:
offset = tf.cast(tf.math.round(offset), tf.int32)
crops.append(tf.slice(frames, offset, size))
frames = tf.concat(crops, axis=0)
else:
raise NotImplementedError(
f"Only 1-crop and 3-crop are supported. Found {num_crops!r}.")
return frames
def resize_smallest(frames: tf.Tensor, min_resize: int) -> tf.Tensor:
"""Resizes frames so that min(`height`, `width`) is equal to `min_resize`.
  This function will not do anything if min(`height`, `width`) is already
  equal to `min_resize`, which saves compute time.
Args:
frames: A Tensor of dimension [timesteps, input_h, input_w, channels].
min_resize: Minimum size of the final image dimensions.
Returns:
A Tensor of shape [timesteps, output_h, output_w, channels] of type
frames.dtype where min(output_h, output_w) = min_resize.
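  Example (an illustrative sketch):
    frames = tf.zeros([8, 240, 320, 3])
    resized = resize_smallest(frames, min_resize=224)
    # resized has shape [8, 224, 298, 3], since 240 -> 224 and
    # 320 * 224 // 240 = 298.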
"""
shape = tf.shape(frames)
input_h = shape[1]
input_w = shape[2]
output_h = tf.maximum(min_resize, (input_h * min_resize) // input_w)
output_w = tf.maximum(min_resize, (input_w * min_resize) // input_h)
def resize_fn():
frames_resized = tf.image.resize(frames, (output_h, output_w))
return tf.cast(frames_resized, frames.dtype)
should_resize = tf.math.logical_or(
tf.not_equal(input_w, output_w), tf.not_equal(input_h, output_h))
frames = tf.cond(should_resize, resize_fn, lambda: frames)
return frames
def random_crop_resize(frames: tf.Tensor, output_h: int, output_w: int,
num_frames: int, num_channels: int,
aspect_ratio: Tuple[float, float],
area_range: Tuple[float, float]) -> tf.Tensor:
"""First crops clip with jittering and then resizes to (output_h, output_w).
Args:
frames: A Tensor of dimension [timesteps, input_h, input_w, channels].
output_h: Resized image height.
output_w: Resized image width.
num_frames: Number of input frames per clip.
num_channels: Number of channels of the clip.
aspect_ratio: Float tuple with the aspect range for cropping.
area_range: Float tuple with the area range for cropping.
Returns:
A Tensor of shape [timesteps, output_h, output_w, channels] of type
frames.dtype.
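  Example (an illustrative sketch; the aspect ratio and area ranges below are
  arbitrary demo values):
    frames = tf.zeros([8, 256, 320, 3], dtype=tf.uint8)
    crop = random_crop_resize(
        frames, output_h=224, output_w=224, num_frames=8, num_channels=3,
        aspect_ratio=(0.75, 1.33), area_range=(0.49, 1.0))
    # crop has static shape [8, 224, 224, 3].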
"""
shape = tf.shape(frames)
seq_len, _, _, channels = shape[0], shape[1], shape[2], shape[3]
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
factor = output_w / output_h
aspect_ratio = (aspect_ratio[0] * factor, aspect_ratio[1] * factor)
sample_distorted_bbox = tf.image.sample_distorted_bounding_box(
shape[1:],
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=aspect_ratio,
area_range=area_range,
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bbox
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
size = tf.convert_to_tensor((seq_len, target_height, target_width, channels))
offset = tf.convert_to_tensor((0, offset_y, offset_x, 0))
frames = tf.slice(frames, offset, size)
frames = tf.cast(tf.image.resize(frames, (output_h, output_w)), frames.dtype)
frames.set_shape((num_frames, output_h, output_w, num_channels))
return frames
def random_flip_left_right(frames: tf.Tensor,
seed: Optional[int] = None) -> tf.Tensor:
"""Flips all the frames with a probability of 50%.
Args:
frames: A Tensor of shape [timesteps, input_h, input_w, channels].
seed: A seed to use for the random sampling.
Returns:
    A Tensor of shape [timesteps, output_h, output_w, channels], possibly
    flipped left to right.
"""
is_flipped = tf.random.uniform((),
minval=0,
maxval=2,
dtype=tf.int32,
seed=seed)
frames = tf.cond(
tf.equal(is_flipped, 1),
true_fn=lambda: tf.image.flip_left_right(frames),
false_fn=lambda: frames)
return frames
def normalize_image(frames: tf.Tensor,
zero_centering_image: bool,
dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Normalizes images.
Args:
frames: A Tensor of numbers.
zero_centering_image: If True, results are in [-1, 1], if False, results are
in [0, 1].
dtype: Type of output Tensor.
Returns:
A Tensor of same shape as the input and of the given type.
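  Example (an illustrative sketch):
    frames = tf.constant([[0, 128, 255]], dtype=tf.uint8)
    normalize_image(frames, zero_centering_image=True)
    # => [[-1.0, 0.0039..., 1.0]], since 128 * (2 / 255) - 1 = 1 / 255.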
"""
frames = tf.cast(frames, dtype)
if zero_centering_image:
return frames * (2.0 / 255.0) - 1.0
else:
return frames / 255.0
| 15,378 | 37.256219 | 80 | py |
models | models-master/official/vision/ops/preprocess_ops_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocess_ops.py."""
import io
# Import libraries
from absl.testing import parameterized
import numpy as np
from PIL import Image
import tensorflow as tf
from official.vision.ops import preprocess_ops
def _encode_image(image_array, fmt):
image = Image.fromarray(image_array)
with io.BytesIO() as output:
image.save(output, format=fmt)
return output.getvalue()
class InputUtilsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
([1], 10),
([1, 2], 10),
([1, 2, 3], 10),
([11], 10),
([12, 2], 10),
([13, 2, 3], 10),
)
def test_pad_to_fixed_size(self, input_shape, output_size):
# Copies input shape to padding shape.
clip_shape = input_shape[:]
clip_shape[0] = min(output_size, clip_shape[0])
padding_shape = input_shape[:]
padding_shape[0] = max(output_size - input_shape[0], 0)
expected_outputs = np.concatenate(
[np.ones(clip_shape), np.zeros(padding_shape)], axis=0)
data = tf.ones(input_shape)
output_data = preprocess_ops.clip_or_pad_to_fixed_size(
data, output_size, constant_values=0)
output_data = output_data.numpy()
self.assertAllClose(output_size, output_data.shape[0])
self.assertAllClose(expected_outputs, output_data)
@parameterized.parameters(
(100, 200, 100, 200, 32, 1.0, 1.0, 128, 224),
(100, 256, 128, 256, 32, 1.0, 1.0, 128, 256),
(200, 512, 200, 128, 32, 0.25, 0.25, 224, 128),
)
  def test_resize_and_crop_image_rectangular_case(self, input_height,
input_width, desired_height,
desired_width, stride,
scale_y, scale_x,
output_height, output_width):
image = tf.convert_to_tensor(
np.random.rand(input_height, input_width, 3))
desired_size = (desired_height, desired_width)
resized_image, image_info = preprocess_ops.resize_and_crop_image(
image,
desired_size=desired_size,
padded_size=preprocess_ops.compute_padded_size(desired_size, stride))
resized_image_shape = tf.shape(resized_image)
self.assertAllEqual(
[output_height, output_width, 3],
resized_image_shape.numpy())
self.assertNDArrayNear(
[[input_height, input_width],
[desired_height, desired_width],
[scale_y, scale_x],
[0.0, 0.0]],
image_info.numpy(),
1e-5)
@parameterized.parameters(
(100, 200, 220, 220, 32, 1.1, 1.1, 224, 224),
(512, 512, 1024, 1024, 32, 2.0, 2.0, 1024, 1024),
)
def test_resize_and_crop_image_square_case(self, input_height, input_width,
desired_height, desired_width,
stride, scale_y, scale_x,
output_height, output_width):
image = tf.convert_to_tensor(
np.random.rand(input_height, input_width, 3))
desired_size = (desired_height, desired_width)
resized_image, image_info = preprocess_ops.resize_and_crop_image(
image,
desired_size=desired_size,
padded_size=preprocess_ops.compute_padded_size(desired_size, stride))
resized_image_shape = tf.shape(resized_image)
self.assertAllEqual(
[output_height, output_width, 3],
resized_image_shape.numpy())
self.assertNDArrayNear(
[[input_height, input_width],
[desired_height, desired_width],
[scale_y, scale_x],
[0.0, 0.0]],
image_info.numpy(),
1e-5)
@parameterized.parameters((1,), (2,))
def test_resize_and_crop_image_tensor_desired_size(self, aug_scale_max):
image = tf.convert_to_tensor(np.random.rand(100, 200, 3))
desired_size = tf.convert_to_tensor((220, 220), dtype=tf.int32)
resized_image, image_info = preprocess_ops.resize_and_crop_image(
image,
desired_size=desired_size,
padded_size=preprocess_ops.compute_padded_size(desired_size, 32),
aug_scale_max=aug_scale_max)
resized_image_shape = tf.shape(resized_image)
self.assertAllEqual([224, 224, 3], resized_image_shape.numpy())
self.assertAllEqual([[100, 200], [220, 220]], image_info[:2].numpy())
if aug_scale_max == 1: # No random jittering.
self.assertNDArrayNear(
[[1.1, 1.1], [0.0, 0.0]],
image_info[2:].numpy(),
1e-5,
)
@parameterized.parameters(
(100, 200, 100, 300, 32, 1.0, 1.0, 100, 200, 128, 320),
(200, 100, 100, 300, 32, 1.0, 1.0, 200, 100, 320, 128),
(100, 200, 80, 100, 32, 0.5, 0.5, 50, 100, 96, 128),
(200, 100, 80, 100, 32, 0.5, 0.5, 100, 50, 128, 96),
)
def test_resize_and_crop_image_v2(self, input_height, input_width, short_side,
long_side, stride, scale_y, scale_x,
desired_height, desired_width,
output_height, output_width):
image = tf.convert_to_tensor(
np.random.rand(input_height, input_width, 3))
image_shape = tf.shape(image)[0:2]
desired_size = tf.where(
tf.greater(image_shape[0], image_shape[1]),
tf.constant([long_side, short_side], dtype=tf.int32),
tf.constant([short_side, long_side], dtype=tf.int32))
resized_image, image_info = preprocess_ops.resize_and_crop_image_v2(
image,
short_side=short_side,
long_side=long_side,
padded_size=preprocess_ops.compute_padded_size(desired_size, stride))
resized_image_shape = tf.shape(resized_image)
self.assertAllEqual(
[output_height, output_width, 3],
resized_image_shape.numpy())
self.assertNDArrayNear(
[[input_height, input_width],
[desired_height, desired_width],
[scale_y, scale_x],
[0.0, 0.0]],
image_info.numpy(),
1e-5)
@parameterized.parameters(
(400, 600), (600, 400),
)
def test_center_crop_image(self, input_height, input_width):
image = tf.convert_to_tensor(
np.random.rand(input_height, input_width, 3))
cropped_image = preprocess_ops.center_crop_image(image)
cropped_image_shape = tf.shape(cropped_image)
self.assertAllEqual([350, 350, 3], cropped_image_shape.numpy())
@parameterized.parameters(
(400, 600), (600, 400),
)
def test_center_crop_image_v2(self, input_height, input_width):
image_bytes = tf.constant(
_encode_image(
np.uint8(np.random.rand(input_height, input_width, 3) * 255),
fmt='JPEG'),
dtype=tf.string)
cropped_image = preprocess_ops.center_crop_image_v2(
image_bytes, tf.constant([input_height, input_width, 3], tf.int32))
cropped_image_shape = tf.shape(cropped_image)
self.assertAllEqual([350, 350, 3], cropped_image_shape.numpy())
@parameterized.parameters(
(400, 600), (600, 400),
)
def test_random_crop_image(self, input_height, input_width):
image = tf.convert_to_tensor(
np.random.rand(input_height, input_width, 3))
_ = preprocess_ops.random_crop_image(image)
@parameterized.parameters(
(400, 600), (600, 400),
)
def test_random_crop_image_v2(self, input_height, input_width):
image_bytes = tf.constant(
_encode_image(
np.uint8(np.random.rand(input_height, input_width, 3) * 255),
fmt='JPEG'),
dtype=tf.string)
_ = preprocess_ops.random_crop_image_v2(
image_bytes, tf.constant([input_height, input_width, 3], tf.int32))
@parameterized.parameters((400, 600, 0), (400, 600, 0.4), (600, 400, 1.4))
def testColorJitter(self, input_height, input_width, color_jitter):
image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))
jittered_image = preprocess_ops.color_jitter(image, color_jitter,
color_jitter, color_jitter)
assert jittered_image.shape == image.shape
@parameterized.parameters((400, 600, 0), (400, 600, 0.4), (600, 400, 1))
def testSaturation(self, input_height, input_width, saturation):
image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))
jittered_image = preprocess_ops._saturation(image, saturation)
assert jittered_image.shape == image.shape
@parameterized.parameters((640, 640, 20), (1280, 1280, 30))
def test_random_crop(self, input_height, input_width, num_boxes):
image = tf.convert_to_tensor(np.random.rand(input_height, input_width, 3))
boxes_height = np.random.randint(0, input_height, size=(num_boxes, 1))
top = np.random.randint(0, high=(input_height - boxes_height))
down = top + boxes_height
boxes_width = np.random.randint(0, input_width, size=(num_boxes, 1))
left = np.random.randint(0, high=(input_width - boxes_width))
right = left + boxes_width
boxes = tf.constant(
np.concatenate([top, left, down, right], axis=-1), tf.float32)
labels = tf.constant(
np.random.randint(low=0, high=num_boxes, size=(num_boxes,)), tf.int64)
_ = preprocess_ops.random_crop(image, boxes, labels)
@parameterized.parameters(
((640, 640, 3), (1000, 1000), None, (1000, 1000, 3)),
((1280, 640, 3), 320, None, (640, 320, 3)),
((640, 1280, 3), 320, None, (320, 640, 3)),
((640, 640, 3), 320, 100, (100, 100, 3)))
def test_resize_image(self, input_shape, size, max_size, expected_shape):
resized_img, image_info = preprocess_ops.resize_image(
tf.zeros((input_shape)), size, max_size)
self.assertAllEqual(tf.shape(resized_img), expected_shape)
self.assertAllEqual(image_info[0], input_shape[:-1])
self.assertAllEqual(image_info[1], expected_shape[:-1])
self.assertAllEqual(
image_info[2],
np.array(expected_shape[:-1]) / np.array(input_shape[:-1]))
self.assertAllEqual(image_info[3], [0, 0])
def test_resize_and_crop_masks(self):
# shape: (2, 1, 4, 3)
masks = tf.constant([[[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
]], [[
[12, 13, 14],
[15, 16, 17],
[18, 19, 20],
[21, 22, 23],
]]])
output = preprocess_ops.resize_and_crop_masks(
masks, image_scale=[2.0, 0.5], output_size=[2, 3], offset=[1, 0])
# shape: (2, 2, 3, 3)
expected_output = tf.constant([
[
[
[3, 4, 5],
[9, 10, 11],
[0, 0, 0],
],
[
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
],
],
[
[
[15, 16, 17],
[21, 22, 23],
[0, 0, 0],
],
[
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
],
],
])
self.assertAllEqual(expected_output, output)
@parameterized.parameters(
(100, 200, 1.0, 224, 224, 224, 224),
(512, 512, 1.0, 1024, 1024, 1024, 1024),
)
def test_deit3_resize_center_crop(
self, input_height, input_width, center_crop_fraction,
desired_height, desired_width,
output_height, output_width):
    # Make sure that with center_crop_fraction = 1, the result has the desired
    # resolution.
image = tf.convert_to_tensor(
np.random.rand(input_height, input_width, 3))
desired_size = (desired_height, desired_width)
center_cropped = preprocess_ops.center_crop_image(
image,
center_crop_fraction=center_crop_fraction)
resized_image = tf.image.resize(
center_cropped, desired_size, method=tf.image.ResizeMethod.BICUBIC)
resized_image_shape = tf.shape(resized_image)
self.assertAllEqual(
[output_height, output_width, 3],
resized_image_shape.numpy())
if __name__ == '__main__':
tf.test.main()
| 12,556 | 36.372024 | 80 | py |
models | models-master/official/vision/ops/iou_similarity.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Region Similarity Calculators."""
import tensorflow as tf
def area(box):
"""Computes area of boxes.
B: batch_size
N: number of boxes
Args:
box: a float Tensor with [N, 4], or [B, N, 4].
Returns:
a float Tensor with [N], or [B, N]
"""
with tf.name_scope('Area'):
y_min, x_min, y_max, x_max = tf.split(
value=box, num_or_size_splits=4, axis=-1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), axis=-1)
def intersection(gt_boxes, boxes):
"""Compute pairwise intersection areas between boxes.
B: batch_size
N: number of groundtruth boxes.
M: number of anchor boxes.
Args:
gt_boxes: a float Tensor with [N, 4], or [B, N, 4]
boxes: a float Tensor with [M, 4], or [B, M, 4]
Returns:
a float Tensor with shape [N, M] or [B, N, M] representing pairwise
intersections.
"""
with tf.name_scope('Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=-1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxes, num_or_size_splits=4, axis=-1)
boxes_rank = len(boxes.shape)
perm = [1, 0] if boxes_rank == 2 else [0, 2, 1]
# [N, M] or [B, N, M]
y_min_max = tf.minimum(y_max1, tf.transpose(y_max2, perm))
y_max_min = tf.maximum(y_min1, tf.transpose(y_min2, perm))
x_min_max = tf.minimum(x_max1, tf.transpose(x_max2, perm))
x_max_min = tf.maximum(x_min1, tf.transpose(x_min2, perm))
intersect_heights = y_min_max - y_max_min
intersect_widths = x_min_max - x_max_min
zeros_t = tf.cast(0, intersect_heights.dtype)
intersect_heights = tf.maximum(zeros_t, intersect_heights)
intersect_widths = tf.maximum(zeros_t, intersect_widths)
return intersect_heights * intersect_widths
def iou(gt_boxes, boxes):
"""Computes pairwise intersection-over-union between box collections.
Args:
gt_boxes: a float Tensor with [N, 4].
boxes: a float Tensor with [M, 4].
Returns:
a Tensor with shape [N, M] representing pairwise iou scores.
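  Example (an illustrative sketch):
    gt = tf.constant([[0., 0., 1., 1.]])
    anchors = tf.constant([[0., 0., 1., 1.], [0.5, 0.5, 1.5, 1.5]])
    iou(gt, anchors)
    # => [[1.0, 0.142857]]: the second pair intersects by 0.25 over a union
    # of 1.75.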
"""
with tf.name_scope('IOU'):
intersections = intersection(gt_boxes, boxes)
gt_boxes_areas = area(gt_boxes)
boxes_areas = area(boxes)
boxes_rank = len(boxes_areas.shape)
boxes_axis = 1 if (boxes_rank == 2) else 0
gt_boxes_areas = tf.expand_dims(gt_boxes_areas, -1)
boxes_areas = tf.expand_dims(boxes_areas, boxes_axis)
unions = gt_boxes_areas + boxes_areas
unions = unions - intersections
return tf.where(
tf.equal(intersections, 0.0), tf.zeros_like(intersections),
tf.truediv(intersections, unions))
class IouSimilarity:
"""Class to compute similarity based on Intersection over Union (IOU) metric.
"""
def __init__(self, mask_val=-1):
self.mask_val = mask_val
def __call__(self, boxes_1, boxes_2, boxes_1_masks=None, boxes_2_masks=None):
"""Compute pairwise IOU similarity between ground truth boxes and anchors.
B: batch_size
N: Number of groundtruth boxes.
M: Number of anchor boxes.
Args:
boxes_1: a float Tensor with M or B * M boxes.
boxes_2: a float Tensor with N or B * N boxes, the rank must be less than
or equal to rank of `boxes_1`.
boxes_1_masks: a boolean Tensor with M or B * M boxes. Optional.
boxes_2_masks: a boolean Tensor with N or B * N boxes. Optional.
Returns:
A Tensor with shape [M, N] or [B, M, N] representing pairwise
iou scores, anchor per row and groundtruth_box per colulmn.
Input shape:
boxes_1: [N, 4], or [B, N, 4]
boxes_2: [M, 4], or [B, M, 4]
boxes_1_masks: [N, 1], or [B, N, 1]
boxes_2_masks: [M, 1], or [B, M, 1]
Output shape:
[M, N], or [B, M, N]
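    Example (an illustrative sketch):
      similarity = IouSimilarity()
      gt = tf.constant([[0., 0., 1., 1.]])
      anchors = tf.constant([[0., 0., 1., 1.], [1., 1., 2., 2.]])
      similarity(gt, anchors)  # => [[1.0, 0.0]], shape [N, M] = [1, 2].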
"""
boxes_1 = tf.cast(boxes_1, tf.float32)
boxes_2 = tf.cast(boxes_2, tf.float32)
boxes_1_rank = len(boxes_1.shape)
boxes_2_rank = len(boxes_2.shape)
if boxes_1_rank < 2 or boxes_1_rank > 3:
      raise ValueError(
          '`groundtruth_boxes` must be rank 2 or 3, got {}'.format(
              boxes_1_rank))
if boxes_2_rank < 2 or boxes_2_rank > 3:
raise ValueError(
'`anchors` must be rank 2 or 3, got {}'.format(boxes_2_rank))
if boxes_1_rank < boxes_2_rank:
      raise ValueError('Unbatched `groundtruth_boxes` with batched `anchors` '
                       'is not a valid use case, got groundtruth_boxes '
                       'rank {}, and anchors rank {}'.format(
                           boxes_1_rank, boxes_2_rank))
result = iou(boxes_1, boxes_2)
if boxes_1_masks is None and boxes_2_masks is None:
return result
background_mask = None
mask_val_t = tf.cast(self.mask_val, result.dtype) * tf.ones_like(result)
perm = [1, 0] if boxes_2_rank == 2 else [0, 2, 1]
if boxes_1_masks is not None and boxes_2_masks is not None:
background_mask = tf.logical_or(boxes_1_masks,
tf.transpose(boxes_2_masks, perm))
elif boxes_1_masks is not None:
background_mask = boxes_1_masks
else:
background_mask = tf.logical_or(
tf.zeros(tf.shape(boxes_2)[:-1], dtype=tf.bool),
tf.transpose(boxes_2_masks, perm))
return tf.where(background_mask, mask_val_t, result)
| 5,841 | 33.77381 | 80 | py |
models | models-master/official/vision/ops/anchor_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for anchor.py."""
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.ops import anchor
class AnchorTest(parameterized.TestCase, tf.test.TestCase):
# The set of parameters are tailored for the MLPerf configuration, where
# the number of anchors is 495132, rpn_batch_size_per_im=256, and
# rpn_fg_fraction=0.5.
@parameterized.parameters(
(512, 25, 25, 25, 25, (512, 512)),
(512, 25, 25, 25, 25, (512, 640)),
(512, 25, 25, 25, 25, (640, 512)),
(495132, 100, 100, 100, 100, (512, 512)),
(495132, 200, 100, 128, 100, (512, 512)),
(495132, 100, 120, 100, 120, (512, 512)),
(495132, 100, 200, 100, 156, (512, 512)),
(495132, 200, 200, 128, 128, (512, 512)),
)
def testAnchorRpnSample(self, num_anchors, num_positives,
num_negatives, expected_positives,
expected_negatives, image_size):
match_results_np = np.empty([num_anchors])
match_results_np.fill(-2)
match_results_np[:num_positives] = 0
match_results_np[num_positives:num_positives + num_negatives] = -1
match_results = tf.convert_to_tensor(value=match_results_np, dtype=tf.int32)
anchor_labeler = anchor.RpnAnchorLabeler(
match_threshold=0.7,
unmatched_threshold=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5)
rpn_sample_op = anchor_labeler._get_rpn_samples(match_results)
labels = [v.numpy() for v in rpn_sample_op]
self.assertLen(labels[0], num_anchors)
positives = np.sum(np.array(labels[0]) == 1)
negatives = np.sum(np.array(labels[0]) == 0)
self.assertEqual(positives, expected_positives)
self.assertEqual(negatives, expected_negatives)
@parameterized.parameters(
# Single scale anchor.
(5, 5, 1, [1.0], 2.0,
[[-16, -16, 48, 48], [-16, 16, 48, 80],
[16, -16, 80, 48], [16, 16, 80, 80]]),
# Multi scale anchor.
(5, 6, 1, [1.0], 2.0,
[[-16, -16, 48, 48], [-16, 16, 48, 80],
[16, -16, 80, 48], [16, 16, 80, 80], [-32, -32, 96, 96]]),
      # Multi aspect ratio anchor.
(6, 6, 1, [1.0, 4.0, 0.25], 2.0,
[[-32, -32, 96, 96], [-0, -96, 64, 160], [-96, -0, 160, 64]]),
)
def testAnchorGeneration(self, min_level, max_level, num_scales,
aspect_ratios, anchor_size, expected_boxes):
image_size = [64, 64]
anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
anchor_size, image_size)
boxes = anchors.boxes.numpy()
self.assertEqual(expected_boxes, boxes.tolist())
@parameterized.parameters(
# Single scale anchor.
(5, 5, 1, [1.0], 2.0,
[[-16, -16, 48, 48], [-16, 16, 48, 80],
[16, -16, 80, 48], [16, 16, 80, 80]]),
# Multi scale anchor.
(5, 6, 1, [1.0], 2.0,
[[-16, -16, 48, 48], [-16, 16, 48, 80],
[16, -16, 80, 48], [16, 16, 80, 80], [-32, -32, 96, 96]]),
      # Multi aspect ratio anchor.
(6, 6, 1, [1.0, 4.0, 0.25], 2.0,
[[-32, -32, 96, 96], [-0, -96, 64, 160], [-96, -0, 160, 64]]),
)
def testAnchorGenerationWithImageSizeAsTensor(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
expected_boxes):
image_size = tf.constant([64, 64], tf.int32)
anchors = anchor.Anchor(min_level, max_level, num_scales, aspect_ratios,
anchor_size, image_size)
boxes = anchors.boxes.numpy()
self.assertEqual(expected_boxes, boxes.tolist())
@parameterized.parameters(
(3, 6, 2, [1.0], 2.0, False),
(3, 6, 2, [1.0], 2.0, True),
)
def testLabelAnchors(self, min_level, max_level, num_scales, aspect_ratios,
anchor_size, has_attribute):
input_size = [512, 512]
ground_truth_class_id = 2
attribute_name = 'depth'
ground_truth_depth = 3.0
# The matched anchors are the anchors used as ground truth and the anchors
# at the next octave scale on the same location.
expected_anchor_locations = [[0, 0, 0], [0, 0, 1]]
anchor_gen = anchor.build_anchor_generator(min_level, max_level, num_scales,
aspect_ratios, anchor_size)
anchor_boxes = anchor_gen(input_size)
anchor_labeler = anchor.AnchorLabeler()
# Uses the first anchors as ground truth. The ground truth should map to
# two anchors with two intermediate scales at the same location.
gt_boxes = anchor_boxes['3'][0:1, 0, 0:4]
gt_classes = tf.constant([[ground_truth_class_id]], dtype=tf.float32)
gt_attributes = {
attribute_name: tf.constant([[ground_truth_depth]], dtype=tf.float32)
} if has_attribute else {}
(cls_targets, box_targets, att_targets, _,
box_weights) = anchor_labeler.label_anchors(anchor_boxes, gt_boxes,
gt_classes, gt_attributes)
for k, v in cls_targets.items():
cls_targets[k] = v.numpy()
for k, v in box_targets.items():
box_targets[k] = v.numpy()
box_weights = box_weights.numpy()
anchor_locations = np.vstack(
np.where(cls_targets[str(min_level)] > -1)).transpose()
self.assertAllClose(expected_anchor_locations, anchor_locations)
# Two anchor boxes on min_level got matched to the gt_boxes.
self.assertAllClose(tf.reduce_sum(box_weights), 2)
if has_attribute:
self.assertIn(attribute_name, att_targets)
for k, v in att_targets[attribute_name].items():
att_targets[attribute_name][k] = v.numpy()
anchor_locations = np.vstack(
np.where(
att_targets[attribute_name][str(min_level)] > 0.0)).transpose()
self.assertAllClose(expected_anchor_locations, anchor_locations)
else:
self.assertEmpty(att_targets)
@parameterized.parameters(
(3, 7, [.5, 1., 2.], 2, 8, (256, 256)),
(3, 8, [1.], 3, 32, (512, 512)),
(3, 3, [1.], 2, 4, (32, 32)),
)
def testEquivalentResult(self, min_level, max_level, aspect_ratios,
num_scales, anchor_size, image_size):
anchor_gen = anchor.build_anchor_generator(
min_level=min_level,
max_level=max_level,
num_scales=num_scales,
aspect_ratios=aspect_ratios,
anchor_size=anchor_size)
anchors = anchor_gen(image_size)
expected_anchor_gen = anchor.Anchor(min_level, max_level, num_scales,
aspect_ratios, anchor_size, image_size)
expected_anchors = expected_anchor_gen.multilevel_boxes
for k in expected_anchors.keys():
self.assertAllClose(expected_anchors[k], anchors[k])
if __name__ == '__main__':
tf.test.main()
| 7,623 | 39.770053 | 80 | py |
models | models-master/official/vision/ops/spatial_transform_ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spatial transform ops."""
from typing import Dict, Tuple
import numpy as np
import tensorflow as tf
from official.vision.ops.box_ops import bbox2mask
_EPSILON = 1e-8
def _feature_bilinear_interpolation(features: tf.Tensor, kernel_y: tf.Tensor,
kernel_x: tf.Tensor) -> tf.Tensor:
"""Feature bilinear interpolation.
The RoIAlign feature f can be computed by bilinear interpolation
of four neighboring feature points f0, f1, f2, and f3.
f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
[f10, f11]]
f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
kernel_y = [hy, ly]
kernel_x = [hx, lx]
Args:
features: The features are in shape of [batch_size, num_boxes, output_size *
2, output_size * 2, num_filters].
kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1].
kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1].
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
"""
features_shape = tf.shape(features)
batch_size, num_boxes, output_size, num_filters = (
features_shape[0], features_shape[1], features_shape[2],
features_shape[4])
output_size = output_size // 2
kernel_y = tf.reshape(kernel_y, [batch_size, num_boxes, output_size * 2, 1])
kernel_x = tf.reshape(kernel_x, [batch_size, num_boxes, 1, output_size * 2])
# Use implicit broadcast to generate the interpolation kernel. The
# multiplier `4` is for avg pooling.
interpolation_kernel = kernel_y * kernel_x * 4
# Interpolate the gathered features with computed interpolation kernels.
features *= tf.cast(
tf.expand_dims(interpolation_kernel, axis=-1), dtype=features.dtype)
features = tf.reshape(
features,
[batch_size * num_boxes, output_size * 2, output_size * 2, num_filters])
features = tf.nn.avg_pool(features, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
features = tf.reshape(
features, [batch_size, num_boxes, output_size, output_size, num_filters])
return features
def _compute_grid_positions(
boxes: tf.Tensor, boundaries: tf.Tensor, output_size: int,
sample_offset: float) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes the grid position w.r.t.
the corresponding feature map.
Args:
boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the
information of each box w.r.t. the corresponding feature map.
boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left
corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float)
in terms of the number of pixels of the corresponding feature map size.
boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing
the boundary (in (y, x)) of the corresponding feature map for each box.
      Any resampled grid points that go beyond the boundary will be clipped.
output_size: a scalar indicating the output crop size.
sample_offset: a float number in [0, 1] indicates the subpixel sample offset
from grid point.
Returns:
kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1].
kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1].
box_grid_y0y1: Tensor of size [batch_size, boxes, output_size, 2]
box_grid_x0x1: Tensor of size [batch_size, boxes, output_size, 2]
"""
boxes_shape = tf.shape(boxes)
batch_size, num_boxes = boxes_shape[0], boxes_shape[1]
if batch_size is None:
batch_size = tf.shape(boxes)[0]
box_grid_x = []
box_grid_y = []
for i in range(output_size):
box_grid_x.append(boxes[:, :, 1] +
(i + sample_offset) * boxes[:, :, 3] / output_size)
box_grid_y.append(boxes[:, :, 0] +
(i + sample_offset) * boxes[:, :, 2] / output_size)
box_grid_x = tf.stack(box_grid_x, axis=2)
box_grid_y = tf.stack(box_grid_y, axis=2)
box_grid_y0 = tf.floor(box_grid_y)
box_grid_x0 = tf.floor(box_grid_x)
box_grid_x0 = tf.maximum(tf.cast(0., dtype=box_grid_x0.dtype), box_grid_x0)
box_grid_y0 = tf.maximum(tf.cast(0., dtype=box_grid_y0.dtype), box_grid_y0)
box_grid_x0 = tf.minimum(box_grid_x0, tf.expand_dims(boundaries[:, :, 1], -1))
box_grid_x1 = tf.minimum(box_grid_x0 + 1,
tf.expand_dims(boundaries[:, :, 1], -1))
box_grid_y0 = tf.minimum(box_grid_y0, tf.expand_dims(boundaries[:, :, 0], -1))
box_grid_y1 = tf.minimum(box_grid_y0 + 1,
tf.expand_dims(boundaries[:, :, 0], -1))
box_gridx0x1 = tf.stack([box_grid_x0, box_grid_x1], axis=-1)
box_gridy0y1 = tf.stack([box_grid_y0, box_grid_y1], axis=-1)
# The RoIAlign feature f can be computed by bilinear interpolation of four
# neighboring feature points f0, f1, f2, and f3.
# f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
# [f10, f11]]
# f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
# f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
ly = box_grid_y - box_grid_y0
lx = box_grid_x - box_grid_x0
hy = 1.0 - ly
hx = 1.0 - lx
kernel_y = tf.reshape(
tf.stack([hy, ly], axis=3), [batch_size, num_boxes, output_size, 2, 1])
kernel_x = tf.reshape(
tf.stack([hx, lx], axis=3), [batch_size, num_boxes, output_size, 2, 1])
return kernel_y, kernel_x, box_gridy0y1, box_gridx0x1
def multilevel_crop_and_resize(features: Dict[str, tf.Tensor],
boxes: tf.Tensor,
output_size: int = 7,
sample_offset: float = 0.5) -> tf.Tensor:
"""Crop and resize on multilevel feature pyramid.
Generate the (output_size, output_size) set of pixels for each input box
  by first locating the box in the correct feature level, and then cropping
  and resizing it using the corresponding feature map of that level.
Args:
features: A dictionary with key as pyramid level and value as features. The
features are in shape of [batch_size, height_l, width_l, num_filters].
boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents
a box with [y1, x1, y2, x2] in un-normalized coordinates.
output_size: A scalar to indicate the output crop size.
sample_offset: a float number in [0, 1] indicates the subpixel sample offset
from grid point.
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
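  Example (an illustrative sketch with a dummy power-of-2 feature pyramid):
    features = {
        str(level): tf.zeros([2, 256 // 2**level, 256 // 2**level, 16])
        for level in range(2, 6)
    }
    boxes = tf.constant([[[0., 0., 112., 112.]], [[0., 0., 224., 224.]]])
    rois = multilevel_crop_and_resize(features, boxes, output_size=7)
    # rois has shape [2, 1, 7, 7, 16]. The first box (area 112**2) is
    # assigned to level 3, the second (area 224**2) to level 4.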
"""
with tf.name_scope('multilevel_crop_and_resize'):
levels = list(features.keys())
min_level = int(min(levels))
max_level = int(max(levels))
features_shape = tf.shape(features[str(min_level)])
batch_size, max_feature_height, max_feature_width, num_filters = (
features_shape[0], features_shape[1], features_shape[2],
features_shape[3])
num_boxes = tf.shape(boxes)[1]
# Stack feature pyramid into a features_all of shape
# [batch_size, levels, height, width, num_filters].
features_all = []
feature_heights = []
feature_widths = []
for level in range(min_level, max_level + 1):
shape = features[str(level)].get_shape().as_list()
feature_heights.append(shape[1])
feature_widths.append(shape[2])
# Concat tensor of [batch_size, height_l * width_l, num_filters] for each
# levels.
features_all.append(
tf.reshape(features[str(level)], [batch_size, -1, num_filters]))
features_r2 = tf.reshape(tf.concat(features_all, 1), [-1, num_filters])
# Calculate height_l * width_l for each level.
level_dim_sizes = [
feature_widths[i] * feature_heights[i]
for i in range(len(feature_widths))
]
# level_dim_offsets is accumulated sum of level_dim_size.
level_dim_offsets = [0]
for i in range(len(feature_widths) - 1):
level_dim_offsets.append(level_dim_offsets[i] + level_dim_sizes[i])
batch_dim_size = level_dim_offsets[-1] + level_dim_sizes[-1]
level_dim_offsets = tf.constant(level_dim_offsets, tf.int32)
height_dim_sizes = tf.constant(feature_widths, tf.int32)
# Assigns boxes to the right level.
box_width = boxes[:, :, 3] - boxes[:, :, 1]
box_height = boxes[:, :, 2] - boxes[:, :, 0]
areas_sqrt = tf.sqrt(
tf.cast(box_height, tf.float32) * tf.cast(box_width, tf.float32))
levels = tf.cast(
tf.math.floordiv(
tf.math.log(tf.math.divide_no_nan(areas_sqrt, 224.0)),
tf.math.log(2.0)) + 4.0,
dtype=tf.int32)
# Maps levels between [min_level, max_level].
levels = tf.minimum(max_level, tf.maximum(levels, min_level))
# Projects box location and sizes to corresponding feature levels.
scale_to_level = tf.cast(
tf.pow(tf.constant(2.0), tf.cast(levels, tf.float32)),
dtype=boxes.dtype)
boxes /= tf.expand_dims(scale_to_level, axis=2)
box_width /= scale_to_level
box_height /= scale_to_level
boxes = tf.concat([boxes[:, :, 0:2],
tf.expand_dims(box_height, -1),
tf.expand_dims(box_width, -1)], axis=-1)
# Maps levels to [0, max_level-min_level].
levels -= min_level
level_strides = tf.pow([[2.0]], tf.cast(levels, tf.float32))
boundary = tf.cast(
tf.concat([
tf.expand_dims(
[[tf.cast(max_feature_height, tf.float32)]] / level_strides - 1,
axis=-1),
tf.expand_dims(
[[tf.cast(max_feature_width, tf.float32)]] / level_strides - 1,
axis=-1),
],
axis=-1), boxes.dtype)
# Compute grid positions.
kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = _compute_grid_positions(
boxes, boundary, output_size, sample_offset)
x_indices = tf.cast(
tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32)
y_indices = tf.cast(
tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32)
batch_size_offset = tf.tile(
tf.reshape(
tf.range(batch_size) * batch_dim_size, [batch_size, 1, 1, 1]),
[1, num_boxes, output_size * 2, output_size * 2])
# Get level offset for each box. Each box belongs to one level.
levels_offset = tf.tile(
tf.reshape(
tf.gather(level_dim_offsets, levels),
[batch_size, num_boxes, 1, 1]),
[1, 1, output_size * 2, output_size * 2])
y_indices_offset = tf.tile(
tf.reshape(
y_indices * tf.expand_dims(tf.gather(height_dim_sizes, levels), -1),
[batch_size, num_boxes, output_size * 2, 1]),
[1, 1, 1, output_size * 2])
x_indices_offset = tf.tile(
tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]),
[1, 1, output_size * 2, 1])
indices = tf.reshape(
batch_size_offset + levels_offset + y_indices_offset + x_indices_offset,
[-1])
# TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar
# performance.
features_per_box = tf.reshape(
tf.gather(features_r2, indices),
[batch_size, num_boxes, output_size * 2, output_size * 2, num_filters])
# Bilinear interpolation.
features_per_box = _feature_bilinear_interpolation(
features_per_box, kernel_y, kernel_x)
return features_per_box
def _selective_crop_and_resize(features: tf.Tensor,
boxes: tf.Tensor,
box_levels: tf.Tensor,
boundaries: tf.Tensor,
output_size: int = 7,
sample_offset: float = 0.5,
use_einsum_gather: bool = False) -> tf.Tensor:
"""Crop and resize boxes on a set of feature maps.
Given multiple features maps indexed by different levels, and a set of boxes
where each box is mapped to a certain level, it selectively crops and resizes
boxes from the corresponding feature maps to generate the box features.
We follow the ROIAlign technique (see https://arxiv.org/pdf/1703.06870.pdf,
figure 3 for reference). Specifically, for each feature map, we select an
(output_size, output_size) set of pixels corresponding to the box location,
and then use bilinear interpolation to select the feature value for each
pixel.
For performance, we perform the gather and interpolation on all layers as a
single operation. In this op the multi-level features are first stacked and
gathered into [2*output_size, 2*output_size] feature points. Then bilinear
interpolation is performed on the gathered feature points to generate
[output_size, output_size] RoIAlign feature map.
Here is the step-by-step algorithm:
1. The multi-level features are gathered into a
[batch_size, num_boxes, output_size*2, output_size*2, num_filters]
Tensor. The Tensor contains four neighboring feature points for each
vertex in the output grid.
2. Compute the interpolation kernel of shape
[batch_size, num_boxes, output_size*2, output_size*2]. The last 2 axis
can be seen as stacking 2x2 interpolation kernels for all vertices in the
output grid.
3. Element-wise multiply the gathered features and interpolation kernel.
Then apply 2x2 average pooling to reduce spatial dimension to
output_size.
Args:
features: a 5-D tensor of shape [batch_size, num_levels, max_height,
max_width, num_filters] where cropping and resizing are based.
boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the
information of each box w.r.t. the corresponding feature map.
boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left
corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float)
in terms of the number of pixels of the corresponding feature map size.
box_levels: a 3-D tensor of shape [batch_size, num_boxes, 1] representing
the 0-based corresponding feature level index of each box.
boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing
the boundary (in (y, x)) of the corresponding feature map for each box.
      Any resampled grid points that go beyond the boundary will be clipped.
output_size: a scalar indicating the output crop size.
sample_offset: a float number in [0, 1] indicates the subpixel sample offset
from grid point.
use_einsum_gather: use einsum to replace gather or not. Replacing einsum
with gather can improve performance when feature size is not large, einsum
is friendly with model partition as well. Gather's performance is better
when feature size is very large and there are multiple box levels.
Returns:
features_per_box: a 5-D tensor of shape
[batch_size, num_boxes, output_size, output_size, num_filters]
representing the cropped features.
"""
(batch_size, num_levels, max_feature_height, max_feature_width,
num_filters) = features.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(features)[0]
_, num_boxes, _ = boxes.get_shape().as_list()
kernel_y, kernel_x, box_gridy0y1, box_gridx0x1 = _compute_grid_positions(
boxes, boundaries, output_size, sample_offset)
x_indices = tf.cast(
tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32)
y_indices = tf.cast(
tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32)
if use_einsum_gather:
    # Bilinear interpolation is done during the last two gathers:
# f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
# [f10, f11]]
# [[f00, f01],
# [f10, f11]] = tf.einsum(tf.einsum(features, y_one_hot), x_one_hot)
# where [hy, ly] and [hx, lx] are the bilinear interpolation kernel.
y_indices = tf.cast(
tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size, 2]),
dtype=tf.int32)
x_indices = tf.cast(
tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size, 2]),
dtype=tf.int32)
# shape is [batch_size, num_boxes, output_size, 2, height]
grid_y_one_hot = tf.one_hot(
tf.cast(y_indices, tf.int32), max_feature_height, dtype=kernel_y.dtype)
# shape is [batch_size, num_boxes, output_size, 2, width]
grid_x_one_hot = tf.one_hot(
tf.cast(x_indices, tf.int32), max_feature_width, dtype=kernel_x.dtype)
# shape is [batch_size, num_boxes, output_size, height]
grid_y_weight = tf.reduce_sum(
tf.multiply(grid_y_one_hot, kernel_y), axis=-2)
# shape is [batch_size, num_boxes, output_size, width]
grid_x_weight = tf.reduce_sum(
tf.multiply(grid_x_one_hot, kernel_x), axis=-2)
# Gather for y_axis.
# shape is [batch_size, num_boxes, output_size, width, features]
features_per_box = tf.einsum('bmhwf,bmoh->bmowf', features,
tf.cast(grid_y_weight, features.dtype))
# Gather for x_axis.
# shape is [batch_size, num_boxes, output_size, output_size, features]
features_per_box = tf.einsum('bmhwf,bmow->bmhof', features_per_box,
tf.cast(grid_x_weight, features.dtype))
else:
height_dim_offset = max_feature_width
level_dim_offset = max_feature_height * height_dim_offset
batch_dim_offset = num_levels * level_dim_offset
batch_size_offset = tf.tile(
tf.reshape(
tf.range(batch_size) * batch_dim_offset, [batch_size, 1, 1, 1]),
[1, num_boxes, output_size * 2, output_size * 2])
box_levels_offset = tf.tile(
tf.reshape(box_levels * level_dim_offset,
[batch_size, num_boxes, 1, 1]),
[1, 1, output_size * 2, output_size * 2])
y_indices_offset = tf.tile(
tf.reshape(y_indices * height_dim_offset,
[batch_size, num_boxes, output_size * 2, 1]),
[1, 1, 1, output_size * 2])
x_indices_offset = tf.tile(
tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]),
[1, 1, output_size * 2, 1])
indices = tf.reshape(
batch_size_offset + box_levels_offset + y_indices_offset +
x_indices_offset, [-1])
features = tf.reshape(features, [-1, num_filters])
# TODO(wangtao): replace tf.gather with tf.gather_nd and try to get similar
# performance.
features_per_box = tf.reshape(
tf.gather(features, indices),
[batch_size, num_boxes, output_size * 2, output_size * 2, num_filters])
features_per_box = _feature_bilinear_interpolation(
features_per_box, kernel_y, kernel_x)
return features_per_box
def crop_mask_in_target_box(masks: tf.Tensor,
boxes: tf.Tensor,
target_boxes: tf.Tensor,
output_size: int,
sample_offset: float = 0.0,
use_einsum: bool = True) -> tf.Tensor:
"""Crop masks in target boxes.
Args:
masks: A tensor with a shape of [batch_size, num_masks, height, width].
    boxes: a float tensor representing box coordinates that tightly enclose
masks with a shape of [batch_size, num_masks, 4] in un-normalized
coordinates. A box is represented by [ymin, xmin, ymax, xmax].
    target_boxes: a float tensor representing target box coordinates for masks
with a shape of [batch_size, num_masks, 4] in un-normalized coordinates. A
box is represented by [ymin, xmin, ymax, xmax].
    output_size: A scalar to indicate the output crop size. It currently only
      supports square-shaped outputs.
sample_offset: a float number in [0, 1] indicates the subpixel sample offset
from grid point.
use_einsum: Use einsum to replace gather in selective_crop_and_resize.
Returns:
A 4-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size].
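  Example (an illustrative sketch):
    masks = tf.ones([1, 1, 28, 28])
    boxes = tf.constant([[[10., 10., 20., 20.]]])
    target_boxes = tf.constant([[[10., 10., 30., 30.]]])
    out = crop_mask_in_target_box(masks, boxes, target_boxes, output_size=28)
    # out has shape [1, 1, 28, 28]. Since the target box is twice the size of
    # the tight box, the mask lands in roughly the top-left quadrant.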
"""
with tf.name_scope('crop_mask_in_target_box'):
# Cast to float32, as the y_transform and other transform variables may
# overflow in float16
masks = tf.cast(masks, tf.float32)
boxes = tf.cast(boxes, tf.float32)
target_boxes = tf.cast(target_boxes, tf.float32)
batch_size, num_masks, height, width = masks.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(masks)[0]
masks = tf.reshape(masks, [batch_size * num_masks, height, width, 1])
# Pad zeros on the boundary of masks.
masks = tf.image.pad_to_bounding_box(masks, 2, 2, height + 4, width + 4)
masks = tf.reshape(masks, [batch_size, num_masks, height+4, width+4, 1])
# Projects target box locations and sizes to corresponding cropped
# mask coordinates.
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=target_boxes, num_or_size_splits=4, axis=2)
y_transform = (bb_y_min - gt_y_min) * height / (
gt_y_max - gt_y_min + _EPSILON) + 2
    x_transform = (bb_x_min - gt_x_min) * width / (
        gt_x_max - gt_x_min + _EPSILON) + 2
    h_transform = (bb_y_max - bb_y_min) * height / (
        gt_y_max - gt_y_min + _EPSILON)
w_transform = (bb_x_max - bb_x_min) * width / (
gt_x_max - gt_x_min + _EPSILON)
boundaries = tf.concat(
[tf.ones_like(y_transform) * ((height + 4) - 1),
tf.ones_like(x_transform) * ((width + 4) - 1)],
axis=-1)
boundaries = tf.cast(boundaries, dtype=y_transform.dtype)
# Reshape tensors to have the right shape for selective_crop_and_resize.
    transformed_boxes = tf.concat(
        [y_transform, x_transform, h_transform, w_transform], -1)
levels = tf.tile(tf.reshape(tf.range(num_masks), [1, num_masks]),
[batch_size, 1])
cropped_masks = _selective_crop_and_resize(
masks,
        transformed_boxes,
levels,
boundaries,
output_size,
sample_offset=sample_offset,
use_einsum_gather=use_einsum)
cropped_masks = tf.squeeze(cropped_masks, axis=-1)
return cropped_masks
def nearest_upsampling(data: tf.Tensor,
scale: int,
use_keras_layer: bool = False) -> tf.Tensor:
"""Nearest neighbor upsampling implementation.
Args:
data: A tensor with a shape of [batch, height_in, width_in, channels].
scale: An integer multiple to scale resolution of input data.
use_keras_layer: If True, use keras Upsampling2D layer.
Returns:
data_up: A tensor with a shape of
[batch, height_in*scale, width_in*scale, channels]. Same dtype as input
data.
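  Example (an illustrative sketch):
    x = tf.reshape(tf.range(4.), [1, 2, 2, 1])
    y = nearest_upsampling(x, scale=2)
    # y[0, :, :, 0] == [[0., 0., 1., 1.],
    #                   [0., 0., 1., 1.],
    #                   [2., 2., 3., 3.],
    #                   [2., 2., 3., 3.]]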
"""
if use_keras_layer:
return tf.keras.layers.UpSampling2D(size=(scale, scale),
interpolation='nearest')(data)
with tf.name_scope('nearest_upsampling'):
bs, _, _, c = data.get_shape().as_list()
shape = tf.shape(input=data)
h = shape[1]
w = shape[2]
bs = -1 if bs is None else bs
# Uses reshape to quickly upsample the input. The nearest pixel is selected
# via tiling.
data = tf.tile(
tf.reshape(data, [bs, h, 1, w, 1, c]), [1, 1, scale, 1, scale, 1])
return tf.reshape(data, [bs, h * scale, w * scale, c])
def _gather_rows_from_matrix(input_matrix: tf.Tensor,
row_indices: tf.Tensor) -> tf.Tensor:
"""Gather rows from the input matrix (2-D tensor).
This operation is equivalent to tf.gather(input_matrix, row_indices), but is
  implemented as a sparse matrix multiplication.
Args:
input_matrix: A 2-D tensor in shape (input_h, input_w) from which to gather
values. The shape must be 2-D, since sparse matrix multiplication is
currently only supported on 2-D matrices.
    row_indices: A 1-D int tensor in shape (output_h) which stores the row
indices of the input.
Returns:
A tensor in shape (output_h, input_w) which stores the gathered rows.
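  Example (an illustrative sketch):
    matrix = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
    _gather_rows_from_matrix(matrix, tf.constant([2, 0]))
    # => [[5., 6.], [1., 2.]]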
"""
input_matrix_shape = input_matrix.get_shape().as_list()
if len(input_matrix_shape) != 2:
raise ValueError(
'Expected the input_matrix tensor (input_h, input_w) has rank == 2, '
'was: %s' % input_matrix_shape)
row_indices_shape = row_indices.get_shape().as_list()
if len(row_indices_shape) != 1:
raise ValueError(
'Expected the row_indices tensor (output_h) has rank == 1, was: %s' %
row_indices_shape)
# (output_h, input_h)
indices_one_hot = tf.one_hot(
row_indices, depth=input_matrix_shape[0], dtype=input_matrix.dtype)
# Matrix multiplication: (output_h, input_h) x (input_h, input_w)
# (output_h, input_w)
return tf.linalg.matmul(indices_one_hot, input_matrix, a_is_sparse=True)
def bilinear_resize_to_bbox(
images: tf.Tensor, bbox: tf.Tensor, output_size: tf.Tensor
) -> tf.Tensor:
"""Bilinear resizes the images to fit into the bounding boxes in the output.
Args:
images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary
numbers of channel dimensions.
bbox: A tensor in shape (batch_size, 4), representing the absolute
coordinates (ymin, xmin, ymax, xmax) for each bounding box.
output_size: The size of the output images in (output_h, output_w).
Returns:
A tensor in shape (batch_size, output_h, output_w, ...). The result has the
same dtype as the input if it's float32, float16, bfloat16, otherwise the
result is float32.
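  Example (an illustrative sketch):
    images = tf.ones([1, 4, 4, 3])
    bbox = tf.constant([[0., 0., 2., 2.]])
    out = bilinear_resize_to_bbox(images, bbox, tf.constant([4, 4]))
    # out has shape [1, 4, 4, 3]; the input is squeezed into the top-left
    # 2x2 region and pixels outside the bbox are 0.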
"""
images_shape = images.get_shape().as_list()
images_rank = len(images_shape)
if images_rank < 3:
raise ValueError(
'Expected the input images (batch_size, height, width, ...) '
'has rank >= 3, was: %s' % images_shape)
bbox_shape = bbox.get_shape().as_list()
if bbox_shape[-1] != 4:
raise ValueError(
'Expected the last dimension of `bbox` has size == 4, but the shape '
'of `bbox` was: %s' % bbox_shape)
rank_range = list(range(images_rank))
extra_dims = images_shape[3:]
extra_dims_perm = rank_range[3:]
extra_dims_product = 1
for d in extra_dims:
extra_dims_product *= d
input_h = tf.cast(tf.shape(images)[1], tf.float32)
input_w = tf.cast(tf.shape(images)[2], tf.float32)
output_h = output_size[0]
output_w = output_size[1]
bbox = tf.cast(bbox, tf.float32)
# (batch_size, 1)
bbox_ymin = bbox[:, 0:1]
bbox_xmin = bbox[:, 1:2]
bbox_ymax = bbox[:, 2:3]
bbox_xmax = bbox[:, 3:4]
bbox_h = bbox_ymax - bbox_ymin
bbox_w = bbox_xmax - bbox_xmin
scale_h = tf.math.divide_no_nan(input_h, bbox_h)
scale_w = tf.math.divide_no_nan(input_w, bbox_w)
# Generates the output grids.
# (output_h)
output_y_grid = tf.range(output_h, dtype=bbox_ymin.dtype)
# (output_w)
output_x_grid = tf.range(output_w, dtype=bbox_xmin.dtype)
# Computes the input source positions (float) which map to the output grids
# (integer).
# Applies half pixel offset here to ensure the output is center-aligned to the
# input.
# TODO(b/245614786): support align_corners=True.
# (batch_size, output_h)
input_y_pos = tf.clip_by_value(
(output_y_grid - bbox_ymin + 0.5) * scale_h - 0.5, 0.0, input_h - 1.0)
# (batch_size, output_w)
input_x_pos = tf.clip_by_value(
(output_x_grid - bbox_xmin + 0.5) * scale_w - 0.5, 0.0, input_w - 1.0)
# Gets the positions (integer) of the four nearest neighbors of the input
# source position (float).
# (y0, x0): left-top
# (y0, x1): right-top
# (y1, x0): left-bottom
# (y1, x1): right-bottom
# (batch_size, output_h)
input_y0 = tf.cast(
tf.clip_by_value(tf.floor(input_y_pos), 0.0, input_h - 2.0), tf.int32)
input_y1 = input_y0 + 1
# (batch_size, output_w)
input_x0 = tf.cast(
tf.clip_by_value(tf.floor(input_x_pos), 0.0, input_w - 2.0), tf.int32)
input_x1 = input_x0 + 1
# (batch_size, output_h)
output_y_mask = (bbox_ymin <= output_y_grid) & (output_y_grid < bbox_ymax)
# (batch_size, output_w)
output_x_mask = (bbox_xmin <= output_x_grid) & (output_x_grid < bbox_xmax)
# Masks the output pixels outside the bounding box by setting their input
# neighbors to -1. This makes `tf.one_hot` operation produce all zeros at
# these pixels, so as to accelerate the sparse matrix multiplication in
# `_gather_rows_from_matrix`.
# (batch_size, output_h)
input_y0 = tf.where(output_y_mask, input_y0, -tf.ones_like(input_y0))
input_y1 = tf.where(output_y_mask, input_y1, -tf.ones_like(input_y1))
# (batch_size, output_w)
input_x0 = tf.where(output_x_mask, input_x0, -tf.ones_like(input_x0))
input_x1 = tf.where(output_x_mask, input_x1, -tf.ones_like(input_x1))
input_h = tf.cast(input_h, tf.int32)
input_w = tf.cast(input_w, tf.int32)
if images.dtype not in {tf.float32, tf.bfloat16, tf.float16}:
images = tf.cast(images, tf.float32)
if images_rank > 3:
# Reshapes the images since _gather_rows_from_matrix only takes 2-D tensor.
# (batch_size, input_h, input_w * extra_dims_product)
images = tf.reshape(images, [-1, input_h, input_w * extra_dims_product])
# Fetches the rows from the input source images.
# (batch_size, output_h, input_w * extra_dims_product)
val_y0 = tf.map_fn(
lambda x: _gather_rows_from_matrix(x[0], x[1]),
elems=(images, input_y0),
fn_output_signature=images.dtype,
parallel_iterations=32,
)
val_y1 = tf.map_fn(
lambda x: _gather_rows_from_matrix(x[0], x[1]),
elems=(images, input_y1),
fn_output_signature=images.dtype,
parallel_iterations=32,
)
if images_rank > 3:
new_shape = [-1, output_h, input_w] + extra_dims
# (batch_size, output_h, input_w, ...)
val_y0 = tf.reshape(val_y0, new_shape)
val_y1 = tf.reshape(val_y1, new_shape)
# Transposes the tensors for reusing _gather_rows_from_matrix later.
new_perm = [0, 2, 1] + extra_dims_perm
# (batch_size, input_w, output_h, ...)
val_y0 = tf.transpose(val_y0, new_perm)
val_y1 = tf.transpose(val_y1, new_perm)
if images_rank > 3:
new_shape = [-1, input_w, output_h * extra_dims_product]
# (batch_size, input_w, output_h * extra_dims_product)
val_y0 = tf.reshape(val_y0, new_shape)
val_y1 = tf.reshape(val_y1, new_shape)
# Fetches the pixels from the rows using the column indices.
# val_00, val_01, val_10, val_11 store the pixels of the four nearest
# neighbors of the input source position.
# (batch_size, output_w, output_h * extra_dims_product)
val_00 = tf.map_fn(
lambda x: _gather_rows_from_matrix(x[0], x[1]),
elems=(val_y0, input_x0),
fn_output_signature=images.dtype,
parallel_iterations=32,
)
val_01 = tf.map_fn(
lambda x: _gather_rows_from_matrix(x[0], x[1]),
elems=(val_y0, input_x1),
fn_output_signature=images.dtype,
parallel_iterations=32,
)
val_10 = tf.map_fn(
lambda x: _gather_rows_from_matrix(x[0], x[1]),
elems=(val_y1, input_x0),
fn_output_signature=images.dtype,
parallel_iterations=32,
)
val_11 = tf.map_fn(
lambda x: _gather_rows_from_matrix(x[0], x[1]),
elems=(val_y1, input_x1),
fn_output_signature=images.dtype,
parallel_iterations=32,
)
if images_rank > 3:
new_shape = [-1, output_w, output_h] + extra_dims
# (batch_size, output_w, output_h, ...)
val_00 = tf.reshape(val_00, new_shape)
val_01 = tf.reshape(val_01, new_shape)
val_10 = tf.reshape(val_10, new_shape)
val_11 = tf.reshape(val_11, new_shape)
# (..., batch_size, output_h, output_w)
new_perm = extra_dims_perm + [0, 2, 1]
val_00 = tf.transpose(val_00, new_perm)
val_01 = tf.transpose(val_01, new_perm)
val_10 = tf.transpose(val_10, new_perm)
val_11 = tf.transpose(val_11, new_perm)
# (batch_size, output_height, 1)
input_y_pos = tf.cast(input_y_pos[:, :, tf.newaxis], images.dtype)
input_y0 = tf.cast(input_y0[:, :, tf.newaxis], images.dtype)
input_y1 = tf.cast(input_y1[:, :, tf.newaxis], images.dtype)
# (batch_size, 1, output_width)
input_x_pos = tf.cast(input_x_pos[:, tf.newaxis, :], images.dtype)
input_x0 = tf.cast(input_x0[:, tf.newaxis, :], images.dtype)
input_x1 = tf.cast(input_x1[:, tf.newaxis, :], images.dtype)
# Compute the weights of the four nearest neighbors for interpolation.
# (batch_size, output_height, output_width)
weight_00 = (input_y1 - input_y_pos) * (input_x1 - input_x_pos)
weight_01 = (input_y1 - input_y_pos) * (input_x_pos - input_x0)
weight_10 = (input_y_pos - input_y0) * (input_x1 - input_x_pos)
weight_11 = (input_y_pos - input_y0) * (input_x_pos - input_x0)
# (..., batch_size, output_height, output_width)
output_images = (
val_00 * weight_00 + val_01 * weight_01 + val_10 * weight_10 +
val_11 * weight_11)
# (batch_size, output_height, output_width, ...)
return tf.transpose(output_images, np.roll(rank_range, -len(extra_dims)))
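# Illustrative sketch (not part of the original library): a minimal numeric
# check of the half-pixel-offset mapping used above. For a 4-row input
# resized to fill a 2-row bbox starting at ymin=0, output row y maps to input
# position (y - ymin + 0.5) * (input_h / bbox_h) - 0.5, so output row 0 lands
# at input position 0.5 and its two vertical neighbors get bilinear weights
# (0.5, 0.5).
def _demo_half_pixel_mapping():
  input_h, bbox_ymin, bbox_h = 4.0, 0.0, 2.0
  scale_h = input_h / bbox_h
  for y in range(2):
    pos = (y - bbox_ymin + 0.5) * scale_h - 0.5
    y0 = math.floor(pos)
    print('output y=%d -> input pos=%.2f, weights=(%.2f, %.2f)' %
          (y, pos, (y0 + 1) - pos, pos - y0))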
def bilinear_resize_with_crop_and_pad(images: tf.Tensor,
rescale_size: tf.Tensor,
crop_offset: tf.Tensor,
crop_size: tf.Tensor,
output_size: tf.Tensor) -> tf.Tensor:
"""Bilinear resizes the images, then crops and finally pads to output size.
Args:
images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary
numbers of channel dimensions.
rescale_size: An int tensor in shape (batch_size, 2), representing the sizes
of the rescaled images.
crop_offset: An int tensor in shape (batch_size, 2), representing the
left-top offset of the crop box. Applying negative offsets means adding
extra margins at the left-top.
crop_size: An int tensor in shape (batch_size, 2), representing the sizes of
the cropped images.
output_size: The size of the output image in (output_h, output_w).
Returns:
    A tensor in shape (batch_size, output_h, output_w, ...). The result has the
    same dtype as the input if it is float32, float16, or bfloat16; otherwise
    the result is float32.
"""
images_shape = images.get_shape().as_list()
images_rank = len(images_shape)
if images_rank < 3:
raise ValueError(
'Expected the input images (batch_size, height, width, ...) '
        'to have rank >= 3, was: %s' % images_shape)
num_extra_dims = images_rank - 3
# Rescales the images, applies the offset and pastes to the output canvas.
# (batch_size, 2)
ymin_xmin = -crop_offset
# (batch_size, 2)
ymax_xmax = ymin_xmin + tf.cast(rescale_size, ymin_xmin.dtype)
# (batch_size, 4)
rescale_bbox = tf.concat([ymin_xmin, ymax_xmax], axis=1)
# (batch_size, output_height, output_width, ...)
rescaled_padded_images = bilinear_resize_to_bbox(images, rescale_bbox,
output_size)
# Masks out the pixels outside of the crop box.
# (batch_size, 2)
y0_x0 = tf.broadcast_to(
tf.constant([[0, 0]], dtype=crop_size.dtype), tf.shape(crop_size))
# (batch_size, 4)
crop_bbox = tf.concat([y0_x0, crop_size], axis=1)
# (batch_size, output_height, output_width, ...)
crop_bbox_mask = bbox2mask(
crop_bbox,
image_height=output_size[0],
image_width=output_size[1],
dtype=rescaled_padded_images.dtype)[[...] + [tf.newaxis] * num_extra_dims]
# (batch_size, output_height, output_width, ...)
return rescaled_padded_images * crop_bbox_mask
def bilinear_resize_with_pad(
images: tf.Tensor, rescale_size: tf.Tensor, output_size: tf.Tensor
) -> tf.Tensor:
"""Bilinear resizes the images, then pads to output size.
Args:
images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary
numbers of channel dimensions.
rescale_size: An int tensor in shape (2,) or (batch_size, 2), representing
the sizes of the rescaled images.
output_size: The size of the output image in (output_h, output_w).
Returns:
    A tensor in shape (batch_size, output_h, output_w, ...). The result has the
    same dtype as the input if it is float32, float16, or bfloat16; otherwise
    the result is float32.
"""
images_shape = images.get_shape().as_list()
images_rank = len(images_shape)
if images_rank < 3:
raise ValueError(
'Expected the input images (batch_size, height, width, ...) '
        'to have rank >= 3, was: %s' % images_shape
)
batch_size = tf.shape(images)[0]
rescale_size = tf.convert_to_tensor(rescale_size)
if len(rescale_size.get_shape().as_list()) == 1:
rescale_size = tf.broadcast_to(rescale_size, [batch_size, 2])
# Rescales the images, applies the offset and pastes to the output canvas.
# (batch_size, 2)
ymin_xmin = tf.broadcast_to([0, 0], [batch_size, 2])
# (batch_size, 2)
ymax_xmax = tf.cast(ymin_xmin, rescale_size.dtype) + rescale_size
# (batch_size, 4)
rescale_bbox = tf.concat([ymin_xmin, ymax_xmax], axis=1)
# (batch_size, output_height, output_width, ...)
return bilinear_resize_to_bbox(images, rescale_bbox, output_size)
def bilinear_resize(images: tf.Tensor, output_size: tf.Tensor) -> tf.Tensor:
"""Bilinear resizes the images.
Args:
images: A tensor in shape (batch_size, input_h, input_w, ...) with arbitrary
numbers of channel dimensions.
output_size: The size of the output image in (output_h, output_w).
Returns:
    A tensor in shape (batch_size, output_h, output_w, ...). The result has the
    same dtype as the input if it is float32, float16, or bfloat16; otherwise
    the result is float32.
"""
return bilinear_resize_with_pad(
images, rescale_size=output_size, output_size=output_size
)
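# Illustrative usage sketch (not part of the original file): resizing two
# hypothetical RGB images to per-image sizes and pasting them onto a shared
# 64x64 canvas. The first image fills the canvas; the second keeps its 32x48
# size and the remaining canvas pixels are zero-padded.
def _demo_bilinear_resize_with_pad():
  images = tf.random.uniform([2, 32, 48, 3])
  rescale_size = tf.constant([[64, 64], [32, 48]], dtype=tf.int32)
  padded = bilinear_resize_with_pad(images, rescale_size, output_size=[64, 64])
  assert padded.shape == (2, 64, 64, 3)
  return padded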
| 38,379 | 40.536797 | 80 | py |
models | models-master/official/vision/losses/maskrcnn_losses_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for maskrcnn_losses."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.losses import maskrcnn_losses
class MaskrcnnLossesTest(parameterized.TestCase, tf.test.TestCase):
def testRpnScoreLoss(self):
batch_size = 2
height = 32
width = 32
num_anchors = 10
score_outputs = {
'1': tf.random.uniform([batch_size, height, width, num_anchors])
}
score_targets = {
'1':
tf.random.uniform([batch_size, height, width, num_anchors],
minval=-1,
maxval=2,
dtype=tf.int32)
}
loss_fn = maskrcnn_losses.RpnScoreLoss(rpn_batch_size_per_im=8)
self.assertEqual(tf.rank(loss_fn(score_outputs, score_targets)), 0)
def testRpnBoxLoss(self):
batch_size = 2
height = 32
width = 32
num_anchors = 10
box_outputs = {
'1': tf.random.uniform([batch_size, height, width, num_anchors * 4])
}
box_targets = {
'1': tf.random.uniform([batch_size, height, width, num_anchors * 4])
}
loss_fn = maskrcnn_losses.RpnBoxLoss(huber_loss_delta=1. / 9.)
self.assertEqual(tf.rank(loss_fn(box_outputs, box_targets)), 0)
def testRpnBoxLossValidBox(self):
box_outputs = {'1': tf.constant([[[[0.2, 0.2, 1.4, 1.4]]]])}
box_targets = {'1': tf.constant([[[[0., 0., 1., 1.]]]])}
loss_fn = maskrcnn_losses.RpnBoxLoss(huber_loss_delta=1. / 9.)
self.assertAllClose(loss_fn(box_outputs, box_targets), 0.027093, atol=1e-4)
def testRpnBoxLossInvalidBox(self):
box_outputs = {'1': tf.constant([[[[0.2, 0.2, 1.4, 1.4]]]])}
box_targets = {'1': tf.constant([[[[0., 0., 0., 0.]]]])}
loss_fn = maskrcnn_losses.RpnBoxLoss(huber_loss_delta=1. / 9.)
self.assertAllClose(loss_fn(box_outputs, box_targets), 0., atol=1e-4)
@parameterized.parameters(True, False)
def testFastrcnnClassLoss(self, use_binary_cross_entropy):
batch_size = 2
num_boxes = 10
num_classes = 5
class_outputs = tf.random.uniform([batch_size, num_boxes, num_classes])
class_targets = tf.random.uniform([batch_size, num_boxes],
minval=0,
maxval=num_classes + 1,
dtype=tf.int32)
loss_fn = maskrcnn_losses.FastrcnnClassLoss(use_binary_cross_entropy)
self.assertEqual(tf.rank(loss_fn(class_outputs, class_targets)), 0)
def testFastrcnnClassLossTopK(self):
class_targets = tf.constant([[0, 0, 0, 2]])
class_outputs = tf.constant([[
[100.0, 0.0, 0.0],
[100.0, 0.0, 0.0],
[100.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
]])
self.assertAllClose(
maskrcnn_losses.FastrcnnClassLoss(top_k_percent=0.5)(
class_outputs, class_targets
),
0.775718,
atol=1e-4,
)
self.assertAllClose(
maskrcnn_losses.FastrcnnClassLoss(top_k_percent=1.0)(
class_outputs, class_targets
),
0.387861,
atol=1e-4,
)
def testFastrcnnBoxLoss(self):
batch_size = 2
num_boxes = 10
num_classes = 5
box_outputs = tf.random.uniform([batch_size, num_boxes, num_classes * 4])
box_targets = tf.random.uniform([batch_size, num_boxes, 4])
class_targets = tf.random.uniform([batch_size, num_boxes],
minval=0,
maxval=num_classes + 1,
dtype=tf.int32)
loss_fn = maskrcnn_losses.FastrcnnBoxLoss(huber_loss_delta=1.)
self.assertEqual(
tf.rank(loss_fn(box_outputs, class_targets, box_targets)), 0)
def testMaskrcnnLoss(self):
batch_size = 2
num_masks = 10
mask_height = 16
mask_width = 16
num_classes = 5
mask_outputs = tf.random.uniform(
[batch_size, num_masks, mask_height, mask_width])
mask_targets = tf.cast(
tf.random.uniform([batch_size, num_masks, mask_height, mask_width],
minval=0,
maxval=2,
dtype=tf.int32), tf.float32)
select_class_targets = tf.random.uniform([batch_size, num_masks],
minval=0,
maxval=num_classes + 1,
dtype=tf.int32)
loss_fn = maskrcnn_losses.MaskrcnnLoss()
self.assertEqual(
tf.rank(loss_fn(mask_outputs, mask_targets, select_class_targets)), 0)
if __name__ == '__main__':
tf.test.main()
| 5,203 | 35.391608 | 79 | py |
models | models-master/official/vision/losses/segmentation_losses_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for segmentation_losses."""
from absl.testing import parameterized
import tensorflow as tf
from official.vision.losses import segmentation_losses
class SegmentationLossTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(True, False, 1.),
(True, True, 0.5),
(False, True, 1.),
)
def testSegmentationLoss(self, use_groundtruth_dimension,
use_binary_cross_entropy, top_k_percent_pixels):
# [batch, height, width, num_layers]: [2, 3, 4, 1]
labels = tf.random.uniform([2, 3, 4, 1], minval=0, maxval=6, dtype=tf.int32)
# [batch, height, width, num_classes]: [2, 3, 4, 6]
logits = tf.random.uniform([2, 3, 4, 6],
minval=-1,
maxval=1,
dtype=tf.float32)
loss = segmentation_losses.SegmentationLoss(
label_smoothing=0.,
class_weights=[],
ignore_label=255,
use_groundtruth_dimension=use_groundtruth_dimension,
use_binary_cross_entropy=use_binary_cross_entropy,
top_k_percent_pixels=top_k_percent_pixels)(logits, labels)
self.assertEqual(tf.rank(loss), 0)
def testSegmentationLossTopK(self):
labels = tf.constant([[[[0], [0]], [[0], [2]]]])
logits = tf.constant([[[[100., 0., 0.], [100., 0, 0.]],
[[100., 0., 0.], [0., 1., 0.]]]])
loss = segmentation_losses.SegmentationLoss(
label_smoothing=0.,
class_weights=[],
ignore_label=255,
use_groundtruth_dimension=True,
top_k_percent_pixels=0.5)(logits, labels)
self.assertAllClose(loss, 0.775718, atol=1e-4)
def testSegmentationLossTopKWithIgnoreLabel(self):
labels = tf.constant([[[[0], [0]], [[0], [2]]]])
logits = tf.constant([[[[100., 0., 0.], [100., 0, 0.]],
[[100., 0., 0.], [0., 1., 0.]]]])
loss = segmentation_losses.SegmentationLoss(
label_smoothing=0.,
class_weights=[],
ignore_label=0,
use_groundtruth_dimension=True,
top_k_percent_pixels=0.5)(logits, labels)
self.assertAllClose(loss, 1.551429, atol=1e-4)
def testSegmentationLossGroundTruthIsMattingMap(self):
# [batch, height, width, num_layers]: [2, 3, 4, 1]
labels = tf.random.uniform([2, 3, 4, 1],
minval=0,
maxval=1,
dtype=tf.float32)
# [batch, height, width, num_classes]: [2, 3, 4, 2]
logits = tf.random.uniform([2, 3, 4, 2],
minval=-1,
maxval=1,
dtype=tf.float32)
loss = segmentation_losses.SegmentationLoss(
label_smoothing=0.,
class_weights=[],
ignore_label=255,
use_groundtruth_dimension=True,
use_binary_cross_entropy=False,
top_k_percent_pixels=1.)(logits, labels)
self.assertEqual(tf.rank(loss), 0)
if __name__ == '__main__':
tf.test.main()
| 3,650 | 37.840426 | 80 | py |
models | models-master/official/vision/losses/loss_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses utilities for detection models."""
import tensorflow as tf
def multi_level_flatten(multi_level_inputs, last_dim=None):
"""Flattens a multi-level input.
Args:
multi_level_inputs: Ordered Dict with level to [batch, d1, ..., dm].
    last_dim: If not `None`, each level is reshaped to [batch_size, None,
      last_dim]; otherwise to [batch_size, None]. Defaults to `None`.
  Returns:
    Concatenated output of shape [batch_size, None], or
    [batch_size, None, last_dim].
"""
flattened_inputs = []
batch_size = None
for level in multi_level_inputs.keys():
single_input = multi_level_inputs[level]
if batch_size is None:
batch_size = single_input.shape[0] or tf.shape(single_input)[0]
if last_dim is not None:
flattened_input = tf.reshape(single_input, [batch_size, -1, last_dim])
else:
flattened_input = tf.reshape(single_input, [batch_size, -1])
flattened_inputs.append(flattened_input)
return tf.concat(flattened_inputs, axis=1)
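# Illustrative sketch (not part of the original file): flattening a two-level
# feature dict. Level '3' contributes 4 * 4 = 16 positions and level '4'
# contributes 2 * 2 = 4, so the concatenated output has 20 positions per image.
def _demo_multi_level_flatten():
  inputs = {
      '3': tf.zeros([8, 4, 4, 6]),
      '4': tf.zeros([8, 2, 2, 6]),
  }
  flat = multi_level_flatten(inputs, last_dim=6)
  assert flat.shape == (8, 20, 6)
  return flat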
| 1,569 | 35.511628 | 78 | py |
models | models-master/official/vision/losses/maskrcnn_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for maskrcnn model."""
# Import libraries
import tensorflow as tf
class RpnScoreLoss(object):
"""Region Proposal Network score loss function."""
def __init__(self, rpn_batch_size_per_im):
self._rpn_batch_size_per_im = rpn_batch_size_per_im
self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=True)
def __call__(self, score_outputs, labels):
"""Computes total RPN detection loss.
Computes total RPN detection loss including box and score from all levels.
Args:
score_outputs: an OrderDict with keys representing levels and values
representing scores in [batch_size, height, width, num_anchors].
      labels: the dictionary returned from the dataloader that includes
ground-truth targets.
Returns:
rpn_score_loss: a scalar tensor representing total score loss.
"""
with tf.name_scope('rpn_loss'):
levels = sorted(score_outputs.keys())
score_losses = []
for level in levels:
score_losses.append(
self._rpn_score_loss(
score_outputs[level],
labels[level],
normalizer=tf.cast(
tf.shape(score_outputs[level])[0] *
self._rpn_batch_size_per_im,
dtype=score_outputs[level].dtype)))
# Sums per level losses to total loss.
return tf.math.add_n(score_losses)
def _rpn_score_loss(self, score_outputs, score_targets, normalizer=1.0):
"""Computes score loss."""
# score_targets has three values:
# (1) score_targets[i]=1, the anchor is a positive sample.
# (2) score_targets[i]=0, negative.
# (3) score_targets[i]=-1, the anchor is don't care (ignore).
with tf.name_scope('rpn_score_loss'):
mask = tf.math.logical_or(tf.math.equal(score_targets, 1),
tf.math.equal(score_targets, 0))
score_targets = tf.math.maximum(score_targets,
tf.zeros_like(score_targets))
score_targets = tf.expand_dims(score_targets, axis=-1)
score_outputs = tf.expand_dims(score_outputs, axis=-1)
score_loss = self._binary_crossentropy(
score_targets, score_outputs, sample_weight=mask)
score_loss /= normalizer
return score_loss
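# Illustrative sketch (not part of the original file): score targets use 1 for
# positive anchors, 0 for negative anchors and -1 for "don't care" anchors, so
# only the first two anchors below contribute to the binary cross entropy.
def _demo_rpn_score_targets():
  loss_fn = RpnScoreLoss(rpn_batch_size_per_im=2)
  score_outputs = {'1': tf.constant([[[[2.0, -2.0, 5.0]]]])}
  score_targets = {'1': tf.constant([[[[1, 0, -1]]]], dtype=tf.int32)}
  return loss_fn(score_outputs, score_targets)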
class RpnBoxLoss(object):
"""Region Proposal Network box regression loss function."""
def __init__(self, huber_loss_delta: float):
    # The delta is typically around the mean value of the regression target.
    # For instance, the regression targets of a 512x512 input with 6 anchors
    # on the P2-P6 pyramid are about [0.1, 0.1, 0.2, 0.2].
self._huber_loss = tf.keras.losses.Huber(
delta=huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
def __call__(self, box_outputs, labels):
"""Computes total RPN detection loss.
Computes total RPN detection loss including box and score from all levels.
Args:
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
      labels: the dictionary returned from the dataloader that includes
ground-truth targets.
Returns:
rpn_box_loss: a scalar tensor representing total box regression loss.
"""
with tf.name_scope('rpn_loss'):
levels = sorted(box_outputs.keys())
box_losses = []
for level in levels:
box_losses.append(self._rpn_box_loss(box_outputs[level], labels[level]))
# Sum per level losses to total loss.
return tf.add_n(box_losses)
def _rpn_box_loss(self, box_outputs, box_targets, normalizer=1.0):
"""Computes box regression loss."""
with tf.name_scope('rpn_box_loss'):
_, height, width, num_anchors_vertices = box_targets.get_shape().as_list()
# (batch_size, height, width, num_anchors, 4)
reshaped_box_targets = tf.reshape(
box_targets, [-1, height, width, num_anchors_vertices // 4, 4])
      # The box is valid if at least one of the ymin, xmin, ymax, xmax is not 0.
# (batch_size, height, width, num_anchors)
valid_mask = tf.reduce_any(
tf.math.abs(reshaped_box_targets) > 1e-6, axis=-1)
# (batch_size, height, width, num_anchors * 4)
valid_mask = tf.cast(
tf.repeat(valid_mask, 4, axis=-1), dtype=box_outputs.dtype)
# (batch_size, height, width, num_anchors * 4, 1)
box_targets = tf.expand_dims(box_targets, axis=-1)
# (batch_size, height, width, num_anchors * 4, 1)
box_outputs = tf.expand_dims(box_outputs, axis=-1)
box_loss = self._huber_loss(
box_targets, box_outputs, sample_weight=valid_mask)
# The loss is normalized by the sum of non-zero weights and additional
# normalizer provided by the function caller. Using + 0.01 here to avoid
# division by zero. For each replica, get the sum of non-zero masks. Then
# get the mean of sums from all replicas. Note there is an extra division
# by `num_replicas` in train_step(). So it is equivalent to normalizing
# the box loss by the global sum of non-zero masks.
replica_context = tf.distribute.get_replica_context()
valid_mask = tf.reduce_sum(valid_mask)
valid_mask_mean = replica_context.all_reduce(
tf.distribute.ReduceOp.MEAN, valid_mask
)
box_loss /= normalizer * (valid_mask_mean + 0.01)
return box_loss
class FastrcnnClassLoss(object):
"""Fast R-CNN classification loss function."""
def __init__(self,
use_binary_cross_entropy: bool = False,
top_k_percent: float = 1.0):
"""Initializes loss computation.
Args:
use_binary_cross_entropy: If true, uses binary cross entropy loss,
otherwise uses categorical cross entropy loss.
top_k_percent: a float, the value lies in [0.0, 1.0]. When its value < 1.,
only aggregate the top k percent of losses. This is useful for hard
example mining.
"""
self._use_binary_cross_entropy = use_binary_cross_entropy
self._top_k_percent = top_k_percent
def __call__(self, class_outputs, class_targets):
"""Computes the class loss (Fast-RCNN branch) of Mask-RCNN.
This function implements the classification loss of the Fast-RCNN.
The classification loss is categorical (or binary) cross entropy on all
RoIs.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long
Args:
class_outputs: a float tensor representing the class prediction for each
box with a shape of [batch_size, num_boxes, num_classes].
class_targets: a float tensor representing the class label for each box
with a shape of [batch_size, num_boxes].
Returns:
a scalar tensor representing total class loss.
"""
with tf.name_scope('fast_rcnn_loss'):
num_classes = class_outputs.get_shape().as_list()[-1]
class_targets_one_hot = tf.one_hot(
tf.cast(class_targets, dtype=tf.int32),
num_classes,
dtype=class_outputs.dtype)
if self._use_binary_cross_entropy:
# (batch_size, num_boxes, num_classes)
cross_entropy_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=class_targets_one_hot, logits=class_outputs)
else:
# (batch_size, num_boxes)
cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=class_targets_one_hot, logits=class_outputs)
if self._top_k_percent < 1.0:
return self.aggregate_loss_top_k(cross_entropy_loss)
else:
return tf.reduce_mean(cross_entropy_loss)
def aggregate_loss_top_k(self, loss, num_valid_values=None):
"""Aggregate the top-k the greatest loss values.
Args:
loss: a float tensor in shape (batch_size, num_boxes) or (batch_size,
num_boxes, num_classes) which stores the loss values.
num_valid_values: the number of loss values which are not ignored. The
default value is None, which means all the loss values are valid.
Returns:
A 0-D float which stores the overall loss of the batch.
"""
loss = tf.reshape(loss, shape=[-1])
top_k_num = tf.cast(
self._top_k_percent * tf.size(loss, out_type=tf.float32), tf.int32)
top_k_losses, _ = tf.math.top_k(loss, k=top_k_num)
normalizer = tf.cast(top_k_num, loss.dtype)
if num_valid_values is not None:
normalizer = tf.minimum(normalizer, tf.cast(num_valid_values, loss.dtype))
return tf.reduce_sum(top_k_losses) / (normalizer + 1e-5)
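# Illustrative check (not part of the original file), mirroring the numbers in
# testFastrcnnClassLossTopK earlier in this repo: with four RoIs where three
# confident boxes have ~0 cross entropy and one mislabeled box (logits
# [0, 1, 0], target class 2) has CE = log(e^0 + e^1 + e^0) - 0 = log(2 + e)
# ~= 1.5514, top_k_percent=0.5 keeps the 2 greatest values, so the loss is
# (1.5514 + ~0) / 2 ~= 0.7757, while top_k_percent=1.0 averages all four
# values to ~0.3879.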
class FastrcnnBoxLoss(object):
"""Fast R-CNN box regression loss function."""
def __init__(self,
huber_loss_delta: float,
class_agnostic_bbox_pred: bool = False):
"""Initiate Faster RCNN box loss.
Args:
      huber_loss_delta: the delta is typically around the mean value of the
        regression target. For instance, the regression targets of a 512x512
        input with 6 anchors on the P2-P6 pyramid are about
        [0.1, 0.1, 0.2, 0.2].
class_agnostic_bbox_pred: if True, class agnostic bounding box prediction
is performed.
"""
self._huber_loss = tf.keras.losses.Huber(
delta=huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
self._class_agnostic_bbox_pred = class_agnostic_bbox_pred
def __call__(self, box_outputs, class_targets, box_targets):
"""Computes the box loss (Fast-RCNN branch) of Mask-RCNN.
This function implements the box regression loss of the Fast-RCNN. As the
`box_outputs` produces `num_classes` boxes for each RoI, the reference model
expands `box_targets` to match the shape of `box_outputs` and selects only
the target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py) # pylint: disable=line-too-long
Instead, this function selects the `box_outputs` by the `class_targets` so
that it doesn't expand `box_targets`.
The box loss is smooth L1-loss on only positive samples of RoIs.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long
Args:
box_outputs: a float tensor representing the box prediction for each box
with a shape of [batch_size, num_boxes, num_classes * 4].
class_targets: a float tensor representing the class label for each box
with a shape of [batch_size, num_boxes].
box_targets: a float tensor representing the box label for each box
with a shape of [batch_size, num_boxes, 4].
Returns:
box_loss: a scalar tensor representing total box regression loss.
"""
with tf.name_scope('fast_rcnn_loss'):
class_targets = tf.cast(class_targets, dtype=tf.int32)
if not self._class_agnostic_bbox_pred:
box_outputs = self._assign_class_targets(box_outputs, class_targets)
return self._fast_rcnn_box_loss(box_outputs, box_targets, class_targets)
def _assign_class_targets(self, box_outputs, class_targets):
"""Selects the box from `box_outputs` based on `class_targets`, with which the box has the maximum overlap."""
_, num_rois, num_class_specific_boxes = box_outputs.get_shape().as_list()
num_classes = num_class_specific_boxes // 4
box_outputs = tf.reshape(box_outputs, [-1, num_rois, num_classes, 4])
    class_targets_one_hot = tf.one_hot(
        class_targets, num_classes, dtype=box_outputs.dtype
    )
    return tf.einsum('bnij,bni->bnj', box_outputs, class_targets_one_hot)
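  # Illustrative sketch (not part of the original file): with num_classes=2
  # and a single RoI whose target class is 1, `box_outputs` of
  # [[[0., 0., 1., 1., 5., 5., 6., 6.]]] is reshaped to two per-class boxes
  # and the einsum with the one-hot vector [0, 1] selects [5., 5., 6., 6.].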
def _fast_rcnn_box_loss(self, box_outputs, box_targets, class_targets,
normalizer=1.0):
"""Computes box regression loss."""
with tf.name_scope('fast_rcnn_box_loss'):
mask = tf.tile(
tf.expand_dims(tf.greater(class_targets, 0), axis=2), [1, 1, 4])
mask = tf.cast(mask, dtype=box_outputs.dtype)
box_targets = tf.expand_dims(box_targets, axis=-1)
box_outputs = tf.expand_dims(box_outputs, axis=-1)
box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask)
# The loss is normalized by the number of ones in mask,
# additional normalizer provided by the user and using 0.01 here to avoid
# division by 0. For each replica, get the sum of non-zero masks. Then
# get the mean of sums from all replicas. Note there is an extra division
# by `num_replicas` in train_step(). So it is equivalent to normalizing
# the box loss by the global sum of non-zero masks.
replica_context = tf.distribute.get_replica_context()
mask = tf.reduce_sum(mask)
mask_mean = replica_context.all_reduce(
tf.distribute.ReduceOp.MEAN, mask
)
box_loss /= normalizer * (mask_mean + 0.01)
return box_loss
class MaskrcnnLoss(object):
"""Mask R-CNN instance segmentation mask loss function."""
def __init__(self):
self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=True)
def __call__(self, mask_outputs, mask_targets, select_class_targets):
"""Computes the mask loss of Mask-RCNN.
This function implements the mask loss of Mask-RCNN. As the `mask_outputs`
produces `num_classes` masks for each RoI, the reference model expands
`mask_targets` to match the shape of `mask_outputs` and selects only the
target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py) # pylint: disable=line-too-long
Instead, this implementation selects the `mask_outputs` by the
`class_targets` so that it doesn't expand `mask_targets`. Note that the
selection logic is done in the post-processing of mask_rcnn_fn in
mask_rcnn_architecture.py.
Args:
mask_outputs: a float tensor representing the prediction for each mask,
with a shape of
[batch_size, num_masks, mask_height, mask_width].
mask_targets: a float tensor representing the binary mask of ground truth
labels for each mask with a shape of
[batch_size, num_masks, mask_height, mask_width].
select_class_targets: a tensor with a shape of [batch_size, num_masks],
representing the foreground mask targets.
Returns:
mask_loss: a float tensor representing total mask loss.
"""
with tf.name_scope('mask_rcnn_loss'):
_, _, mask_height, mask_width = mask_outputs.get_shape().as_list()
weights = tf.tile(
tf.greater(select_class_targets, 0)[:, :, tf.newaxis, tf.newaxis],
[1, 1, mask_height, mask_width],
)
weights = tf.cast(weights, dtype=mask_outputs.dtype)
mask_targets = tf.expand_dims(mask_targets, axis=-1)
mask_outputs = tf.expand_dims(mask_outputs, axis=-1)
mask_loss = self._binary_crossentropy(mask_targets, mask_outputs,
sample_weight=weights)
# For each replica, get the sum of non-zero weights. Then get the mean of
# sums from all replicas. Note there is an extra division by
# `num_replicas` in train_step(). So it is equivalent to normalizing the
# mask loss by the global sum of non-zero weights.
replica_context = tf.distribute.get_replica_context()
weights = tf.reduce_sum(weights)
weights_mean = replica_context.all_reduce(
tf.distribute.ReduceOp.MEAN, weights
)
      # The loss is normalized by the number of 1's in weights, and + 0.01 is
      # used to avoid division by zero.
return mask_loss / (weights_mean + 0.01)
| 16,390 | 43.061828 | 186 | py |
models | models-master/official/vision/losses/segmentation_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for segmentation models."""
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.dataloaders import utils
EPSILON = 1e-5
class SegmentationLoss:
"""Semantic segmentation loss."""
def __init__(self,
label_smoothing,
class_weights,
ignore_label,
use_groundtruth_dimension,
use_binary_cross_entropy=False,
top_k_percent_pixels=1.0,
gt_is_matting_map=False):
"""Initializes `SegmentationLoss`.
Args:
label_smoothing: A float, if > 0., smooth out one-hot probability by
spreading the amount of probability to all other label classes.
class_weights: A float list containing the weight of each class.
ignore_label: An integer specifying the ignore label.
use_groundtruth_dimension: A boolean, whether to resize the output to
match the dimension of the ground truth.
use_binary_cross_entropy: A boolean, if true, use binary cross entropy
loss, otherwise, use categorical cross entropy.
top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its
value < 1., only compute the loss for the top k percent pixels. This is
useful for hard pixel mining.
      gt_is_matting_map: Whether or not the groundtruth mask is a matting map.
        Note that the matting map is only supported for 2-class segmentation.
"""
self._label_smoothing = label_smoothing
self._class_weights = class_weights
self._ignore_label = ignore_label
self._use_groundtruth_dimension = use_groundtruth_dimension
self._use_binary_cross_entropy = use_binary_cross_entropy
self._top_k_percent_pixels = top_k_percent_pixels
self._gt_is_matting_map = gt_is_matting_map
def __call__(self, logits, labels, **kwargs):
"""Computes `SegmentationLoss`.
Args:
logits: A float tensor in shape (batch_size, height, width, num_classes)
which is the output of the network.
labels: A tensor in shape (batch_size, height, width, num_layers), which
        contains the label masks of the ground truth. The num_layers can be
        > 1 if the pixels are labeled as multiple classes.
**kwargs: additional keyword arguments.
Returns:
A 0-D float which stores the overall loss of the batch.
"""
_, height, width, num_classes = logits.get_shape().as_list()
output_dtype = logits.dtype
num_layers = labels.get_shape().as_list()[-1]
if not self._use_binary_cross_entropy:
if num_layers > 1:
raise ValueError(
            'Groundtruth mask must have only 1 layer if using categorical '
'cross entropy, but got {} layers.'.format(num_layers))
if self._gt_is_matting_map:
if num_classes != 2:
raise ValueError(
'Groundtruth matting map only supports 2 classes, but got {} '
'classes.'.format(num_classes))
if num_layers > 1:
raise ValueError(
'Groundtruth matting map must have only 1 layer, but got {} '
'layers.'.format(num_layers))
class_weights = (
self._class_weights if self._class_weights else [1] * num_classes)
if num_classes != len(class_weights):
raise ValueError(
'Length of class_weights should be {}'.format(num_classes))
class_weights = tf.constant(class_weights, dtype=output_dtype)
if not self._gt_is_matting_map:
labels = tf.cast(labels, tf.int32)
if self._use_groundtruth_dimension:
# TODO(arashwan): Test using align corners to match deeplab alignment.
logits = tf.image.resize(
logits, tf.shape(labels)[1:3], method=tf.image.ResizeMethod.BILINEAR)
else:
labels = tf.image.resize(
labels, (height, width),
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
valid_mask = tf.not_equal(tf.cast(labels, tf.int32), self._ignore_label)
# (batch_size, height, width, num_classes)
labels_with_prob = self.get_labels_with_prob(logits, labels, valid_mask,
**kwargs)
# (batch_size, height, width)
valid_mask = tf.cast(tf.reduce_any(valid_mask, axis=-1), dtype=output_dtype)
if self._use_binary_cross_entropy:
# (batch_size, height, width, num_classes)
cross_entropy_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels_with_prob, logits=logits)
# (batch_size, height, width, num_classes)
cross_entropy_loss *= class_weights
num_valid_values = tf.reduce_sum(valid_mask) * tf.cast(
num_classes, output_dtype)
# (batch_size, height, width, num_classes)
cross_entropy_loss *= valid_mask[..., tf.newaxis]
else:
# (batch_size, height, width)
cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels_with_prob, logits=logits)
      # If the groundtruth is a matting map, binarize the values to create
      # the weight mask.
if self._gt_is_matting_map:
labels = utils.binarize_matting_map(labels)
# (batch_size, height, width)
weight_mask = tf.einsum(
'...y,y->...',
tf.one_hot(
tf.cast(tf.squeeze(labels, axis=-1), tf.int32),
depth=num_classes,
dtype=output_dtype), class_weights)
cross_entropy_loss *= weight_mask
num_valid_values = tf.reduce_sum(valid_mask)
cross_entropy_loss *= valid_mask
if self._top_k_percent_pixels < 1.0:
return self.aggregate_loss_top_k(cross_entropy_loss, num_valid_values)
else:
return tf.reduce_sum(cross_entropy_loss) / (num_valid_values + EPSILON)
def get_labels_with_prob(self, logits, labels, valid_mask, **unused_kwargs):
"""Get a tensor representing the probability of each class for each pixel.
This method can be overridden in subclasses for customizing loss function.
Args:
logits: A float tensor in shape (batch_size, height, width, num_classes)
which is the output of the network.
labels: A tensor in shape (batch_size, height, width, num_layers), which
        contains the label masks of the ground truth. The num_layers can be
        > 1 if the pixels are labeled as multiple classes.
valid_mask: A bool tensor in shape (batch_size, height, width, num_layers)
which indicates the ignored labels in each ground truth layer.
**unused_kwargs: Unused keyword arguments.
Returns:
A float tensor in shape (batch_size, height, width, num_classes).
"""
num_classes = logits.get_shape().as_list()[-1]
if self._gt_is_matting_map:
# (batch_size, height, width, num_classes=2)
train_labels = tf.concat([1 - labels, labels], axis=-1)
else:
labels = tf.cast(labels, tf.int32)
      # Assigns pixels with the ignore label to class -1, which will be
      # ignored by the tf.one_hot operation.
# (batch_size, height, width, num_masks)
labels = tf.where(valid_mask, labels, -tf.ones_like(labels))
if self._use_binary_cross_entropy:
# (batch_size, height, width, num_masks, num_classes)
one_hot_labels_per_mask = tf.one_hot(
labels,
depth=num_classes,
on_value=True,
off_value=False,
dtype=tf.bool,
axis=-1)
# Aggregate all one-hot labels to get a binary mask in shape
# (batch_size, height, width, num_classes), which represents all the
# classes that a pixel is labeled as.
# For example, if a pixel is labeled as "window" (id=1) and also being a
# part of the "building" (id=3), then its train_labels are [0,1,0,1].
train_labels = tf.cast(
tf.reduce_any(one_hot_labels_per_mask, axis=-2), dtype=logits.dtype)
else:
# (batch_size, height, width, num_classes)
train_labels = tf.one_hot(
tf.squeeze(labels, axis=-1), depth=num_classes, dtype=logits.dtype)
return train_labels * (
1 - self._label_smoothing) + self._label_smoothing / num_classes
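  # Illustrative check (not part of the original file) of the label-smoothing
  # formula above: with label_smoothing=0.1 and num_classes=4, a one-hot
  # target [0, 1, 0, 0] becomes
  # [0, 1, 0, 0] * 0.9 + 0.1 / 4 = [0.025, 0.925, 0.025, 0.025].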
def aggregate_loss_top_k(self, pixelwise_loss, num_valid_pixels=None):
"""Aggregate the top-k greatest pixelwise loss.
Args:
pixelwise_loss: a float tensor in shape (batch_size, height, width) which
stores the loss of each pixel.
num_valid_pixels: the number of pixels which are not ignored. If None, all
the pixels are valid.
Returns:
A 0-D float which stores the overall loss of the batch.
"""
pixelwise_loss = tf.reshape(pixelwise_loss, shape=[-1])
top_k_pixels = tf.cast(
self._top_k_percent_pixels
* tf.cast(tf.size(pixelwise_loss), tf.float32),
tf.int32,
)
top_k_losses, _ = tf.math.top_k(pixelwise_loss, k=top_k_pixels)
normalizer = tf.cast(top_k_pixels, top_k_losses.dtype)
if num_valid_pixels is not None:
normalizer = tf.minimum(normalizer,
tf.cast(num_valid_pixels, top_k_losses.dtype))
return tf.reduce_sum(top_k_losses) / (normalizer + EPSILON)
def get_actual_mask_scores(logits, labels, ignore_label):
"""Gets actual mask scores."""
_, height, width, num_classes = logits.get_shape().as_list()
batch_size = tf.shape(logits)[0]
logits = tf.stop_gradient(logits)
labels = tf.image.resize(
labels, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
predicted_labels = tf.argmax(logits, -1, output_type=tf.int32)
flat_predictions = tf.reshape(predicted_labels, [batch_size, -1])
flat_labels = tf.cast(tf.reshape(labels, [batch_size, -1]), tf.int32)
one_hot_predictions = tf.one_hot(
flat_predictions, num_classes, on_value=True, off_value=False)
one_hot_labels = tf.one_hot(
flat_labels, num_classes, on_value=True, off_value=False)
keep_mask = tf.not_equal(flat_labels, ignore_label)
keep_mask = tf.expand_dims(keep_mask, 2)
overlap = tf.logical_and(one_hot_predictions, one_hot_labels)
overlap = tf.logical_and(overlap, keep_mask)
overlap = tf.reduce_sum(tf.cast(overlap, tf.float32), axis=1)
union = tf.logical_or(one_hot_predictions, one_hot_labels)
union = tf.logical_and(union, keep_mask)
union = tf.reduce_sum(tf.cast(union, tf.float32), axis=1)
actual_scores = tf.divide(overlap, tf.maximum(union, EPSILON))
return actual_scores
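# Illustrative sketch (not part of the original file): the "actual score" of a
# class is the IoU between its predicted and groundtruth masks. For a 1x2
# image where the prediction argmax is [0, 1] and the label is [1, 1], class 1
# has overlap 1 and union 2, so its score is 0.5 (and class 0 scores 0.0).
def _demo_actual_mask_scores():
  logits = tf.constant([[[[1., 0.], [0., 1.]]]])  # (1, 1, 2, 2)
  labels = tf.constant([[[[1.], [1.]]]])          # (1, 1, 2, 1)
  return get_actual_mask_scores(logits, labels, ignore_label=255)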
class MaskScoringLoss:
"""Mask Scoring loss."""
def __init__(self, ignore_label):
self._ignore_label = ignore_label
self._mse_loss = tf.keras.losses.MeanSquaredError(
reduction=tf.keras.losses.Reduction.NONE)
def __call__(self, predicted_scores, logits, labels):
actual_scores = get_actual_mask_scores(logits, labels, self._ignore_label)
loss = tf_utils.safe_mean(self._mse_loss(actual_scores, predicted_scores))
return loss
| 11,414 | 40.358696 | 80 | py |
models | models-master/official/vision/losses/focal_loss.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for detection models."""
import tensorflow as tf
class FocalLoss(tf.keras.losses.Loss):
"""Implements a Focal loss for classification problems.
Reference:
[Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self,
alpha,
gamma,
reduction=tf.keras.losses.Reduction.AUTO,
name=None):
"""Initializes `FocalLoss`.
Args:
alpha: The `alpha` weight factor for binary class imbalance.
gamma: The `gamma` focusing parameter to re-weight loss.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'retinanet_class_loss'.
"""
self._alpha = alpha
self._gamma = gamma
super(FocalLoss, self).__init__(reduction=reduction, name=name)
def call(self, y_true, y_pred):
"""Invokes the `FocalLoss`.
Args:
y_true: A tensor of size [batch, num_anchors, num_classes]
y_pred: A tensor of size [batch, num_anchors, num_classes]
Returns:
Summed loss float `Tensor`.
"""
with tf.name_scope('focal_loss'):
y_true = tf.cast(y_true, dtype=tf.float32)
y_pred = tf.cast(y_pred, dtype=tf.float32)
positive_label_mask = tf.equal(y_true, 1.0)
cross_entropy = (
tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred))
probs = tf.sigmoid(y_pred)
probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs)
# With small gamma, the implementation could produce NaN during back prop.
modulator = tf.pow(1.0 - probs_gt, self._gamma)
loss = modulator * cross_entropy
weighted_loss = tf.where(positive_label_mask, self._alpha * loss,
(1.0 - self._alpha) * loss)
return weighted_loss
def get_config(self):
config = {
'alpha': self._alpha,
'gamma': self._gamma,
}
base_config = super(FocalLoss, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| 3,229 | 37 | 80 | py |
models | models-master/official/vision/losses/retinanet_losses.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for detection models."""
# Import libraries
import tensorflow as tf
def focal_loss(logits, targets, alpha, gamma):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
logits: A float32 tensor of size
[batch, d_1, ..., d_k, n_classes].
targets: A float32 tensor of size
[batch, d_1, ..., d_k, n_classes].
    alpha: A float32 scalar that weights the loss from positive examples by
        alpha and the loss from negative examples by (1 - alpha).
gamma: A float32 scalar modulating loss from hard and easy examples.
Returns:
loss: A float32 Tensor of size
[batch, d_1, ..., d_k, n_classes] representing
normalized loss on the prediction map.
"""
with tf.name_scope('focal_loss'):
positive_label_mask = tf.equal(targets, 1.0)
cross_entropy = (
tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits))
probs = tf.sigmoid(logits)
probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs)
# With small gamma, the implementation could produce NaN during back prop.
modulator = tf.pow(1.0 - probs_gt, gamma)
loss = modulator * cross_entropy
weighted_loss = tf.where(positive_label_mask, alpha * loss,
(1.0 - alpha) * loss)
return weighted_loss
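# Illustrative check (not part of the original file): for a positive example
# with predicted probability p_t = 0.9, alpha = 0.25 and gamma = 2.0, the
# focal loss is 0.25 * (1 - 0.9)^2 * (-log(0.9)) ~= 2.6e-4, versus ~0.105 for
# plain cross entropy, i.e. well-classified examples are heavily
# down-weighted.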
class FocalLoss(tf.keras.losses.Loss):
"""Implements a Focal loss for classification problems.
Reference:
[Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self,
alpha,
gamma,
num_classes,
reduction=tf.keras.losses.Reduction.AUTO,
name=None):
"""Initializes `FocalLoss`.
Args:
alpha: The `alpha` weight factor for binary class imbalance.
gamma: The `gamma` focusing parameter to re-weight loss.
num_classes: Number of foreground classes.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'retinanet_class_loss'.
"""
self._num_classes = num_classes
self._alpha = alpha
self._gamma = gamma
super(FocalLoss, self).__init__(reduction=reduction, name=name)
def call(self, y_true, y_pred):
"""Invokes the `FocalLoss`.
Args:
      y_true: Ordered Dict with level to [batch, height, width, num_anchors].
        For example,
        {3: tf.Tensor(shape=[32, 512, 512, 9], dtype=tf.int64),
         4: tf.Tensor(shape=[32, 256, 256, 9], dtype=tf.int64)}
      y_pred: Ordered Dict with level to [batch, height, width, num_anchors *
        num_classes]. For example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 21], dtype=tf.float32),
         4: tf.Tensor(shape=[32, 256, 256, 9 * 21], dtype=tf.float32)}
Returns:
Summed loss float `Tensor`.
"""
flattened_cls_outputs = []
flattened_labels = []
batch_size = None
for level in y_pred.keys():
cls_output = y_pred[level]
label = y_true[level]
if batch_size is None:
batch_size = cls_output.shape[0] or tf.shape(cls_output)[0]
flattened_cls_outputs.append(
tf.reshape(cls_output, [batch_size, -1, self._num_classes]))
flattened_labels.append(tf.reshape(label, [batch_size, -1]))
cls_outputs = tf.concat(flattened_cls_outputs, axis=1)
labels = tf.concat(flattened_labels, axis=1)
cls_targets_one_hot = tf.one_hot(labels, self._num_classes)
return focal_loss(
tf.cast(cls_outputs, dtype=tf.float32),
tf.cast(cls_targets_one_hot, dtype=tf.float32), self._alpha,
self._gamma)
def get_config(self):
config = {
'alpha': self._alpha,
'gamma': self._gamma,
'num_classes': self._num_classes,
}
base_config = super(FocalLoss, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RetinanetBoxLoss(tf.keras.losses.Loss):
"""RetinaNet box Huber loss."""
def __init__(self,
delta,
reduction=tf.keras.losses.Reduction.AUTO,
name=None):
"""Initializes `RetinanetBoxLoss`.
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'retinanet_class_loss'.
"""
self._huber_loss = tf.keras.losses.Huber(
delta=delta, reduction=tf.keras.losses.Reduction.NONE)
self._delta = delta
super(RetinanetBoxLoss, self).__init__(reduction=reduction, name=name)
def call(self, y_true, y_pred):
"""Computes box detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
      y_true: Ordered Dict with level to [batch, height, width,
        num_anchors * 4]. For example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.float32),
         4: tf.Tensor(shape=[32, 256, 256, 9 * 4], dtype=tf.float32)}
      y_pred: Ordered Dict with level to [batch, height, width,
        num_anchors * 4]. For example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.float32),
         4: tf.Tensor(shape=[32, 256, 256, 9 * 4], dtype=tf.float32)}
Returns:
      a float tensor representing the total box regression loss.
"""
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
flattened_box_outputs = []
flattened_labels = []
batch_size = None
for level in y_pred.keys():
box_output = y_pred[level]
label = y_true[level]
if batch_size is None:
batch_size = box_output.shape[0] or tf.shape(box_output)[0]
flattened_box_outputs.append(tf.reshape(box_output, [batch_size, -1, 4]))
flattened_labels.append(tf.reshape(label, [batch_size, -1, 4]))
box_outputs = tf.concat(flattened_box_outputs, axis=1)
labels = tf.concat(flattened_labels, axis=1)
loss = self._huber_loss(labels, box_outputs)
return loss
def get_config(self):
config = {
'delta': self._delta,
}
base_config = super(RetinanetBoxLoss, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| 8,097 | 38.120773 | 80 | py |
models | models-master/official/vision/losses/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/recommendation/data_preprocessing.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocess dataset and construct any necessary artifacts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import time
import timeit
import typing
from typing import Dict, Text, Tuple
from absl import logging
import numpy as np
import pandas as pd
import tensorflow as tf
from official.recommendation import constants as rconst
from official.recommendation import data_pipeline
from official.recommendation import movielens
_EXPECTED_CACHE_KEYS = (rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY,
rconst.EVAL_USER_KEY, rconst.EVAL_ITEM_KEY,
rconst.USER_MAP, rconst.ITEM_MAP)
def read_dataframe(
raw_rating_path: Text
) -> Tuple[Dict[int, int], Dict[int, int], pd.DataFrame]:
"""Read in data CSV, and output DataFrame for downstream processing.
This function reads in the raw CSV of positive items, and performs three
preprocessing transformations:
1) Filter out all users who have not rated at least a certain number
of items. (Typically 20 items)
2) Zero index the users and items such that the largest user_id is
`num_users - 1` and the largest item_id is `num_items - 1`
3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
This allows the dataframe to be sliced by user in-place, and for the last
item to be selected simply by calling the `-1` index of a user's slice.
Args:
raw_rating_path: The path to the CSV which contains the raw dataset.
Returns:
A dict mapping raw user IDs to regularized user IDs, a dict mapping raw
item IDs to regularized item IDs, and a filtered, zero-index remapped,
sorted dataframe.
"""
with tf.io.gfile.GFile(raw_rating_path) as f:
df = pd.read_csv(f)
# Get the info of users who have more than 20 ratings on items
grouped = df.groupby(movielens.USER_COLUMN)
df = grouped.filter(
lambda x: len(x) >= rconst.MIN_NUM_RATINGS) # type: pd.DataFrame
original_users = df[movielens.USER_COLUMN].unique()
original_items = df[movielens.ITEM_COLUMN].unique()
# Map the ids of user and item to 0 based index for following processing
logging.info("Generating user_map and item_map...")
user_map = {user: index for index, user in enumerate(original_users)}
item_map = {item: index for index, item in enumerate(original_items)}
df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
lambda user: user_map[user])
df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
lambda item: item_map[item])
num_users = len(original_users)
num_items = len(original_items)
assert num_users <= np.iinfo(rconst.USER_DTYPE).max
assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
assert df[movielens.USER_COLUMN].max() == num_users - 1
assert df[movielens.ITEM_COLUMN].max() == num_items - 1
# This sort is used to shard the dataframe by user, and later to select
# the last item for a user to be used in validation.
logging.info("Sorting by user, timestamp...")
# This sort is equivalent to
# df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
# inplace=True)
# except that the order of items with the same user and timestamp are
# sometimes different. For some reason, this sort results in a better
# hit-rate during evaluation, matching the performance of the MLPerf
# reference implementation.
df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
inplace=True,
kind="mergesort")
# The dataframe does not reconstruct indices in the sort or filter steps.
return user_map, item_map, df.reset_index()
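# Illustrative sketch (not part of the original file): because the frame is
# sorted by (user, timestamp), each user's most recent rating -- the one held
# out for evaluation downstream -- is simply the last row of that user's
# slice.
def _demo_last_item_per_user(raw_rating_path):
  _, _, df = read_dataframe(raw_rating_path)
  return df.groupby(movielens.USER_COLUMN, group_keys=False).tail(1)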
def _filter_index_sort(
    raw_rating_path: Text,
    cache_path: Text) -> Tuple[Dict[Text, typing.Any], bool]:
"""Read in data CSV, and output structured data.
This function reads in the raw CSV of positive items, and performs three
preprocessing transformations:
1) Filter out all users who have not rated at least a certain number
of items. (Typically 20 items)
2) Zero index the users and items such that the largest user_id is
`num_users - 1` and the largest item_id is `num_items - 1`
3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
This allows the dataframe to be sliced by user in-place, and for the last
item to be selected simply by calling the `-1` index of a user's slice.
While all of these transformations are performed by Pandas (and are therefore
single-threaded), they only take ~2 minutes, and the overhead to apply a
MapReduce pattern to parallel process the dataset adds significant complexity
  for no computational gain. For a larger dataset, parallelizing this
  preprocessing could yield speedups. (Also, this preprocessing step is only
  performed once for an entire run.)
Args:
raw_rating_path: The path to the CSV which contains the raw dataset.
cache_path: The path to the file where results of this function are saved.
  Returns:
    A dict of the cached data arrays (train and eval user/item arrays, the
    raw-to-regularized user and item ID maps, and a creation timestamp), and a
    bool indicating whether a valid cache file was found and reused.
"""
valid_cache = tf.io.gfile.exists(cache_path)
if valid_cache:
with tf.io.gfile.GFile(cache_path, "rb") as f:
cached_data = pickle.load(f)
    # (nnigania) Disabled this check, as the dataset is not expected to change.
# cache_age = time.time() - cached_data.get("create_time", 0)
# if cache_age > rconst.CACHE_INVALIDATION_SEC:
# valid_cache = False
for key in _EXPECTED_CACHE_KEYS:
if key not in cached_data:
valid_cache = False
if not valid_cache:
logging.info("Removing stale raw data cache file.")
tf.io.gfile.remove(cache_path)
if valid_cache:
data = cached_data
else:
user_map, item_map, df = read_dataframe(raw_rating_path)
grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)
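    # Leave-one-out split (illustrative): for a user whose rows, sorted by
    # timestamp, are [r0, r1, r2], train receives [r0, r1] and eval receives
    # only the most recent row, r2.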
eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])
data = {
rconst.TRAIN_USER_KEY:
train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
rconst.TRAIN_ITEM_KEY:
train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
rconst.EVAL_USER_KEY:
eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),
rconst.EVAL_ITEM_KEY:
eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),
rconst.USER_MAP:
user_map,
rconst.ITEM_MAP:
item_map,
"create_time":
time.time(),
}
logging.info("Writing raw data cache.")
with tf.io.gfile.GFile(cache_path, "wb") as f:
pickle.dump(data, f, protocol=4)
# TODO(robieta): MLPerf cache clear.
return data, valid_cache
def instantiate_pipeline(dataset,
data_dir,
params,
constructor_type=None,
deterministic=False,
epoch_dir=None,
generate_data_offline=False):
# type: (str, str, dict, typing.Optional[str], bool, typing.Optional[str], bool) -> (int, int, data_pipeline.BaseDataConstructor)
"""Load and digest data CSV into a usable form.
Args:
dataset: The name of the dataset to be used.
data_dir: The root directory of the dataset.
params: dict of parameters for the run.
constructor_type: The name of the constructor subclass that should be used
for the input pipeline.
deterministic: Tell the data constructor to produce deterministically.
epoch_dir: Directory in which to store the training epochs.
generate_data_offline: Boolean, whether current pipeline is done offline or
while training.
"""
logging.info("Beginning data preprocessing.")
st = timeit.default_timer()
raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)
raw_data, _ = _filter_index_sort(raw_rating_path, cache_path)
user_map, item_map = raw_data["user_map"], raw_data["item_map"]
num_users, num_items = movielens.DATASET_TO_NUM_USERS_AND_ITEMS[dataset]
if num_users != len(user_map):
raise ValueError("Expected to find {} users, but found {}".format(
num_users, len(user_map)))
if num_items != len(item_map):
raise ValueError("Expected to find {} items, but found {}".format(
num_items, len(item_map)))
producer = data_pipeline.get_constructor(constructor_type or "materialized")(
maximum_number_epochs=params["train_epochs"],
num_users=num_users,
num_items=num_items,
user_map=user_map,
item_map=item_map,
train_pos_users=raw_data[rconst.TRAIN_USER_KEY],
train_pos_items=raw_data[rconst.TRAIN_ITEM_KEY],
train_batch_size=params["batch_size"],
batches_per_train_step=params["batches_per_step"],
num_train_negatives=params["num_neg"],
eval_pos_users=raw_data[rconst.EVAL_USER_KEY],
eval_pos_items=raw_data[rconst.EVAL_ITEM_KEY],
eval_batch_size=params["eval_batch_size"],
batches_per_eval_step=params["batches_per_step"],
stream_files=params["stream_files"],
deterministic=deterministic,
epoch_dir=epoch_dir,
create_data_offline=generate_data_offline)
run_time = timeit.default_timer() - st
logging.info(
"Data preprocessing complete. Time: {:.1f} sec.".format(run_time))
print(producer)
return num_users, num_items, producer
| 10,308 | 37.755639 | 131 | py |
models | models-master/official/recommendation/constants.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Central location for NCF specific values."""
import sys
import numpy as np
from official.recommendation import movielens
# ==============================================================================
# == Main Thread Data Processing ===============================================
# ==============================================================================
# Keys for data shards
TRAIN_USER_KEY = "train_{}".format(movielens.USER_COLUMN)
TRAIN_ITEM_KEY = "train_{}".format(movielens.ITEM_COLUMN)
TRAIN_LABEL_KEY = "train_labels"
MASK_START_INDEX = "mask_start_index"
VALID_POINT_MASK = "valid_point_mask"
EVAL_USER_KEY = "eval_{}".format(movielens.USER_COLUMN)
EVAL_ITEM_KEY = "eval_{}".format(movielens.ITEM_COLUMN)
USER_MAP = "user_map"
ITEM_MAP = "item_map"
USER_DTYPE = np.int32
ITEM_DTYPE = np.int32
# In both datasets, each user has at least 20 ratings.
MIN_NUM_RATINGS = 20
# The number of negative examples attached with a positive example
# when performing evaluation.
NUM_EVAL_NEGATIVES = 999
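# Each evaluation "group" therefore scores 1 + NUM_EVAL_NEGATIVES = 1000 items
# per user: 999 sampled negatives followed by the holdout positive.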
# keys for evaluation metrics
TOP_K = 10 # Top-k list for evaluation
HR_KEY = "HR"
NDCG_KEY = "NDCG"
DUPLICATE_MASK = "duplicate_mask"
# Metric names
HR_METRIC_NAME = "HR_METRIC"
NDCG_METRIC_NAME = "NDCG_METRIC"
# Trying to load a cache created in py2 when running in py3 will cause an
# error due to differences in unicode handling.
RAW_CACHE_FILE = "raw_data_cache_py{}.pickle".format(sys.version_info[0])
CACHE_INVALIDATION_SEC = 3600 * 24
# ==============================================================================
# == Data Generation ===========================================================
# ==============================================================================
CYCLES_TO_BUFFER = 3 # The number of train cycles worth of data to "run ahead"
# of the main training loop.
# Number of batches to run per epoch when using synthetic data. At high batch
# sizes, we run for more batches than with real data, which is good since
# running more batches reduces noise when measuring the average batches/second.
SYNTHETIC_BATCHES_PER_EPOCH = 2000
# Only used when StreamingFilesDataset is used.
NUM_FILE_SHARDS = 16
TRAIN_FOLDER_TEMPLATE = "training_cycle_{}"
EVAL_FOLDER = "eval_data"
SHARD_TEMPLATE = "shard_{}.tfrecords"
| 2,877 | 34.975 | 80 | py |
models | models-master/official/recommendation/neumf_model.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines NeuMF model for NCF framework.
Some abbreviations used in the code base:
NeuMF: Neural Matrix Factorization
NCF: Neural Collaborative Filtering
GMF: Generalized Matrix Factorization
MLP: Multi-Layer Perceptron
GMF applies a linear kernel to model the latent feature interactions, and MLP
uses a nonlinear kernel to learn the interaction function from data. The NeuMF
model fuses GMF and MLP to better model the complex user-item interactions,
unifying the strengths of the linearity of MF and the non-linearity of MLP for
modeling the user-item latent structures.
In NeuMF model, it allows GMF and MLP to learn separate embeddings, and combine
the two models by concatenating their last hidden layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from typing import Any, Dict, Text
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import ncf_common
from official.recommendation import stat_utils
def sparse_to_dense_grads(grads_and_vars):
"""Convert sparse gradients to dense gradients.
All sparse gradients, which are represented as instances of tf.IndexedSlices,
are converted to dense Tensors. Dense gradients, which are represented as
Tensors, are unchanged.
The purpose of this conversion is that for small embeddings, which are used by
this model, applying dense gradients with the AdamOptimizer is faster than
applying sparse gradients.
  Args:
grads_and_vars: A list of (gradient, variable) tuples. Each gradient can
be a Tensor or an IndexedSlices. Tensors are unchanged, and IndexedSlices
are converted to dense Tensors.
Returns:
The same list of (gradient, variable) as `grads_and_vars`, except each
IndexedSlices gradient is converted to a Tensor.
"""
# Calling convert_to_tensor changes IndexedSlices into Tensors, and leaves
# Tensors unchanged.
return [(tf.convert_to_tensor(g), v) for g, v in grads_and_vars]
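# Usage sketch (illustrative; mirrors the training branch below):
#   grads_and_vars = optimizer.compute_gradients(loss, tvars)
#   optimizer.apply_gradients(sparse_to_dense_grads(grads_and_vars))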
def neumf_model_fn(features, labels, mode, params):
"""Model Function for NeuMF estimator."""
if params.get("use_seed"):
tf.set_random_seed(stat_utils.random_int32())
users = features[movielens.USER_COLUMN]
items = features[movielens.ITEM_COLUMN]
user_input = tf.keras.layers.Input(tensor=users)
item_input = tf.keras.layers.Input(tensor=items)
logits = construct_model(user_input, item_input, params).output
# Softmax with the first column of zeros is equivalent to sigmoid.
softmax_logits = ncf_common.convert_to_softmax_logits(logits)
if mode == tf_estimator.ModeKeys.EVAL:
duplicate_mask = tf.cast(features[rconst.DUPLICATE_MASK], tf.float32)
return _get_estimator_spec_with_metrics(
logits,
softmax_logits,
duplicate_mask,
params["num_neg"],
params["match_mlperf"],
use_tpu_spec=params["use_tpu"])
elif mode == tf_estimator.ModeKeys.TRAIN:
labels = tf.cast(labels, tf.int32)
valid_pt_mask = features[rconst.VALID_POINT_MASK]
optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=params["learning_rate"],
beta1=params["beta1"],
beta2=params["beta2"],
epsilon=params["epsilon"])
if params["use_tpu"]:
optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(
labels=labels,
logits=softmax_logits,
weights=tf.cast(valid_pt_mask, tf.float32))
tf.identity(loss, name="cross_entropy")
global_step = tf.compat.v1.train.get_global_step()
tvars = tf.compat.v1.trainable_variables()
gradients = optimizer.compute_gradients(
loss, tvars, colocate_gradients_with_ops=True)
gradients = sparse_to_dense_grads(gradients)
minimize_op = optimizer.apply_gradients(
gradients, global_step=global_step, name="train")
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops)
return tf_estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
else:
raise NotImplementedError
def _strip_first_and_last_dimension(x, batch_size):
return tf.reshape(x[0, :], (batch_size,))
def construct_model(user_input: tf.Tensor, item_input: tf.Tensor,
params: Dict[Text, Any]) -> tf.keras.Model:
"""Initialize NeuMF model.
Args:
user_input: keras input layer for users
item_input: keras input layer for items
params: Dict of hyperparameters.
Raises:
    ValueError: If the size of the first model layer is not even.
Returns:
model: a keras Model for computing the logits
"""
num_users = params["num_users"]
num_items = params["num_items"]
model_layers = params["model_layers"]
mf_regularization = params["mf_regularization"]
mlp_reg_layers = params["mlp_reg_layers"]
mf_dim = params["mf_dim"]
if model_layers[0] % 2 != 0:
raise ValueError("The first layer size should be multiple of 2!")
# Initializer for embedding layers
embedding_initializer = "glorot_uniform"
def mf_slice_fn(x):
x = tf.squeeze(x, [1])
return x[:, :mf_dim]
def mlp_slice_fn(x):
x = tf.squeeze(x, [1])
return x[:, mf_dim:]
  # It turns out to be significantly more efficient to store the MF and MLP
# embedding portions in the same table, and then slice as needed.
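  # Row layout of the shared table (illustrative):
  #   columns [0, mf_dim)                             -> GMF (mf_slice_fn)
  #   columns [mf_dim, mf_dim + model_layers[0] // 2) -> MLP (mlp_slice_fn)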
embedding_user = tf.keras.layers.Embedding(
num_users,
mf_dim + model_layers[0] // 2,
embeddings_initializer=embedding_initializer,
embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization),
input_length=1,
name="embedding_user")(
user_input)
embedding_item = tf.keras.layers.Embedding(
num_items,
mf_dim + model_layers[0] // 2,
embeddings_initializer=embedding_initializer,
embeddings_regularizer=tf.keras.regularizers.l2(mf_regularization),
input_length=1,
name="embedding_item")(
item_input)
# GMF part
mf_user_latent = tf.keras.layers.Lambda(
mf_slice_fn, name="embedding_user_mf")(
embedding_user)
mf_item_latent = tf.keras.layers.Lambda(
mf_slice_fn, name="embedding_item_mf")(
embedding_item)
# MLP part
mlp_user_latent = tf.keras.layers.Lambda(
mlp_slice_fn, name="embedding_user_mlp")(
embedding_user)
mlp_item_latent = tf.keras.layers.Lambda(
mlp_slice_fn, name="embedding_item_mlp")(
embedding_item)
# Element-wise multiply
mf_vector = tf.keras.layers.multiply([mf_user_latent, mf_item_latent])
# Concatenation of two latent features
mlp_vector = tf.keras.layers.concatenate([mlp_user_latent, mlp_item_latent])
num_layer = len(model_layers) # Number of layers in the MLP
for layer in xrange(1, num_layer):
model_layer = tf.keras.layers.Dense(
model_layers[layer],
kernel_regularizer=tf.keras.regularizers.l2(mlp_reg_layers[layer]),
activation="relu")
mlp_vector = model_layer(mlp_vector)
# Concatenate GMF and MLP parts
predict_vector = tf.keras.layers.concatenate([mf_vector, mlp_vector])
# Final prediction layer
logits = tf.keras.layers.Dense(
1,
activation=None,
kernel_initializer="lecun_uniform",
name=movielens.RATING_COLUMN)(
predict_vector)
# Print model topology.
model = tf.keras.models.Model([user_input, item_input], logits)
model.summary()
sys.stdout.flush()
return model
def _get_estimator_spec_with_metrics(logits: tf.Tensor,
softmax_logits: tf.Tensor,
duplicate_mask: tf.Tensor,
num_training_neg: int,
match_mlperf: bool = False,
use_tpu_spec: bool = False):
"""Returns a EstimatorSpec that includes the metrics."""
cross_entropy, \
metric_fn, \
in_top_k, \
ndcg, \
metric_weights = compute_eval_loss_and_metrics_helper(
logits,
softmax_logits,
duplicate_mask,
num_training_neg,
match_mlperf)
if use_tpu_spec:
return tf_estimator.tpu.TPUEstimatorSpec(
mode=tf_estimator.ModeKeys.EVAL,
loss=cross_entropy,
eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights]))
return tf_estimator.EstimatorSpec(
mode=tf_estimator.ModeKeys.EVAL,
loss=cross_entropy,
eval_metric_ops=metric_fn(in_top_k, ndcg, metric_weights))
def compute_eval_loss_and_metrics_helper(logits: tf.Tensor,
softmax_logits: tf.Tensor,
duplicate_mask: tf.Tensor,
num_training_neg: int,
match_mlperf: bool = False):
"""Model evaluation with HR and NDCG metrics.
The evaluation protocol is to rank the test interacted item (truth items)
among the randomly chosen 999 items that are not interacted by the user.
The performance of the ranked list is judged by Hit Ratio (HR) and Normalized
Discounted Cumulative Gain (NDCG).
For evaluation, the ranked list is truncated at 10 for both metrics. As such,
the HR intuitively measures whether the test item is present on the top-10
list, and the NDCG accounts for the position of the hit by assigning higher
scores to hits at top ranks. Both metrics are calculated for each test user,
and the average scores are reported.
If `match_mlperf` is True, then the HR and NDCG computations are done in a
slightly unusual way to match the MLPerf reference implementation.
  Specifically, if the evaluation negatives contain duplicate items, they will
  be treated as if each item only appeared once. Effectively, for duplicate
  items in a row, the predicted score for all but one of the items will be set
  to -infinity.
  For example, suppose we have the following inputs:
logits_by_user: [[ 2, 3, 3],
[ 5, 4, 4]]
items_by_user: [[10, 20, 20],
[30, 40, 40]]
# Note: items_by_user is not explicitly present. Instead the relevant \
information is contained within `duplicate_mask`
top_k: 2
Then with match_mlperf=True, the HR would be 2/2 = 1.0. With
match_mlperf=False, the HR would be 1/2 = 0.5. This is because each user has
predicted scores for only 2 unique items: 10 and 20 for the first user, and 30
and 40 for the second. Therefore, with match_mlperf=True, it's guaranteed the
first item's score is in the top 2. With match_mlperf=False, this function
would compute the first user's first item is not in the top 2, because item 20
has a higher score, and item 20 occurs twice.
Args:
logits: A tensor containing the predicted logits for each user. The shape of
      logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),). Logits for a
user are grouped, and the last element of the group is the true element.
softmax_logits: The same tensor, but with zeros left-appended.
duplicate_mask: A vector with the same shape as logits, with a value of 1 if
the item corresponding to the logit at that position has already appeared
for that user.
num_training_neg: The number of negatives per positive during training.
match_mlperf: Use the MLPerf reference convention for computing rank.
Returns:
cross_entropy: the loss
metric_fn: the metrics function
in_top_k: hit rate metric
ndcg: ndcg metric
metric_weights: metric weights
"""
in_top_k, ndcg, metric_weights, logits_by_user = compute_top_k_and_ndcg(
logits, duplicate_mask, match_mlperf)
# Examples are provided by the eval Dataset in a structured format, so eval
# labels can be reconstructed on the fly.
eval_labels = tf.reshape(
shape=(-1,),
tensor=tf.one_hot(
tf.zeros(shape=(logits_by_user.shape[0],), dtype=tf.int32) +
rconst.NUM_EVAL_NEGATIVES,
logits_by_user.shape[1],
dtype=tf.int32))
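  # Illustrative layout: each user's label row is [0, ..., 0, 1], with the
  # single 1 at column rconst.NUM_EVAL_NEGATIVES (the holdout positive's
  # slot), flattened to align with the flattened logits.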
eval_labels_float = tf.cast(eval_labels, tf.float32)
# During evaluation, the ratio of negatives to positives is much higher
# than during training. (Typically 999 to 1 vs. 4 to 1) By adjusting the
# weights for the negative examples we compute a loss which is consistent with
# the training data. (And provides apples-to-apples comparison)
negative_scale_factor = num_training_neg / rconst.NUM_EVAL_NEGATIVES
example_weights = ((eval_labels_float +
(1 - eval_labels_float) * negative_scale_factor) *
(1 + rconst.NUM_EVAL_NEGATIVES) / (1 + num_training_neg))
# Tile metric weights back to logit dimensions
expanded_metric_weights = tf.reshape(
tf.tile(metric_weights[:, tf.newaxis],
(1, rconst.NUM_EVAL_NEGATIVES + 1)), (-1,))
# ignore padded examples
example_weights *= tf.cast(expanded_metric_weights, tf.float32)
cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
logits=softmax_logits, labels=eval_labels, weights=example_weights)
def metric_fn(top_k_tensor, ndcg_tensor, weight_tensor):
return {
rconst.HR_KEY:
tf.compat.v1.metrics.mean(
top_k_tensor, weights=weight_tensor,
name=rconst.HR_METRIC_NAME),
rconst.NDCG_KEY:
tf.compat.v1.metrics.mean(
ndcg_tensor,
weights=weight_tensor,
name=rconst.NDCG_METRIC_NAME)
}
return cross_entropy, metric_fn, in_top_k, ndcg, metric_weights
def compute_top_k_and_ndcg(logits: tf.Tensor,
duplicate_mask: tf.Tensor,
match_mlperf: bool = False):
"""Compute inputs of metric calculation.
Args:
logits: A tensor containing the predicted logits for each user. The shape of
      logits is (num_users_per_batch * (1 + NUM_EVAL_NEGATIVES),). Logits for a
      user are grouped, and the last element of the group is the true element.
duplicate_mask: A vector with the same shape as logits, with a value of 1 if
the item corresponding to the logit at that position has already appeared
for that user.
match_mlperf: Use the MLPerf reference convention for computing rank.
Returns:
    in_top_k, ndcg and weights, all of which have size (num_users_in_batch,), and
logits_by_user which has size
(num_users_in_batch, (rconst.NUM_EVAL_NEGATIVES + 1)).
"""
logits_by_user = tf.reshape(logits, (-1, rconst.NUM_EVAL_NEGATIVES + 1))
duplicate_mask_by_user = tf.cast(
tf.reshape(duplicate_mask, (-1, rconst.NUM_EVAL_NEGATIVES + 1)),
logits_by_user.dtype)
if match_mlperf:
# Set duplicate logits to the min value for that dtype. The MLPerf
# reference dedupes during evaluation.
logits_by_user *= (1 - duplicate_mask_by_user)
logits_by_user += duplicate_mask_by_user * logits_by_user.dtype.min
# Determine the location of the first element in each row after the elements
# are sorted.
sort_indices = tf.argsort(logits_by_user, axis=1, direction="DESCENDING")
# Use matrix multiplication to extract the position of the true item from the
# tensor of sorted indices. This approach is chosen because both GPUs and TPUs
# perform matrix multiplications very quickly. This is similar to np.argwhere.
# However this is a special case because the target will only appear in
# sort_indices once.
one_hot_position = tf.cast(
tf.equal(sort_indices, rconst.NUM_EVAL_NEGATIVES), tf.int32)
sparse_positions = tf.multiply(
one_hot_position,
tf.range(logits_by_user.shape[1])[tf.newaxis, :])
position_vector = tf.reduce_sum(sparse_positions, axis=1)
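  # Toy example (illustrative, with 2 negatives so rows are 3 wide): if a row
  # of sort_indices is [2, 0, 1], the true item (column 2) sorted into rank 0,
  # so one_hot_position is [1, 0, 0] and position_vector yields 0.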
in_top_k = tf.cast(tf.less(position_vector, rconst.TOP_K), tf.float32)
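  # With a single relevant item, NDCG reduces to 1 / log2(position + 2) for
  # the 0-indexed position computed above (position 0 gives an NDCG of 1).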
ndcg = tf.math.log(2.) / tf.math.log(tf.cast(position_vector, tf.float32) + 2)
ndcg *= in_top_k
# If a row is a padded row, all but the first element will be a duplicate.
metric_weights = tf.not_equal(
tf.reduce_sum(duplicate_mask_by_user, axis=1), rconst.NUM_EVAL_NEGATIVES)
return in_top_k, ndcg, metric_weights, logits_by_user
| 16,940 | 37.155405 | 80 | py |
models | models-master/official/recommendation/data_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test NCF data pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import hashlib
import os
import mock
import numpy as np
import scipy.stats
import tensorflow as tf
from official.recommendation import constants as rconst
from official.recommendation import data_preprocessing
from official.recommendation import movielens
from official.recommendation import popen_helper
DATASET = "ml-test"
NUM_USERS = 1000
NUM_ITEMS = 2000
NUM_PTS = 50000
BATCH_SIZE = 2048
EVAL_BATCH_SIZE = 4000
NUM_NEG = 4
END_TO_END_TRAIN_MD5 = "b218738e915e825d03939c5e305a2698"
END_TO_END_EVAL_MD5 = "d753d0f3186831466d6e218163a9501e"
FRESH_RANDOMNESS_MD5 = "63d0dff73c0e5f1048fbdc8c65021e22"
def mock_download(*args, **kwargs):
return
# The forkpool used by data producers interacts badly with the threading
# used by TestCase. Without this patch tests will hang, and no amount
# of diligent closing and joining within the producer will prevent it.
@mock.patch.object(popen_helper, "get_forkpool", popen_helper.get_fauxpool)
class BaseTest(tf.test.TestCase):
def setUp(self):
tf.compat.v1.disable_eager_execution()
self.temp_data_dir = self.get_temp_dir()
ratings_folder = os.path.join(self.temp_data_dir, DATASET)
tf.io.gfile.makedirs(ratings_folder)
np.random.seed(0)
raw_user_ids = np.arange(NUM_USERS * 3)
np.random.shuffle(raw_user_ids)
raw_user_ids = raw_user_ids[:NUM_USERS]
raw_item_ids = np.arange(NUM_ITEMS * 3)
np.random.shuffle(raw_item_ids)
raw_item_ids = raw_item_ids[:NUM_ITEMS]
users = np.random.choice(raw_user_ids, NUM_PTS)
items = np.random.choice(raw_item_ids, NUM_PTS)
scores = np.random.randint(low=0, high=5, size=NUM_PTS)
times = np.random.randint(low=1000000000, high=1200000000, size=NUM_PTS)
self.rating_file = os.path.join(ratings_folder, movielens.RATINGS_FILE)
self.seen_pairs = set()
self.holdout = {}
with tf.io.gfile.GFile(self.rating_file, "w") as f:
f.write("user_id,item_id,rating,timestamp\n")
for usr, itm, scr, ts in zip(users, items, scores, times):
pair = (usr, itm)
if pair in self.seen_pairs:
continue
self.seen_pairs.add(pair)
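        # Track each user's most recent (timestamp, item) pair; this is the
        # holdout positive the pipeline is expected to reserve for eval.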
if usr not in self.holdout or (ts, itm) > self.holdout[usr]:
self.holdout[usr] = (ts, itm)
f.write("{},{},{},{}\n".format(usr, itm, scr, ts))
movielens.download = mock_download
movielens.NUM_RATINGS[DATASET] = NUM_PTS
movielens.DATASET_TO_NUM_USERS_AND_ITEMS[DATASET] = (NUM_USERS, NUM_ITEMS)
def make_params(self, train_epochs=1):
return {
"train_epochs": train_epochs,
"batches_per_step": 1,
"use_seed": False,
"batch_size": BATCH_SIZE,
"eval_batch_size": EVAL_BATCH_SIZE,
"num_neg": NUM_NEG,
"match_mlperf": True,
"use_tpu": False,
"use_xla_for_gpu": False,
"stream_files": False,
}
def test_preprocessing(self):
# For the most part the necessary checks are performed within
# _filter_index_sort()
cache_path = os.path.join(self.temp_data_dir, "test_cache.pickle")
data, valid_cache = data_preprocessing._filter_index_sort(
self.rating_file, cache_path=cache_path)
assert len(data[rconst.USER_MAP]) == NUM_USERS
assert len(data[rconst.ITEM_MAP]) == NUM_ITEMS
def drain_dataset(self, dataset, g):
# type: (tf.data.Dataset, tf.Graph) -> list
with self.session(graph=g) as sess:
with g.as_default():
batch = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
output = []
while True:
try:
output.append(sess.run(batch))
except tf.errors.OutOfRangeError:
break
return output
def _test_end_to_end(self, constructor_type):
params = self.make_params(train_epochs=1)
_, _, producer = data_preprocessing.instantiate_pipeline(
dataset=DATASET,
data_dir=self.temp_data_dir,
params=params,
constructor_type=constructor_type,
deterministic=True)
producer.start()
producer.join()
assert producer._fatal_exception is None
user_inv_map = {v: k for k, v in producer.user_map.items()}
item_inv_map = {v: k for k, v in producer.item_map.items()}
# ==========================================================================
# == Training Data =========================================================
# ==========================================================================
g = tf.Graph()
with g.as_default():
input_fn = producer.make_input_fn(is_training=True)
dataset = input_fn(params)
first_epoch = self.drain_dataset(dataset=dataset, g=g)
counts = defaultdict(int)
train_examples = {
True: set(),
False: set(),
}
md5 = hashlib.md5()
for features, labels in first_epoch:
data_list = [
features[movielens.USER_COLUMN].flatten(),
features[movielens.ITEM_COLUMN].flatten(),
features[rconst.VALID_POINT_MASK].flatten(),
labels.flatten()
]
for i in data_list:
md5.update(i.tobytes())
for u, i, v, l in zip(*data_list):
if not v:
continue # ignore padding
u_raw = user_inv_map[u]
i_raw = item_inv_map[i]
if ((u_raw, i_raw) in self.seen_pairs) != l:
# The evaluation item is not considered during false negative
# generation, so it will occasionally appear as a negative example
# during training.
assert not l
self.assertEqual(i_raw, self.holdout[u_raw][1])
train_examples[l].add((u_raw, i_raw))
counts[(u_raw, i_raw)] += 1
self.assertRegexpMatches(md5.hexdigest(), END_TO_END_TRAIN_MD5)
num_positives_seen = len(train_examples[True])
self.assertEqual(producer._train_pos_users.shape[0], num_positives_seen)
# This check is more heuristic because negatives are sampled with
# replacement. It only checks that negative generation is reasonably random.
self.assertGreater(
len(train_examples[False]) / NUM_NEG / num_positives_seen, 0.9)
# This checks that the samples produced are independent by checking the
# number of duplicate entries. If workers are not properly independent there
# will be lots of repeated pairs.
self.assertLess(np.mean(list(counts.values())), 1.1)
# ==========================================================================
# == Eval Data =============================================================
# ==========================================================================
with g.as_default():
input_fn = producer.make_input_fn(is_training=False)
dataset = input_fn(params)
eval_data = self.drain_dataset(dataset=dataset, g=g)
current_user = None
md5 = hashlib.md5()
for features in eval_data:
data_list = [
features[movielens.USER_COLUMN].flatten(),
features[movielens.ITEM_COLUMN].flatten(),
features[rconst.DUPLICATE_MASK].flatten()
]
for i in data_list:
md5.update(i.tobytes())
for idx, (u, i, d) in enumerate(zip(*data_list)):
u_raw = user_inv_map[u]
i_raw = item_inv_map[i]
if current_user is None:
current_user = u
# Ensure that users appear in blocks, as the evaluation logic expects
# this structure.
self.assertEqual(u, current_user)
# The structure of evaluation data is 999 negative examples followed
# by the holdout positive.
if not (idx + 1) % (rconst.NUM_EVAL_NEGATIVES + 1):
# Check that the last element in each chunk is the holdout item.
self.assertEqual(i_raw, self.holdout[u_raw][1])
current_user = None
elif i_raw == self.holdout[u_raw][1]:
# Because the holdout item is not given to the negative generation
# process, it can appear as a negative. In that case, it should be
# masked out as a duplicate. (Since the true positive is placed at
# the end and would therefore lose the tie.)
assert d
else:
# Otherwise check that the other 999 points for a user are selected
# from the negatives.
assert (u_raw, i_raw) not in self.seen_pairs
self.assertRegexpMatches(md5.hexdigest(), END_TO_END_EVAL_MD5)
def _test_fresh_randomness(self, constructor_type):
train_epochs = 5
params = self.make_params(train_epochs=train_epochs)
_, _, producer = data_preprocessing.instantiate_pipeline(
dataset=DATASET,
data_dir=self.temp_data_dir,
params=params,
constructor_type=constructor_type,
deterministic=True)
producer.start()
results = []
g = tf.Graph()
with g.as_default():
for _ in range(train_epochs):
input_fn = producer.make_input_fn(is_training=True)
dataset = input_fn(params)
results.extend(self.drain_dataset(dataset=dataset, g=g))
producer.join()
assert producer._fatal_exception is None
positive_counts, negative_counts = defaultdict(int), defaultdict(int)
md5 = hashlib.md5()
for features, labels in results:
data_list = [
features[movielens.USER_COLUMN].flatten(),
features[movielens.ITEM_COLUMN].flatten(),
features[rconst.VALID_POINT_MASK].flatten(),
labels.flatten()
]
for i in data_list:
md5.update(i.tobytes())
for u, i, v, l in zip(*data_list):
if not v:
continue # ignore padding
if l:
positive_counts[(u, i)] += 1
else:
negative_counts[(u, i)] += 1
self.assertRegexpMatches(md5.hexdigest(), FRESH_RANDOMNESS_MD5)
# The positive examples should appear exactly once each epoch
self.assertAllEqual(
list(positive_counts.values()), [train_epochs for _ in positive_counts])
# The threshold for the negatives is heuristic, but in general repeats are
# expected, but should not appear too frequently.
pair_cardinality = NUM_USERS * NUM_ITEMS
neg_pair_cardinality = pair_cardinality - len(self.seen_pairs)
    # Approximation of the expected number of times that a particular negative
    # will appear in a given epoch. Implicit in this calculation is the
    # treatment of all negative pairs as equally likely. This is not
    # necessarily reasonable in general; however, the generation in
    # self.setUp() will approximate this behavior sufficiently for heuristic
    # testing.
e_sample = len(self.seen_pairs) * NUM_NEG / neg_pair_cardinality
    # The frequency of occurrence of a given negative pair should follow an
    # approximately binomial distribution in the limit that the cardinality of
    # the negative pair set >> the number of samples per epoch.
approx_pdf = scipy.stats.binom.pmf(
k=np.arange(train_epochs + 1), n=train_epochs, p=e_sample)
# Tally the actual observed counts.
count_distribution = [0 for _ in range(train_epochs + 1)]
for i in negative_counts.values():
i = min([i, train_epochs]) # round down tail for simplicity.
count_distribution[i] += 1
count_distribution[0] = neg_pair_cardinality - sum(count_distribution[1:])
# Check that the frequency of negative pairs is approximately binomial.
for i in range(train_epochs + 1):
if approx_pdf[i] < 0.05:
continue # Variance will be high at the tails.
observed_fraction = count_distribution[i] / neg_pair_cardinality
deviation = (2 * abs(observed_fraction - approx_pdf[i]) /
(observed_fraction + approx_pdf[i]))
self.assertLess(deviation, 0.2)
def test_end_to_end_materialized(self):
self._test_end_to_end("materialized")
def test_end_to_end_bisection(self):
self._test_end_to_end("bisection")
def test_fresh_randomness_materialized(self):
self._test_fresh_randomness("materialized")
def test_fresh_randomness_bisection(self):
self._test_fresh_randomness("bisection")
if __name__ == "__main__":
tf.test.main()
| 12,821 | 34.518006 | 80 | py |
models | models-master/official/recommendation/ncf_keras_main.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
import json
import os
# pylint: disable=g-bad-import-order
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.common import distribute_utils
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import ncf_common
from official.recommendation import ncf_input_pipeline
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
from official.utils.misc import model_helpers
FLAGS = flags.FLAGS
def metric_fn(logits, dup_mask, match_mlperf):
dup_mask = tf.cast(dup_mask, tf.float32)
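  # Drop the all-zeros column that was prepended to emulate sigmoid via
  # softmax; only the raw logit column participates in ranking.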
logits = tf.slice(logits, [0, 1], [-1, -1])
in_top_k, _, metric_weights, _ = neumf_model.compute_top_k_and_ndcg(
logits, dup_mask, match_mlperf)
metric_weights = tf.cast(metric_weights, tf.float32)
return in_top_k, metric_weights
class MetricLayer(tf.keras.layers.Layer):
"""Custom layer of metrics for NCF model."""
def __init__(self, match_mlperf):
super(MetricLayer, self).__init__()
self.match_mlperf = match_mlperf
def get_config(self):
return {"match_mlperf": self.match_mlperf}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def call(self, inputs, training=False):
logits, dup_mask = inputs
if training:
hr_sum = 0.0
hr_count = 0.0
else:
metric, metric_weights = metric_fn(logits, dup_mask, self.match_mlperf)
hr_sum = tf.reduce_sum(metric * metric_weights)
hr_count = tf.reduce_sum(metric_weights)
self.add_metric(hr_sum, name="hr_sum", aggregation="mean")
self.add_metric(hr_count, name="hr_count", aggregation="mean")
return logits
class LossLayer(tf.keras.layers.Layer):
"""Pass-through loss layer for NCF model."""
def __init__(self, loss_normalization_factor):
# The loss may overflow in float16, so we use float32 instead.
super(LossLayer, self).__init__(dtype="float32")
self.loss_normalization_factor = loss_normalization_factor
self.loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction="sum")
def get_config(self):
return {"loss_normalization_factor": self.loss_normalization_factor}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def call(self, inputs):
logits, labels, valid_pt_mask_input = inputs
loss = self.loss(
y_true=labels, y_pred=logits, sample_weight=valid_pt_mask_input)
loss = loss * (1.0 / self.loss_normalization_factor)
self.add_loss(loss)
return logits
class IncrementEpochCallback(tf.keras.callbacks.Callback):
"""A callback to increase the requested epoch for the data producer.
  We need this because we can only buffer a limited amount of data, so we keep
  a moving window to represent the buffer. This callback advances one of the
  window's boundaries at the start of each epoch.
"""
def __init__(self, producer):
self._producer = producer
def on_epoch_begin(self, epoch, logs=None):
self._producer.increment_request_epoch()
class CustomEarlyStopping(tf.keras.callbacks.Callback):
"""Stop training has reached a desired hit rate."""
def __init__(self, monitor, desired_value):
super(CustomEarlyStopping, self).__init__()
self.monitor = monitor
self.desired = desired_value
self.stopped_epoch = 0
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current and current >= self.desired:
self.stopped_epoch = epoch
self.model.stop_training = True
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning(
"Early stopping conditioned on metric `%s` "
"which is not available. Available metrics are: %s", self.monitor,
",".join(list(logs.keys())))
return monitor_value
def _get_keras_model(params):
"""Constructs and returns the model."""
batch_size = params["batch_size"]
user_input = tf.keras.layers.Input(
shape=(1,), name=movielens.USER_COLUMN, dtype=tf.int32)
item_input = tf.keras.layers.Input(
shape=(1,), name=movielens.ITEM_COLUMN, dtype=tf.int32)
valid_pt_mask_input = tf.keras.layers.Input(
shape=(1,), name=rconst.VALID_POINT_MASK, dtype=tf.bool)
dup_mask_input = tf.keras.layers.Input(
shape=(1,), name=rconst.DUPLICATE_MASK, dtype=tf.int32)
label_input = tf.keras.layers.Input(
shape=(1,), name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool)
base_model = neumf_model.construct_model(user_input, item_input, params)
logits = base_model.output
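  # Prepend a zeros column so that softmax over [0, logit] is equivalent to
  # sigmoid(logit); this lets the loss use sparse categorical cross-entropy.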
zeros = tf.keras.layers.Lambda(lambda x: x * 0)(logits)
softmax_logits = tf.keras.layers.concatenate([zeros, logits], axis=-1)
# Custom training loop calculates loss and metric as a part of
# training/evaluation step function.
if not params["keras_use_ctl"]:
softmax_logits = MetricLayer(
params["match_mlperf"])([softmax_logits, dup_mask_input])
# TODO(b/134744680): Use model.add_loss() instead once the API is well
# supported.
softmax_logits = LossLayer(batch_size)(
[softmax_logits, label_input, valid_pt_mask_input])
keras_model = tf.keras.Model(
inputs={
movielens.USER_COLUMN: user_input,
movielens.ITEM_COLUMN: item_input,
rconst.VALID_POINT_MASK: valid_pt_mask_input,
rconst.DUPLICATE_MASK: dup_mask_input,
rconst.TRAIN_LABEL_KEY: label_input
},
outputs=softmax_logits)
keras_model.summary()
return keras_model
def run_ncf(_):
"""Run NCF training and eval with Keras."""
keras_utils.set_session_config(enable_xla=FLAGS.enable_xla)
if FLAGS.seed is not None:
print("Setting tf seed")
tf.random.set_seed(FLAGS.seed)
model_helpers.apply_clean(FLAGS)
if FLAGS.dtype == "fp16" and FLAGS.fp16_implementation == "keras":
tf.keras.mixed_precision.set_global_policy("mixed_float16")
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
params = ncf_common.parse_flags(FLAGS)
params["distribute_strategy"] = strategy
params["use_tpu"] = (FLAGS.distribution_strategy == "tpu")
if params["use_tpu"] and not params["keras_use_ctl"]:
logging.error("Custom training loop must be used when using TPUStrategy.")
return
batch_size = params["batch_size"]
time_callback = keras_utils.TimeHistory(batch_size, FLAGS.log_steps)
callbacks = [time_callback]
producer, input_meta_data = None, None
generate_input_online = params["train_dataset_path"] is None
if generate_input_online:
# Start data producing thread.
num_users, num_items, _, _, producer = ncf_common.get_inputs(params)
producer.start()
per_epoch_callback = IncrementEpochCallback(producer)
callbacks.append(per_epoch_callback)
else:
assert params["eval_dataset_path"] and params["input_meta_data_path"]
with tf.io.gfile.GFile(params["input_meta_data_path"], "rb") as reader:
input_meta_data = json.loads(reader.read().decode("utf-8"))
num_users = input_meta_data["num_users"]
num_items = input_meta_data["num_items"]
params["num_users"], params["num_items"] = num_users, num_items
if FLAGS.early_stopping:
early_stopping_callback = CustomEarlyStopping(
"val_HR_METRIC", desired_value=FLAGS.hr_threshold)
callbacks.append(early_stopping_callback)
(train_input_dataset, eval_input_dataset, num_train_steps,
num_eval_steps) = ncf_input_pipeline.create_ncf_input_data(
params, producer, input_meta_data, strategy)
steps_per_epoch = None if generate_input_online else num_train_steps
with distribute_utils.get_strategy_scope(strategy):
keras_model = _get_keras_model(params)
optimizer = tf.keras.optimizers.Adam(
learning_rate=params["learning_rate"],
beta_1=params["beta1"],
beta_2=params["beta2"],
epsilon=params["epsilon"])
if FLAGS.fp16_implementation == "graph_rewrite":
optimizer = \
tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer,
loss_scale=flags_core.get_loss_scale(FLAGS,
default_for_fp16="dynamic"))
elif FLAGS.dtype == "fp16":
loss_scale = flags_core.get_loss_scale(FLAGS, default_for_fp16="dynamic")
# Note Model.compile automatically wraps the optimizer with a
# LossScaleOptimizer using dynamic loss scaling. We explicitly wrap it
# here for the case where a custom training loop or fixed loss scale is
# used.
if loss_scale == "dynamic":
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer)
else:
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
optimizer, dynamic=False, initial_scale=loss_scale)
if params["keras_use_ctl"]:
train_loss, eval_results = run_ncf_custom_training(
params,
strategy,
keras_model,
optimizer,
callbacks,
train_input_dataset,
eval_input_dataset,
num_train_steps,
num_eval_steps,
generate_input_online=generate_input_online)
else:
keras_model.compile(optimizer=optimizer, run_eagerly=FLAGS.run_eagerly)
if not FLAGS.ml_perf:
# Create Tensorboard summary and checkpoint callbacks.
summary_dir = os.path.join(FLAGS.model_dir, "summaries")
summary_callback = tf.keras.callbacks.TensorBoard(
summary_dir, profile_batch=0)
checkpoint_path = os.path.join(FLAGS.model_dir, "checkpoint")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
checkpoint_path, save_weights_only=True)
callbacks += [summary_callback, checkpoint_callback]
history = keras_model.fit(
train_input_dataset,
epochs=FLAGS.train_epochs,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
validation_data=eval_input_dataset,
validation_steps=num_eval_steps,
verbose=2)
logging.info("Training done. Start evaluating")
eval_loss_and_metrics = keras_model.evaluate(
eval_input_dataset, steps=num_eval_steps, verbose=2)
logging.info("Keras evaluation is done.")
# Keras evaluate() API returns scalar loss and metric values from
# evaluation as a list. Here, the returned list would contain
# [evaluation loss, hr sum, hr count].
eval_hit_rate = eval_loss_and_metrics[1] / eval_loss_and_metrics[2]
# Format evaluation result into [eval loss, eval hit accuracy].
eval_results = [eval_loss_and_metrics[0], eval_hit_rate]
    train_loss = None
    if history and history.history:
      train_history = history.history
      train_loss = train_history["loss"][-1]
stats = build_stats(train_loss, eval_results, time_callback)
return stats
def run_ncf_custom_training(params,
strategy,
keras_model,
optimizer,
callbacks,
train_input_dataset,
eval_input_dataset,
num_train_steps,
num_eval_steps,
generate_input_online=True):
"""Runs custom training loop.
Args:
params: Dictionary containing training parameters.
strategy: Distribution strategy to be used for distributed training.
keras_model: Model used for training.
optimizer: Optimizer used for training.
callbacks: Callbacks to be invoked between batches/epochs.
train_input_dataset: tf.data.Dataset used for training.
eval_input_dataset: tf.data.Dataset used for evaluation.
num_train_steps: Total number of steps to run for training.
num_eval_steps: Total number of steps to run for evaluation.
generate_input_online: Whether input data was generated by data producer.
When data is generated by data producer, then train dataset must be
re-initialized after every epoch.
Returns:
A tuple of train loss and a list of training and evaluation results.
"""
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
reduction="sum", from_logits=True)
train_input_iterator = iter(
strategy.experimental_distribute_dataset(train_input_dataset))
def train_step(train_iterator):
"""Called once per step to train the model."""
def step_fn(features):
"""Computes loss and applied gradient per replica."""
with tf.GradientTape() as tape:
softmax_logits = keras_model(features)
# The loss can overflow in float16, so we cast to float32.
softmax_logits = tf.cast(softmax_logits, "float32")
labels = features[rconst.TRAIN_LABEL_KEY]
loss = loss_object(
labels,
softmax_logits,
sample_weight=features[rconst.VALID_POINT_MASK])
loss *= (1.0 / params["batch_size"])
if FLAGS.dtype == "fp16":
loss = optimizer.get_scaled_loss(loss)
grads = tape.gradient(loss, keras_model.trainable_variables)
if FLAGS.dtype == "fp16":
grads = optimizer.get_unscaled_gradients(grads)
# Converting gradients to dense form helps in perf on GPU for NCF
grads = neumf_model.sparse_to_dense_grads(
list(zip(grads, keras_model.trainable_variables)))
optimizer.apply_gradients(grads)
return loss
per_replica_losses = strategy.run(step_fn, args=(next(train_iterator),))
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
def eval_step(eval_iterator):
"""Called once per eval step to compute eval metrics."""
def step_fn(features):
"""Computes eval metrics per replica."""
softmax_logits = keras_model(features)
in_top_k, metric_weights = metric_fn(softmax_logits,
features[rconst.DUPLICATE_MASK],
params["match_mlperf"])
hr_sum = tf.reduce_sum(in_top_k * metric_weights)
hr_count = tf.reduce_sum(metric_weights)
return hr_sum, hr_count
per_replica_hr_sum, per_replica_hr_count = (
strategy.run(step_fn, args=(next(eval_iterator),)))
hr_sum = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_hr_sum, axis=None)
hr_count = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_hr_count, axis=None)
return hr_sum, hr_count
if not FLAGS.run_eagerly:
train_step = tf.function(train_step)
eval_step = tf.function(eval_step)
for callback in callbacks:
callback.on_train_begin()
# Not writing tensorboard summaries if running in MLPerf.
if FLAGS.ml_perf:
eval_summary_writer, train_summary_writer = None, None
else:
summary_dir = os.path.join(FLAGS.model_dir, "summaries")
eval_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, "eval"))
train_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, "train"))
train_loss = 0
for epoch in range(FLAGS.train_epochs):
for cb in callbacks:
cb.on_epoch_begin(epoch)
# As NCF dataset is sampled with randomness, not repeating
# data elements in each epoch has significant impact on
# convergence. As so, offline-generated TF record files
# contains all epoch worth of data. Thus we do not need
# to initialize dataset when reading from tf record files.
if generate_input_online:
train_input_iterator = iter(
strategy.experimental_distribute_dataset(train_input_dataset))
train_loss = 0
for step in range(num_train_steps):
current_step = step + epoch * num_train_steps
for c in callbacks:
c.on_batch_begin(current_step)
train_loss += train_step(train_input_iterator)
# Write train loss once in every 1000 steps.
if train_summary_writer and step % 1000 == 0:
with train_summary_writer.as_default():
tf.summary.scalar(
"training_loss", train_loss / (step + 1), step=current_step)
for c in callbacks:
c.on_batch_end(current_step)
train_loss /= num_train_steps
logging.info("Done training epoch %s, epoch loss=%.3f", epoch + 1,
train_loss)
eval_input_iterator = iter(
strategy.experimental_distribute_dataset(eval_input_dataset))
hr_sum = 0.0
hr_count = 0.0
for _ in range(num_eval_steps):
step_hr_sum, step_hr_count = eval_step(eval_input_iterator)
hr_sum += step_hr_sum
hr_count += step_hr_count
logging.info("Done eval epoch %s, hit_rate=%.3f", epoch + 1,
hr_sum / hr_count)
if eval_summary_writer:
with eval_summary_writer.as_default():
tf.summary.scalar("hit_rate", hr_sum / hr_count, step=current_step)
if (FLAGS.early_stopping and
float(hr_sum / hr_count) > params["hr_threshold"]):
break
for c in callbacks:
c.on_train_end()
# Saving the model at the end of training.
if not FLAGS.ml_perf:
checkpoint = tf.train.Checkpoint(model=keras_model, optimizer=optimizer)
checkpoint_path = os.path.join(FLAGS.model_dir, "ctl_checkpoint")
checkpoint.save(checkpoint_path)
logging.info("Saving model as TF checkpoint: %s", checkpoint_path)
return train_loss, [None, hr_sum / hr_count]
def build_stats(loss, eval_result, time_callback):
"""Normalizes and returns dictionary of stats.
Args:
loss: The final loss at training time.
eval_result: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
time_callback: Time tracking callback likely used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if loss:
stats["loss"] = loss
if eval_result:
stats["eval_loss"] = eval_result[0]
stats["eval_hit_rate"] = eval_result[1]
if time_callback:
timestamp_log = time_callback.timestamp_log
stats["step_timestamp_log"] = timestamp_log
stats["train_finish_time"] = time_callback.train_finish_time
if len(timestamp_log) > 1:
stats["avg_exp_per_second"] = (
time_callback.batch_size * time_callback.log_steps *
(len(time_callback.timestamp_log) - 1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
def main(_):
logging.info("Result is %s", run_ncf(FLAGS))
if __name__ == "__main__":
ncf_common.define_ncf_flags()
app.run(main)
| 19,840 | 34.367201 | 79 | py |
models | models-master/official/recommendation/data_pipeline.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Asynchronous data producer for the NCF pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import functools
import os
import sys
import tempfile
import threading
import time
import timeit
import traceback
import typing
from absl import logging
import numpy as np
from six.moves import queue
import tensorflow as tf
from tensorflow.python.tpu.datasets import StreamingFilesDataset
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import popen_helper
from official.recommendation import stat_utils
SUMMARY_TEMPLATE = """General:
{spacer}Num users: {num_users}
{spacer}Num items: {num_items}
Training:
{spacer}Positive count: {train_pos_ct}
{spacer}Batch size: {train_batch_size} {multiplier}
{spacer}Batch count per epoch: {train_batch_ct}
Eval:
{spacer}Positive count: {eval_pos_ct}
{spacer}Batch size: {eval_batch_size} {multiplier}
{spacer}Batch count per epoch: {eval_batch_ct}"""
class DatasetManager(object):
"""Helper class for handling TensorFlow specific data tasks.
This class takes the (relatively) framework agnostic work done by the data
constructor classes and handles the TensorFlow specific portions (TFRecord
management, tf.Dataset creation, etc.).
"""
def __init__(self,
is_training,
stream_files,
batches_per_epoch,
shard_root=None,
deterministic=False,
num_train_epochs=None):
# type: (bool, bool, int, typing.Optional[str], bool, int) -> None
"""Constructs a `DatasetManager` instance.
Args:
is_training: Boolean of whether the data provided is training or
evaluation data. This determines whether to reuse the data (if
is_training=False) and the exact structure to use when storing and
yielding data.
stream_files: Boolean indicating whether data should be serialized and
written to file shards.
batches_per_epoch: The number of batches in a single epoch.
shard_root: The base directory to be used when stream_files=True.
deterministic: Forgo non-deterministic speedups. (i.e. sloppy=True)
num_train_epochs: Number of epochs to generate. If None, then each call to
`get_dataset()` increments the number of epochs requested.
"""
self._is_training = is_training
self._deterministic = deterministic
self._stream_files = stream_files
self._writers = []
self._write_locks = [
threading.RLock() for _ in range(rconst.NUM_FILE_SHARDS)
] if stream_files else []
self._batches_per_epoch = batches_per_epoch
self._epochs_completed = 0
self._epochs_requested = num_train_epochs if num_train_epochs else 0
self._shard_root = shard_root
self._result_queue = queue.Queue()
self._result_reuse = []
@property
def current_data_root(self):
subdir = (
rconst.TRAIN_FOLDER_TEMPLATE.format(self._epochs_completed)
if self._is_training else rconst.EVAL_FOLDER)
return os.path.join(self._shard_root, subdir)
def buffer_reached(self):
# Only applicable for training.
return (self._epochs_completed - self._epochs_requested >=
rconst.CYCLES_TO_BUFFER and self._is_training)
@staticmethod
def serialize(data):
"""Convert NumPy arrays into a TFRecords entry."""
def create_int_feature(values):
values = np.squeeze(values)
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
feature_dict = {
k: create_int_feature(v.astype(np.int64)) for k, v in data.items()
}
return tf.train.Example(features=tf.train.Features(
feature=feature_dict)).SerializeToString()
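  # Round-trip sketch (illustrative; the shapes are hypothetical):
  #   record = DatasetManager.serialize(
  #       {movielens.USER_COLUMN: np.zeros((8, 1), np.int64), ...})
  #   features = DatasetManager.deserialize(record, batch_size=8)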
@staticmethod
def deserialize(serialized_data, batch_size=None, is_training=True):
"""Convert serialized TFRecords into tensors.
Args:
serialized_data: A tensor containing serialized records.
batch_size: The data arrives pre-batched, so batch size is needed to
deserialize the data.
      is_training: Boolean, whether to deserialize to training data or
        evaluation data.
    Returns:
      A dict of batched feature tensors (user and item IDs plus label and
      mask tensors) suitable for feeding the model.
    """
def _get_feature_map(batch_size, is_training=True):
"""Returns data format of the serialized tf record file."""
if is_training:
return {
movielens.USER_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
movielens.ITEM_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
rconst.VALID_POINT_MASK:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
"labels":
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64)
}
else:
return {
movielens.USER_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
movielens.ITEM_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
rconst.DUPLICATE_MASK:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64)
}
features = tf.io.parse_single_example(
serialized_data, _get_feature_map(batch_size, is_training=is_training))
users = tf.cast(features[movielens.USER_COLUMN], rconst.USER_DTYPE)
items = tf.cast(features[movielens.ITEM_COLUMN], rconst.ITEM_DTYPE)
if is_training:
valid_point_mask = tf.cast(features[rconst.VALID_POINT_MASK], tf.bool)
fake_dup_mask = tf.zeros_like(users)
return {
movielens.USER_COLUMN:
users,
movielens.ITEM_COLUMN:
items,
rconst.VALID_POINT_MASK:
valid_point_mask,
rconst.TRAIN_LABEL_KEY:
tf.reshape(tf.cast(features["labels"], tf.bool), (batch_size, 1)),
rconst.DUPLICATE_MASK:
fake_dup_mask
}
else:
labels = tf.cast(tf.zeros_like(users), tf.bool)
fake_valid_pt_mask = tf.cast(tf.zeros_like(users), tf.bool)
return {
movielens.USER_COLUMN:
users,
movielens.ITEM_COLUMN:
items,
rconst.DUPLICATE_MASK:
tf.cast(features[rconst.DUPLICATE_MASK], tf.bool),
rconst.VALID_POINT_MASK:
fake_valid_pt_mask,
rconst.TRAIN_LABEL_KEY:
labels
}
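  # Minimal round-trip sketch for serialize/deserialize (hypothetical values,
  # eval layout, assuming eager execution; two rows are used because
  # `serialize` squeezes away the trailing dimension):
  #
  #   record = DatasetManager.serialize({
  #       movielens.USER_COLUMN: np.array([[3], [4]]),
  #       movielens.ITEM_COLUMN: np.array([[7], [9]]),
  #       rconst.DUPLICATE_MASK: np.array([[0], [1]]),
  #   })
  #   features = DatasetManager.deserialize(
  #       record, batch_size=2, is_training=False)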
def put(self, index, data):
# type: (int, dict) -> None
"""Store data for later consumption.
    Because there are several paths for storing and yielding data (queues,
    lists, files), the data producer simply provides the data in a standard
    format, at which point the dataset manager handles storing it in the
    correct form.
Args:
index: Used to select shards when writing to files.
data: A dict of the data to be stored. This method mutates data, and
therefore expects to be the only consumer.
"""
if self._is_training:
mask_start_index = data.pop(rconst.MASK_START_INDEX)
batch_size = data[movielens.ITEM_COLUMN].shape[0]
data[rconst.VALID_POINT_MASK] = np.expand_dims(
np.less(np.arange(batch_size), mask_start_index), -1)
if self._stream_files:
example_bytes = self.serialize(data)
with self._write_locks[index % rconst.NUM_FILE_SHARDS]:
self._writers[index % rconst.NUM_FILE_SHARDS].write(example_bytes)
else:
self._result_queue.put((
data, data.pop("labels")) if self._is_training else data)
def start_construction(self):
if self._stream_files:
tf.io.gfile.makedirs(self.current_data_root)
template = os.path.join(self.current_data_root, rconst.SHARD_TEMPLATE)
self._writers = [
tf.io.TFRecordWriter(template.format(i))
for i in range(rconst.NUM_FILE_SHARDS)
]
def end_construction(self):
if self._stream_files:
      for writer in self._writers:
        writer.close()
self._writers = []
self._result_queue.put(self.current_data_root)
self._epochs_completed += 1
def data_generator(self, epochs_between_evals):
"""Yields examples during local training."""
assert not self._stream_files
assert self._is_training or epochs_between_evals == 1
if self._is_training:
for _ in range(self._batches_per_epoch * epochs_between_evals):
yield self._result_queue.get(timeout=300)
else:
if self._result_reuse:
assert len(self._result_reuse) == self._batches_per_epoch
for i in self._result_reuse:
yield i
else:
# First epoch.
for _ in range(self._batches_per_epoch * epochs_between_evals):
result = self._result_queue.get(timeout=300)
self._result_reuse.append(result)
yield result
def increment_request_epoch(self):
self._epochs_requested += 1
def get_dataset(self, batch_size, epochs_between_evals):
"""Construct the dataset to be used for training and eval.
For local training, data is provided through Dataset.from_generator. For
remote training (TPUs) the data is first serialized to files and then sent
to the TPU through a StreamingFilesDataset.
Args:
batch_size: The per-replica batch size of the dataset.
epochs_between_evals: How many epochs worth of data to yield. (Generator
        mode only.)

    Returns:
      A batched and prefetched tf.data.Dataset.
    """
self.increment_request_epoch()
if self._stream_files:
if epochs_between_evals > 1:
raise ValueError("epochs_between_evals > 1 not supported for file "
"based dataset.")
epoch_data_dir = self._result_queue.get(timeout=300)
if not self._is_training:
self._result_queue.put(epoch_data_dir) # Eval data is reused.
file_pattern = os.path.join(epoch_data_dir,
rconst.SHARD_TEMPLATE.format("*"))
dataset = StreamingFilesDataset(
files=file_pattern,
worker_job=popen_helper.worker_job(),
num_parallel_reads=rconst.NUM_FILE_SHARDS,
num_epochs=1,
sloppy=not self._deterministic)
map_fn = functools.partial(
self.deserialize,
batch_size=batch_size,
is_training=self._is_training)
dataset = dataset.map(map_fn, num_parallel_calls=16)
else:
types = {
movielens.USER_COLUMN: rconst.USER_DTYPE,
movielens.ITEM_COLUMN: rconst.ITEM_DTYPE
}
shapes = {
movielens.USER_COLUMN: tf.TensorShape([batch_size, 1]),
movielens.ITEM_COLUMN: tf.TensorShape([batch_size, 1])
}
if self._is_training:
types[rconst.VALID_POINT_MASK] = bool
shapes[rconst.VALID_POINT_MASK] = tf.TensorShape([batch_size, 1])
types = (types, bool)
shapes = (shapes, tf.TensorShape([batch_size, 1]))
else:
types[rconst.DUPLICATE_MASK] = bool
shapes[rconst.DUPLICATE_MASK] = tf.TensorShape([batch_size, 1])
data_generator = functools.partial(
self.data_generator, epochs_between_evals=epochs_between_evals)
dataset = tf.data.Dataset.from_generator(
generator=data_generator, output_types=types, output_shapes=shapes)
return dataset.prefetch(16)
def make_input_fn(self, batch_size):
"""Create an input_fn which checks for batch size consistency."""
def input_fn(params):
"""Returns batches for training."""
# Estimator passes batch_size during training and eval_batch_size during
# eval.
param_batch_size = (
params["batch_size"] if self._is_training else
params.get("eval_batch_size") or params["batch_size"])
if batch_size != param_batch_size:
raise ValueError("producer batch size ({}) differs from params batch "
"size ({})".format(batch_size, param_batch_size))
epochs_between_evals = (
params.get("epochs_between_evals", 1) if self._is_training else 1)
return self.get_dataset(
batch_size=batch_size, epochs_between_evals=epochs_between_evals)
return input_fn
class BaseDataConstructor(threading.Thread):
"""Data constructor base class.
This class manages the control flow for constructing data. It is not meant
to be used directly, but instead subclasses should implement the following
two methods:
self.construct_lookup_variables
self.lookup_negative_items
"""
def __init__(
self,
maximum_number_epochs, # type: int
num_users, # type: int
num_items, # type: int
user_map, # type: dict
item_map, # type: dict
train_pos_users, # type: np.ndarray
train_pos_items, # type: np.ndarray
train_batch_size, # type: int
batches_per_train_step, # type: int
num_train_negatives, # type: int
eval_pos_users, # type: np.ndarray
eval_pos_items, # type: np.ndarray
eval_batch_size, # type: int
batches_per_eval_step, # type: int
stream_files, # type: bool
deterministic=False, # type: bool
epoch_dir=None, # type: str
num_train_epochs=None, # type: int
create_data_offline=False # type: bool
):
# General constants
self._maximum_number_epochs = maximum_number_epochs
self._num_users = num_users
self._num_items = num_items
self.user_map = user_map
self.item_map = item_map
self._train_pos_users = train_pos_users
self._train_pos_items = train_pos_items
self.train_batch_size = train_batch_size
self._num_train_negatives = num_train_negatives
self._batches_per_train_step = batches_per_train_step
self._eval_pos_users = eval_pos_users
self._eval_pos_items = eval_pos_items
self.eval_batch_size = eval_batch_size
self.num_train_epochs = num_train_epochs
self.create_data_offline = create_data_offline
# Training
if self._train_pos_users.shape != self._train_pos_items.shape:
raise ValueError(
"User positives ({}) is different from item positives ({})".format(
self._train_pos_users.shape, self._train_pos_items.shape))
(self._train_pos_count,) = self._train_pos_users.shape
self._elements_in_epoch = (1 + num_train_negatives) * self._train_pos_count
self.train_batches_per_epoch = self._count_batches(self._elements_in_epoch,
train_batch_size,
batches_per_train_step)
# Evaluation
if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES):
raise ValueError("Eval batch size {} is not divisible by {}".format(
eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES))
self._eval_users_per_batch = int(eval_batch_size //
(1 + rconst.NUM_EVAL_NEGATIVES))
self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
self.eval_batches_per_epoch = self._count_batches(
self._eval_elements_in_epoch, eval_batch_size, batches_per_eval_step)
# Intermediate artifacts
self._current_epoch_order = np.empty(shape=(0,))
self._shuffle_iterator = None
self._shuffle_with_forkpool = not stream_files
if stream_files:
self._shard_root = epoch_dir or tempfile.mkdtemp(prefix="ncf_")
if not create_data_offline:
atexit.register(tf.io.gfile.rmtree, self._shard_root)
else:
self._shard_root = None
self._train_dataset = DatasetManager(True, stream_files,
self.train_batches_per_epoch,
self._shard_root, deterministic,
num_train_epochs)
self._eval_dataset = DatasetManager(False, stream_files,
self.eval_batches_per_epoch,
self._shard_root, deterministic,
num_train_epochs)
# Threading details
super(BaseDataConstructor, self).__init__()
self.daemon = True
self._stop_loop = False
self._fatal_exception = None
self.deterministic = deterministic
def __str__(self):
multiplier = ("(x{} devices)".format(self._batches_per_train_step)
if self._batches_per_train_step > 1 else "")
summary = SUMMARY_TEMPLATE.format(
spacer=" ",
num_users=self._num_users,
num_items=self._num_items,
train_pos_ct=self._train_pos_count,
train_batch_size=self.train_batch_size,
train_batch_ct=self.train_batches_per_epoch,
eval_pos_ct=self._num_users,
eval_batch_size=self.eval_batch_size,
eval_batch_ct=self.eval_batches_per_epoch,
multiplier=multiplier)
return super(BaseDataConstructor, self).__str__() + "\n" + summary
@staticmethod
def _count_batches(example_count, batch_size, batches_per_step):
"""Determine the number of batches, rounding up to fill all devices."""
x = (example_count + batch_size - 1) // batch_size
return (x + batches_per_step - 1) // batches_per_step * batches_per_step
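  # For example (illustrative numbers), _count_batches(1000, 100, 4) == 12:
  # 1000 examples fill 10 batches, which rounds up to the next multiple of 4
  # so that every device in a step receives a batch.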
def stop_loop(self):
self._stop_loop = True
def construct_lookup_variables(self):
"""Perform any one time pre-compute work."""
raise NotImplementedError
def lookup_negative_items(self, **kwargs):
"""Randomly sample negative items for given users."""
raise NotImplementedError
def _run(self):
atexit.register(self.stop_loop)
self._start_shuffle_iterator()
self.construct_lookup_variables()
self._construct_training_epoch()
self._construct_eval_epoch()
for _ in range(self._maximum_number_epochs - 1):
self._construct_training_epoch()
self.stop_loop()
def run(self):
try:
self._run()
except Exception as e:
# The Thread base class swallows stack traces, so unfortunately it is
# necessary to catch and re-raise to get debug output
traceback.print_exc()
self._fatal_exception = e
sys.stderr.flush()
raise
def _start_shuffle_iterator(self):
if self._shuffle_with_forkpool:
pool = popen_helper.get_forkpool(3, closing=False)
else:
pool = popen_helper.get_threadpool(1, closing=False)
atexit.register(pool.close)
args = [(self._elements_in_epoch, stat_utils.random_int32())
for _ in range(self._maximum_number_epochs)]
imap = pool.imap if self.deterministic else pool.imap_unordered
self._shuffle_iterator = imap(stat_utils.permutation, args)
def _get_training_batch(self, i):
"""Construct a single batch of training data.
Args:
i: The index of the batch. This is used when stream_files=True to assign
data to file shards.
"""
batch_indices = self._current_epoch_order[i *
self.train_batch_size:(i + 1) *
self.train_batch_size]
(mask_start_index,) = batch_indices.shape
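    # Indices in [0, train_pos_count) denote positives; larger indices alias
    # the same positive rows (via the modulo below) but are re-assigned
    # freshly sampled negative items.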
batch_ind_mod = np.mod(batch_indices, self._train_pos_count)
users = self._train_pos_users[batch_ind_mod]
negative_indices = np.greater_equal(batch_indices, self._train_pos_count)
negative_users = users[negative_indices]
negative_items = self.lookup_negative_items(negative_users=negative_users)
items = self._train_pos_items[batch_ind_mod]
items[negative_indices] = negative_items
labels = np.logical_not(negative_indices)
# Pad last partial batch
pad_length = self.train_batch_size - mask_start_index
if pad_length:
# We pad with arange rather than zeros because the network will still
# compute logits for padded examples, and padding with zeros would create
# a very "hot" embedding key which can have performance implications.
user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users
item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items
label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)
users = np.concatenate([users, user_pad])
items = np.concatenate([items, item_pad])
labels = np.concatenate([labels, label_pad])
self._train_dataset.put(
i, {
movielens.USER_COLUMN:
np.reshape(users, (self.train_batch_size, 1)),
movielens.ITEM_COLUMN:
np.reshape(items, (self.train_batch_size, 1)),
rconst.MASK_START_INDEX:
np.array(mask_start_index, dtype=np.int32),
"labels":
np.reshape(labels, (self.train_batch_size, 1)),
})
def _wait_to_construct_train_epoch(self):
count = 0
while self._train_dataset.buffer_reached() and not self._stop_loop:
time.sleep(0.01)
count += 1
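      # Log with exponential backoff: roughly at counts that are powers of
      # ten (100, 1000, ...), to avoid flooding the log while waiting.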
if count >= 100 and np.log10(count) == np.round(np.log10(count)):
logging.info(
"Waited {} times for training data to be consumed".format(count))
def _construct_training_epoch(self):
"""Loop to construct a batch of training data."""
if not self.create_data_offline:
self._wait_to_construct_train_epoch()
start_time = timeit.default_timer()
if self._stop_loop:
return
self._train_dataset.start_construction()
map_args = list(range(self.train_batches_per_epoch))
self._current_epoch_order = next(self._shuffle_iterator)
get_pool = (
popen_helper.get_fauxpool
if self.deterministic else popen_helper.get_threadpool)
with get_pool(6) as pool:
pool.map(self._get_training_batch, map_args)
self._train_dataset.end_construction()
logging.info("Epoch construction complete. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
@staticmethod
def _assemble_eval_batch(users, positive_items, negative_items,
users_per_batch):
"""Construct duplicate_mask and structure data accordingly.
The positive items should be last so that they lose ties. However, they
should not be masked out if the true eval positive happens to be
selected as a negative. So instead, the positive is placed in the first
position, and then switched with the last element after the duplicate
mask has been computed.
Args:
users: An array of users in a batch. (should be identical along axis 1)
positive_items: An array (batch_size x 1) of positive item indices.
negative_items: An array of negative item indices.
users_per_batch: How many users should be in the batch. This is passed as
an argument so that ncf_test.py can use this method.
Returns:
User, item, and duplicate_mask arrays.
"""
items = np.concatenate([positive_items, negative_items], axis=1)
# We pad the users and items here so that the duplicate mask calculation
# will include padding. The metric function relies on all padded elements
# except the positive being marked as duplicate to mask out padded points.
if users.shape[0] < users_per_batch:
pad_rows = users_per_batch - users.shape[0]
padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)
users = np.concatenate([users, padding.astype(users.dtype)], axis=0)
items = np.concatenate([items, padding.astype(items.dtype)], axis=0)
duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(bool)
items[:, (0, -1)] = items[:, (-1, 0)]
duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]
assert users.shape == items.shape == duplicate_mask.shape
return users, items, duplicate_mask
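  # Worked sketch of the swap above (hypothetical 1-user batch with 3
  # negatives, one of which collides with the positive item 7):
  #   users = np.zeros((1, 4)); positive_items = np.array([[7]])
  #   negative_items = np.array([[2, 7, 5]])
  #   items before swap: [[7, 2, 7, 5]] -> duplicate_mask [[0, 0, 1, 0]]
  #   after swapping columns 0 and -1: items [[5, 2, 7, 7]]; only the
  #   duplicated negative copy is masked, never the true positive.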
def _get_eval_batch(self, i):
"""Construct a single batch of evaluation data.
Args:
i: The index of the batch.
"""
low_index = i * self._eval_users_per_batch
high_index = (i + 1) * self._eval_users_per_batch
users = np.repeat(
self._eval_pos_users[low_index:high_index, np.newaxis],
1 + rconst.NUM_EVAL_NEGATIVES,
axis=1)
positive_items = self._eval_pos_items[low_index:high_index, np.newaxis]
negative_items = (
self.lookup_negative_items(negative_users=users[:, :-1]).reshape(
-1, rconst.NUM_EVAL_NEGATIVES))
users, items, duplicate_mask = self._assemble_eval_batch(
users, positive_items, negative_items, self._eval_users_per_batch)
self._eval_dataset.put(
i, {
movielens.USER_COLUMN:
np.reshape(users.flatten(), (self.eval_batch_size, 1)),
movielens.ITEM_COLUMN:
np.reshape(items.flatten(), (self.eval_batch_size, 1)),
rconst.DUPLICATE_MASK:
np.reshape(duplicate_mask.flatten(), (self.eval_batch_size, 1)),
})
def _construct_eval_epoch(self):
"""Loop to construct data for evaluation."""
if self._stop_loop:
return
start_time = timeit.default_timer()
self._eval_dataset.start_construction()
    map_args = list(range(self.eval_batches_per_epoch))
get_pool = (
popen_helper.get_fauxpool
if self.deterministic else popen_helper.get_threadpool)
with get_pool(6) as pool:
pool.map(self._get_eval_batch, map_args)
self._eval_dataset.end_construction()
logging.info("Eval construction complete. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
def make_input_fn(self, is_training):
# It isn't feasible to provide a foolproof check, so this is designed to
# catch most failures rather than provide an exhaustive guard.
if self._fatal_exception is not None:
raise ValueError("Fatal exception in the data production loop: {}".format(
self._fatal_exception))
return (self._train_dataset.make_input_fn(self.train_batch_size)
if is_training else self._eval_dataset.make_input_fn(
self.eval_batch_size))
def increment_request_epoch(self):
self._train_dataset.increment_request_epoch()
class DummyConstructor(threading.Thread):
"""Class for running with synthetic data."""
def __init__(self, *args, **kwargs):
super(DummyConstructor, self).__init__(*args, **kwargs)
self.train_batches_per_epoch = rconst.SYNTHETIC_BATCHES_PER_EPOCH
self.eval_batches_per_epoch = rconst.SYNTHETIC_BATCHES_PER_EPOCH
def run(self):
pass
def stop_loop(self):
pass
def increment_request_epoch(self):
pass
@staticmethod
def make_input_fn(is_training):
"""Construct training input_fn that uses synthetic data."""
def input_fn(params):
"""Returns dummy input batches for training."""
# Estimator passes batch_size during training and eval_batch_size during
# eval.
batch_size = (
params["batch_size"] if is_training else
params.get("eval_batch_size") or params["batch_size"])
num_users = params["num_users"]
num_items = params["num_items"]
users = tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=num_users)
items = tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=num_items)
if is_training:
valid_point_mask = tf.cast(
tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=2), tf.bool)
labels = tf.cast(
tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.VALID_POINT_MASK: valid_point_mask,
}, labels
else:
dupe_mask = tf.cast(
tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.DUPLICATE_MASK: dupe_mask,
}
dataset = tf.data.Dataset.from_tensors(data).repeat(
rconst.SYNTHETIC_BATCHES_PER_EPOCH * params["batches_per_step"])
dataset = dataset.prefetch(32)
return dataset
return input_fn
class MaterializedDataConstructor(BaseDataConstructor):
"""Materialize a table of negative examples for fast negative generation.
This class creates a table (num_users x num_items) containing all of the
negative examples for each user. This table is conceptually ragged; that is to
say the items dimension will have a number of unused elements at the end equal
to the number of positive elements for a given user. For instance:
num_users = 3
num_items = 5
positives = [[1, 3], [0], [1, 2, 3, 4]]
will generate a negative table:
[
[0 2 4 int32max int32max],
[1 2 3 4 int32max],
[0 int32max int32max int32max int32max],
]
and a vector of per-user negative counts, which in this case would be:
[3, 4, 1]
When sampling negatives, integers are (nearly) uniformly selected from the
range [0, per_user_neg_count[user]) which gives a column_index, at which
point the negative can be selected as:
negative_table[user, column_index]
This technique will not scale; however MovieLens is small enough that even
a pre-compute which is quadratic in problem size will still fit in memory. A
more scalable lookup method is in the works.
"""
def __init__(self, *args, **kwargs):
super(MaterializedDataConstructor, self).__init__(*args, **kwargs)
self._negative_table = None
self._per_user_neg_count = None
def construct_lookup_variables(self):
# Materialize negatives for fast lookup sampling.
start_time = timeit.default_timer()
inner_bounds = np.argwhere(self._train_pos_users[1:] -
self._train_pos_users[:-1])[:, 0] + 1
(upper_bound,) = self._train_pos_users.shape
index_bounds = [0] + inner_bounds.tolist() + [upper_bound]
self._negative_table = np.zeros(
shape=(self._num_users, self._num_items), dtype=rconst.ITEM_DTYPE)
# Set the table to the max value to make sure the embedding lookup will fail
# if we go out of bounds, rather than just overloading item zero.
self._negative_table += np.iinfo(rconst.ITEM_DTYPE).max
assert self._num_items < np.iinfo(rconst.ITEM_DTYPE).max
# Reuse arange during generation. np.delete will make a copy.
full_set = np.arange(self._num_items, dtype=rconst.ITEM_DTYPE)
self._per_user_neg_count = np.zeros(
shape=(self._num_users,), dtype=np.int32)
# Threading does not improve this loop. For some reason, the np.delete
# call does not parallelize well. Multiprocessing incurs too much
# serialization overhead to be worthwhile.
for i in range(self._num_users):
positives = self._train_pos_items[index_bounds[i]:index_bounds[i + 1]]
negatives = np.delete(full_set, positives)
self._per_user_neg_count[i] = self._num_items - positives.shape[0]
self._negative_table[i, :self._per_user_neg_count[i]] = negatives
logging.info("Negative sample table built. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
def lookup_negative_items(self, negative_users, **kwargs):
negative_item_choice = stat_utils.very_slightly_biased_randint(
self._per_user_neg_count[negative_users])
return self._negative_table[negative_users, negative_item_choice]
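  # Continuing the class docstring example: for user 0, _per_user_neg_count[0]
  # is 3, so a column index is drawn from [0, 3) and _negative_table[0] maps
  # it to one of the items 0, 2 or 4.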
class BisectionDataConstructor(BaseDataConstructor):
"""Use bisection to index within positive examples.
This class tallies the number of negative items which appear before each
positive item for a user. This means that in order to select the ith negative
item for a user, it only needs to determine which two positive items bound
  it, at which point the item id for the ith negative is a simple algebraic
  expression.
"""
def __init__(self, *args, **kwargs):
super(BisectionDataConstructor, self).__init__(*args, **kwargs)
self.index_bounds = None
self._sorted_train_pos_items = None
self._total_negatives = None
def _index_segment(self, user):
lower, upper = self.index_bounds[user:user + 2]
items = self._sorted_train_pos_items[lower:upper]
negatives_since_last_positive = np.concatenate(
[items[0][np.newaxis], items[1:] - items[:-1] - 1])
return np.cumsum(negatives_since_last_positive)
def construct_lookup_variables(self):
start_time = timeit.default_timer()
inner_bounds = np.argwhere(self._train_pos_users[1:] -
self._train_pos_users[:-1])[:, 0] + 1
(upper_bound,) = self._train_pos_users.shape
self.index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])
# Later logic will assume that the users are in sequential ascending order.
assert np.array_equal(self._train_pos_users[self.index_bounds[:-1]],
np.arange(self._num_users))
self._sorted_train_pos_items = self._train_pos_items.copy()
for i in range(self._num_users):
lower, upper = self.index_bounds[i:i + 2]
self._sorted_train_pos_items[lower:upper].sort()
self._total_negatives = np.concatenate(
[self._index_segment(i) for i in range(self._num_users)])
logging.info("Negative total vector built. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
def lookup_negative_items(self, negative_users, **kwargs):
output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1
left_index = self.index_bounds[negative_users]
right_index = self.index_bounds[negative_users + 1] - 1
num_positives = right_index - left_index + 1
num_negatives = self._num_items - num_positives
neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)
# Shortcuts:
# For points where the negative is greater than or equal to the tally before
# the last positive point there is no need to bisect. Instead the item id
# corresponding to the negative item choice is simply:
    #   last_positive_index + 1 + (neg_choice - last_negative_tally)
# Similarly, if the selection is less than the tally at the first positive
# then the item_id is simply the selection.
#
# Because MovieLens organizes popular movies into low integers (which is
# preserved through the preprocessing), the first shortcut is very
# efficient, allowing ~60% of samples to bypass the bisection. For the same
# reason, the second shortcut is rarely triggered (<0.02%) and is therefore
# not worth implementing.
use_shortcut = neg_item_choice >= self._total_negatives[right_index]
output[use_shortcut] = (
self._sorted_train_pos_items[right_index] + 1 +
(neg_item_choice - self._total_negatives[right_index]))[use_shortcut]
if np.all(use_shortcut):
# The bisection code is ill-posed when there are no elements.
return output
not_use_shortcut = np.logical_not(use_shortcut)
left_index = left_index[not_use_shortcut]
right_index = right_index[not_use_shortcut]
neg_item_choice = neg_item_choice[not_use_shortcut]
num_loops = np.max(
np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))
for i in range(num_loops):
mid_index = (left_index + right_index) // 2
right_criteria = self._total_negatives[mid_index] > neg_item_choice
left_criteria = np.logical_not(right_criteria)
right_index[right_criteria] = mid_index[right_criteria]
left_index[left_criteria] = mid_index[left_criteria]
# Expected state after bisection pass:
# The right index is the smallest index whose tally is greater than the
# negative item choice index.
assert np.all((right_index - left_index) <= 1)
output[not_use_shortcut] = (
self._sorted_train_pos_items[right_index] -
(self._total_negatives[right_index] - neg_item_choice))
assert np.all(output >= 0)
return output
def get_constructor(name):
if name == "bisection":
return BisectionDataConstructor
if name == "materialized":
return MaterializedDataConstructor
raise ValueError("Unrecognized constructor: {}".format(name))
| 37,206 | 36.850458 | 80 | py |
models | models-master/official/recommendation/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/recommendation/create_ncf_data.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to generate training/evaluation dataset for NCF model."""
import json
# pylint: disable=g-bad-import-order
# Import libraries
from absl import app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.recommendation import movielens
from official.recommendation import data_preprocessing
flags.DEFINE_string(
"data_dir", None,
"The input data dir at which training and evaluation tf record files "
"will be saved.")
flags.DEFINE_string("meta_data_file_path", None,
"The path in which input meta data will be written.")
flags.DEFINE_enum("dataset", "ml-20m", ["ml-1m", "ml-20m"],
"Dataset to be trained/evaluated.")
flags.DEFINE_enum(
"constructor_type", "bisection", ["bisection", "materialized"],
"Strategy to use for generating false negatives. materialized has a "
"precompute that scales badly, but a faster per-epoch construction "
"time and can be faster on very large systems.")
flags.DEFINE_integer("num_train_epochs", 14,
"Total number of training epochs to generate.")
flags.DEFINE_integer(
"num_negative_samples", 4,
"Number of negative instances to pair with positive instance.")
flags.DEFINE_integer(
"train_prebatch_size", 99000,
"Batch size to be used for prebatching the dataset "
"for training.")
flags.DEFINE_integer(
    "eval_prebatch_size", 99000,
    "Batch size to be used for prebatching the dataset "
    "for evaluation.")
FLAGS = flags.FLAGS
def prepare_raw_data(flag_obj):
"""Downloads and prepares raw data for data generation."""
movielens.download(flag_obj.dataset, flag_obj.data_dir)
data_processing_params = {
"train_epochs": flag_obj.num_train_epochs,
"batch_size": flag_obj.train_prebatch_size,
"eval_batch_size": flag_obj.eval_prebatch_size,
"batches_per_step": 1,
"stream_files": True,
"num_neg": flag_obj.num_negative_samples,
}
num_users, num_items, producer = data_preprocessing.instantiate_pipeline(
dataset=flag_obj.dataset,
data_dir=flag_obj.data_dir,
params=data_processing_params,
constructor_type=flag_obj.constructor_type,
epoch_dir=flag_obj.data_dir,
generate_data_offline=True)
# pylint: disable=protected-access
input_metadata = {
"num_users": num_users,
"num_items": num_items,
"constructor_type": flag_obj.constructor_type,
"num_train_elements": producer._elements_in_epoch,
"num_eval_elements": producer._eval_elements_in_epoch,
"num_train_epochs": flag_obj.num_train_epochs,
"train_prebatch_size": flag_obj.train_prebatch_size,
"eval_prebatch_size": flag_obj.eval_prebatch_size,
"num_train_steps": producer.train_batches_per_epoch,
"num_eval_steps": producer.eval_batches_per_epoch,
}
# pylint: enable=protected-access
return producer, input_metadata
def generate_data():
"""Creates NCF train/eval dataset and writes input metadata as a file."""
producer, input_metadata = prepare_raw_data(FLAGS)
producer.run()
with tf.io.gfile.GFile(FLAGS.meta_data_file_path, "w") as writer:
writer.write(json.dumps(input_metadata, indent=4) + "\n")
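# Example invocation (hypothetical paths):
#   python create_ncf_data.py --dataset=ml-20m --data_dir=/tmp/ncf_data \
#       --meta_data_file_path=/tmp/ncf_data/meta_data.json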
def main(_):
generate_data()
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("meta_data_file_path")
app.run(main)
| 4,008 | 33.86087 | 75 | py |
models | models-master/official/recommendation/ncf_common.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functionalities used by both Keras and Estimator implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from official.common import distribute_utils
from official.recommendation import constants as rconst
from official.recommendation import data_pipeline
from official.recommendation import data_preprocessing
from official.recommendation import movielens
from official.utils.flags import core as flags_core
FLAGS = flags.FLAGS
def get_inputs(params):
"""Returns some parameters used by the model."""
if FLAGS.download_if_missing and not FLAGS.use_synthetic_data:
movielens.download(FLAGS.dataset, FLAGS.data_dir)
if FLAGS.seed is not None:
np.random.seed(FLAGS.seed)
if FLAGS.use_synthetic_data:
producer = data_pipeline.DummyConstructor()
num_users, num_items = movielens.DATASET_TO_NUM_USERS_AND_ITEMS[
FLAGS.dataset]
num_train_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
num_eval_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
else:
num_users, num_items, producer = data_preprocessing.instantiate_pipeline(
dataset=FLAGS.dataset,
data_dir=FLAGS.data_dir,
params=params,
constructor_type=FLAGS.constructor_type,
deterministic=FLAGS.seed is not None)
num_train_steps = producer.train_batches_per_epoch
num_eval_steps = producer.eval_batches_per_epoch
return num_users, num_items, num_train_steps, num_eval_steps, producer
def parse_flags(flags_obj):
"""Convenience function to turn flags into params."""
num_gpus = flags_core.get_num_gpus(flags_obj)
batch_size = flags_obj.batch_size
eval_batch_size = flags_obj.eval_batch_size or flags_obj.batch_size
return {
"train_epochs": flags_obj.train_epochs,
"batches_per_step": 1,
"use_seed": flags_obj.seed is not None,
"batch_size": batch_size,
"eval_batch_size": eval_batch_size,
"learning_rate": flags_obj.learning_rate,
"mf_dim": flags_obj.num_factors,
"model_layers": [int(layer) for layer in flags_obj.layers],
"mf_regularization": flags_obj.mf_regularization,
"mlp_reg_layers": [float(reg) for reg in flags_obj.mlp_regularization],
"num_neg": flags_obj.num_neg,
"distribution_strategy": flags_obj.distribution_strategy,
"num_gpus": num_gpus,
"use_tpu": flags_obj.tpu is not None,
"tpu": flags_obj.tpu,
"tpu_zone": flags_obj.tpu_zone,
"tpu_gcp_project": flags_obj.tpu_gcp_project,
"beta1": flags_obj.beta1,
"beta2": flags_obj.beta2,
"epsilon": flags_obj.epsilon,
"match_mlperf": flags_obj.ml_perf,
"epochs_between_evals": flags_obj.epochs_between_evals,
"keras_use_ctl": flags_obj.keras_use_ctl,
"hr_threshold": flags_obj.hr_threshold,
"stream_files": flags_obj.tpu is not None,
"train_dataset_path": flags_obj.train_dataset_path,
"eval_dataset_path": flags_obj.eval_dataset_path,
"input_meta_data_path": flags_obj.input_meta_data_path,
}
def get_v1_distribution_strategy(params):
"""Returns the distribution strategy to use."""
if params["use_tpu"]:
# Some of the networking libraries are quite chatty.
for name in [
"googleapiclient.discovery", "googleapiclient.discovery_cache",
"oauth2client.transport"
]:
logging.getLogger(name).setLevel(logging.ERROR)
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=params["tpu"],
zone=params["tpu_zone"],
project=params["tpu_gcp_project"],
coordinator_name="coordinator")
logging.info("Issuing reset command to TPU to ensure a clean state.")
tf.Session.reset(tpu_cluster_resolver.get_master())
# Estimator looks at the master it connects to for MonitoredTrainingSession
# by reading the `TF_CONFIG` environment variable, and the coordinator
# is used by StreamingFilesDataset.
tf_config_env = {
"session_master":
tpu_cluster_resolver.get_master(),
"eval_session_master":
tpu_cluster_resolver.get_master(),
"coordinator":
tpu_cluster_resolver.cluster_spec().as_dict()["coordinator"]
}
os.environ["TF_CONFIG"] = json.dumps(tf_config_env)
distribution = tf.distribute.TPUStrategy(
tpu_cluster_resolver, steps_per_run=100)
else:
distribution = distribute_utils.get_distribution_strategy(
num_gpus=params["num_gpus"])
return distribution
def define_ncf_flags():
"""Add flags for running ncf_main."""
# Add common flags
flags_core.define_base(
model_dir=True,
clean=True,
train_epochs=True,
epochs_between_evals=True,
export_dir=False,
run_eagerly=True,
stop_threshold=True,
num_gpu=True,
distribution_strategy=True)
flags_core.define_performance(
synthetic_data=True,
dtype=True,
fp16_implementation=True,
loss_scale=True,
enable_xla=True,
)
flags_core.define_device(tpu=True)
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
movielens.define_flags()
flags_core.set_defaults(
model_dir="/tmp/ncf/",
data_dir="/tmp/movielens-data/",
dataset=movielens.ML_1M,
train_epochs=2,
batch_size=99000,
tpu=None)
# Add ncf-specific flags
flags.DEFINE_boolean(
name="download_if_missing",
default=True,
help=flags_core.help_wrap(
"Download data to data_dir if it is not already present."))
flags.DEFINE_integer(
name="eval_batch_size",
default=None,
      help=flags_core.help_wrap(
          "The batch size used for evaluation. This should generally be "
          "larger than the training batch size, as the lack of back "
          "propagation during evaluation can allow larger batch sizes to "
          "fit in memory. If not specified, the training batch size "
          "(--batch_size) will be used."))
flags.DEFINE_integer(
name="num_factors",
default=8,
      help=flags_core.help_wrap("The embedding size of the MF model."))
# Set the default as a list of strings to be consistent with input arguments
flags.DEFINE_list(
name="layers",
default=["64", "32", "16", "8"],
help=flags_core.help_wrap(
"The sizes of hidden layers for MLP. Example "
"to specify different sizes of MLP layers: --layers=32,16,8,4"))
flags.DEFINE_float(
name="mf_regularization",
default=0.,
      help=flags_core.help_wrap(
          "The regularization factor for MF embeddings. The factor is used "
          "by the regularizer, which applies penalties on layer parameters "
          "or layer activity during optimization."))
flags.DEFINE_list(
name="mlp_regularization",
default=["0.", "0.", "0.", "0."],
help=flags_core.help_wrap(
"The regularization factor for each MLP layer. See mf_regularization "
"help for more info about regularization factor."))
flags.DEFINE_integer(
name="num_neg",
default=4,
      help=flags_core.help_wrap(
          "The number of negative instances to pair with a positive instance."))
flags.DEFINE_float(
name="learning_rate",
default=0.001,
help=flags_core.help_wrap("The learning rate."))
flags.DEFINE_float(
name="beta1",
default=0.9,
help=flags_core.help_wrap("beta1 hyperparameter for the Adam optimizer."))
flags.DEFINE_float(
name="beta2",
default=0.999,
help=flags_core.help_wrap("beta2 hyperparameter for the Adam optimizer."))
flags.DEFINE_float(
name="epsilon",
default=1e-8,
help=flags_core.help_wrap("epsilon hyperparameter for the Adam "
"optimizer."))
flags.DEFINE_float(
name="hr_threshold",
default=1.0,
help=flags_core.help_wrap(
"If passed, training will stop when the evaluation metric HR is "
"greater than or equal to hr_threshold. For dataset ml-1m, the "
"desired hr_threshold is 0.68 which is the result from the paper; "
"For dataset ml-20m, the threshold can be set as 0.95 which is "
"achieved by MLPerf implementation."))
flags.DEFINE_enum(
name="constructor_type",
default="bisection",
enum_values=["bisection", "materialized"],
case_sensitive=False,
      help=flags_core.help_wrap(
          "Strategy to use for generating false negatives. materialized has "
          "a precompute that scales badly, but a faster per-epoch "
          "construction time and can be faster on very large systems."))
flags.DEFINE_string(
name="train_dataset_path",
default=None,
help=flags_core.help_wrap("Path to training data."))
flags.DEFINE_string(
name="eval_dataset_path",
default=None,
help=flags_core.help_wrap("Path to evaluation data."))
flags.DEFINE_string(
name="input_meta_data_path",
default=None,
help=flags_core.help_wrap("Path to input meta data file."))
flags.DEFINE_bool(
name="ml_perf",
default=False,
      help=flags_core.help_wrap(
          "If set, changes the behavior of the model slightly to match the "
          "MLPerf reference implementations here: \n"
          "https://github.com/mlperf/reference/tree/master/recommendation/"
          "pytorch\n"
          "The two changes are:\n"
          "1. When computing the HR and NDCG during evaluation, remove "
          "duplicate user-item pairs before the computation. This results in "
          "better HRs and NDCGs.\n"
          "2. Use a different sorting algorithm when sorting the input data, "
          "which performs better because the sorting algorithms are "
          "not stable."))
flags.DEFINE_bool(
name="output_ml_perf_compliance_logging",
default=False,
help=flags_core.help_wrap(
"If set, output the MLPerf compliance logging. This is only useful "
"if one is running the model for MLPerf. See "
"https://github.com/mlperf/policies/blob/master/training_rules.adoc"
"#submission-compliance-logs for details. This uses sudo and so may "
"ask for your password, as root access is needed to clear the system "
"caches, which is required for MLPerf compliance."))
flags.DEFINE_integer(
name="seed",
default=None,
help=flags_core.help_wrap(
"This value will be used to seed both NumPy and TensorFlow."))
@flags.validator(
"eval_batch_size",
"eval_batch_size must be at least {}".format(rconst.NUM_EVAL_NEGATIVES +
1))
def eval_size_check(eval_batch_size):
return (eval_batch_size is None or
int(eval_batch_size) > rconst.NUM_EVAL_NEGATIVES)
flags.DEFINE_bool(
name="early_stopping",
default=False,
help=flags_core.help_wrap(
"If True, we stop the training when it reaches hr_threshold"))
flags.DEFINE_bool(
name="keras_use_ctl",
default=False,
help=flags_core.help_wrap(
"If True, we use a custom training loop for keras."))
def convert_to_softmax_logits(logits):
"""Convert the logits returned by the base model to softmax logits.
Args:
    logits: The sigmoid logits returned by the base model.

  Returns:
    Softmax logits; prepending a column of zeros makes the result equivalent
    to a sigmoid over the original logits.
  """
softmax_logits = tf.concat([logits * 0, logits], axis=1)
return softmax_logits
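# Why the zero column works (sketch): softmax([0, z]) = [1, e^z] / (1 + e^z),
# so the probability assigned to the second column is exactly sigmoid(z).
# For example:
#   tf.nn.softmax(convert_to_softmax_logits(tf.constant([[2.0]])))
#   -> [[0.1192, 0.8808]], matching tf.sigmoid(2.0) ~= 0.8808.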
| 12,278 | 33.784703 | 80 | py |
models | models-master/official/recommendation/movielens.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download and extract the MovieLens dataset from the GroupLens website.
Download the dataset, and perform basic preprocessing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import zipfile
# pylint: disable=g-bad-import-order
# Import libraries
import numpy as np
import pandas as pd
import six
from six.moves import urllib # pylint: disable=redefined-builtin
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.utils.flags import core as flags_core
ML_1M = "ml-1m"
ML_20M = "ml-20m"
DATASETS = [ML_1M, ML_20M]
RATINGS_FILE = "ratings.csv"
MOVIES_FILE = "movies.csv"
# URL to download dataset
_DATA_URL = "https://files.grouplens.org/datasets/movielens/"
GENRE_COLUMN = "genres"
ITEM_COLUMN = "item_id" # movies
RATING_COLUMN = "rating"
TIMESTAMP_COLUMN = "timestamp"
TITLE_COLUMN = "titles"
USER_COLUMN = "user_id"
GENRES = [
    "Action", "Adventure", "Animation", "Children", "Comedy", "Crime",
    "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "IMAX",
    "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
N_GENRE = len(GENRES)
RATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN]
MOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN]
# Note: Users are indexed [1, k], not [0, k-1]
NUM_USER_IDS = {
ML_1M: 6040,
ML_20M: 138493,
}
# Note: Movies are indexed [1, k], not [0, k-1]
# Both the 1m and 20m datasets use the same movie set.
NUM_ITEM_IDS = 3952
MAX_RATING = 5
NUM_RATINGS = {
ML_1M: 1000209,
ML_20M: 20000263
}
DATASET_TO_NUM_USERS_AND_ITEMS = {ML_1M: (6040, 3706), ML_20M: (138493, 26744)}
def _download_and_clean(dataset, data_dir):
"""Download MovieLens dataset in a standard format.
  This function downloads the specified MovieLens dataset and coerces it into a
standard format. The only difference between the ml-1m and ml-20m datasets
after this point (other than size, of course) is that the 1m dataset uses
whole number ratings while the 20m dataset allows half integer ratings.
"""
if dataset not in DATASETS:
raise ValueError("dataset {} is not in {{{}}}".format(
dataset, ",".join(DATASETS)))
data_subdir = os.path.join(data_dir, dataset)
expected_files = ["{}.zip".format(dataset), RATINGS_FILE, MOVIES_FILE]
tf.io.gfile.makedirs(data_subdir)
if set(expected_files).intersection(
tf.io.gfile.listdir(data_subdir)) == set(expected_files):
logging.info("Dataset {} has already been downloaded".format(dataset))
return
url = "{}{}.zip".format(_DATA_URL, dataset)
temp_dir = tempfile.mkdtemp()
try:
zip_path = os.path.join(temp_dir, "{}.zip".format(dataset))
zip_path, _ = urllib.request.urlretrieve(url, zip_path)
statinfo = os.stat(zip_path)
# A new line to clear the carriage return from download progress
# logging.info is not applicable here
print()
logging.info(
"Successfully downloaded {} {} bytes".format(
zip_path, statinfo.st_size))
zipfile.ZipFile(zip_path, "r").extractall(temp_dir)
if dataset == ML_1M:
_regularize_1m_dataset(temp_dir)
else:
_regularize_20m_dataset(temp_dir)
for fname in tf.io.gfile.listdir(temp_dir):
if not tf.io.gfile.exists(os.path.join(data_subdir, fname)):
tf.io.gfile.copy(os.path.join(temp_dir, fname),
os.path.join(data_subdir, fname))
else:
logging.info("Skipping copy of {}, as it already exists in the "
"destination folder.".format(fname))
finally:
tf.io.gfile.rmtree(temp_dir)
def _transform_csv(input_path, output_path, names, skip_first, separator=","):
"""Transform csv to a regularized format.
Args:
input_path: The path of the raw csv.
output_path: The path of the cleaned csv.
names: The csv column names.
skip_first: Boolean of whether to skip the first line of the raw csv.
separator: Character used to separate fields in the raw csv.
"""
if six.PY2:
names = [six.ensure_text(n, "utf-8") for n in names]
with tf.io.gfile.GFile(output_path, "wb") as f_out, \
tf.io.gfile.GFile(input_path, "rb") as f_in:
# Write column names to the csv.
f_out.write(",".join(names).encode("utf-8"))
f_out.write(b"\n")
for i, line in enumerate(f_in):
if i == 0 and skip_first:
continue # ignore existing labels in the csv
line = six.ensure_text(line, "utf-8", errors="ignore")
fields = line.split(separator)
if separator != ",":
fields = ['"{}"'.format(field) if "," in field else field
for field in fields]
f_out.write(",".join(fields).encode("utf-8"))
def _regularize_1m_dataset(temp_dir):
  """Regularize the ml-1m dataset into the common csv layout.

  ratings.dat
    The file has no header row, and each line is in the following format:
    UserID::MovieID::Rating::Timestamp
    - UserIDs range from 1 to 6040
    - MovieIDs range from 1 to 3952
- Ratings are made on a 5-star scale (whole-star ratings only)
- Timestamp is represented in seconds since midnight Coordinated Universal
Time (UTC) of January 1, 1970.
- Each user has at least 20 ratings
movies.dat
Each line has the following format:
MovieID::Title::Genres
    - MovieIDs range from 1 to 3952
"""
working_dir = os.path.join(temp_dir, ML_1M)
_transform_csv(
input_path=os.path.join(working_dir, "ratings.dat"),
output_path=os.path.join(temp_dir, RATINGS_FILE),
names=RATING_COLUMNS, skip_first=False, separator="::")
_transform_csv(
input_path=os.path.join(working_dir, "movies.dat"),
output_path=os.path.join(temp_dir, MOVIES_FILE),
names=MOVIE_COLUMNS, skip_first=False, separator="::")
tf.io.gfile.rmtree(working_dir)
def _regularize_20m_dataset(temp_dir):
  """Regularize the ml-20m dataset into the common csv layout.

ratings.csv
Each line of this file after the header row represents one rating of one
movie by one user, and has the following format:
userId,movieId,rating,timestamp
- The lines within this file are ordered first by userId, then, within user,
by movieId.
- Ratings are made on a 5-star scale, with half-star increments
(0.5 stars - 5.0 stars).
- Timestamps represent seconds since midnight Coordinated Universal Time
(UTC) of January 1, 1970.
- All the users had rated at least 20 movies.
movies.csv
Each line has the following format:
MovieID,Title,Genres
    - MovieIDs range from 1 to 3952
"""
working_dir = os.path.join(temp_dir, ML_20M)
_transform_csv(
input_path=os.path.join(working_dir, "ratings.csv"),
output_path=os.path.join(temp_dir, RATINGS_FILE),
names=RATING_COLUMNS, skip_first=True, separator=",")
_transform_csv(
input_path=os.path.join(working_dir, "movies.csv"),
output_path=os.path.join(temp_dir, MOVIES_FILE),
names=MOVIE_COLUMNS, skip_first=True, separator=",")
tf.io.gfile.rmtree(working_dir)
def download(dataset, data_dir):
if dataset:
_download_and_clean(dataset, data_dir)
else:
    for d in DATASETS:
      _download_and_clean(d, data_dir)
def ratings_csv_to_dataframe(data_dir, dataset):
with tf.io.gfile.GFile(os.path.join(data_dir, dataset, RATINGS_FILE)) as f:
return pd.read_csv(f, encoding="utf-8")
def csv_to_joint_dataframe(data_dir, dataset):
ratings = ratings_csv_to_dataframe(data_dir, dataset)
with tf.io.gfile.GFile(os.path.join(data_dir, dataset, MOVIES_FILE)) as f:
movies = pd.read_csv(f, encoding="utf-8")
df = ratings.merge(movies, on=ITEM_COLUMN)
df[RATING_COLUMN] = df[RATING_COLUMN].astype(np.float32)
return df
def integerize_genres(dataframe):
"""Replace genre string with a binary vector.
Args:
dataframe: a pandas dataframe of movie data.
Returns:
The transformed dataframe.
"""
def _map_fn(entry):
    entry = entry.replace("Children's", "Children")  # naming difference.
movie_genres = entry.split("|")
output = np.zeros((len(GENRES),), dtype=np.int64)
for i, genre in enumerate(GENRES):
if genre in movie_genres:
output[i] = 1
return output
dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn)
return dataframe
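# For example, the genre string "Comedy|Romance" maps to a length-19 binary
# vector with ones at the Comedy and Romance positions (indices 4 and 14 of
# GENRES) and zeros elsewhere.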
def define_flags():
"""Add flags specifying data usage arguments."""
flags.DEFINE_enum(
name="dataset",
default=None,
enum_values=DATASETS,
case_sensitive=False,
help=flags_core.help_wrap("Dataset to be trained and evaluated."))
def define_data_download_flags():
"""Add flags specifying data download and usage arguments."""
flags.DEFINE_string(
name="data_dir", default="/tmp/movielens-data/",
help=flags_core.help_wrap(
"Directory to download and extract data."))
define_flags()
def main(_):
"""Download and extract the data from GroupLens website."""
download(flags.FLAGS.dataset, flags.FLAGS.data_dir)
if __name__ == "__main__":
define_data_download_flags()
FLAGS = flags.FLAGS
app.run(main)
| 9,734 | 29.517241 | 80 | py |
models | models-master/official/recommendation/popen_helper.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper file for running the async data generation process in OSS."""
import contextlib
import multiprocessing
import multiprocessing.pool
def get_forkpool(num_workers, init_worker=None, closing=True):
pool = multiprocessing.Pool(processes=num_workers, initializer=init_worker)
return contextlib.closing(pool) if closing else pool
def get_threadpool(num_workers, init_worker=None, closing=True):
pool = multiprocessing.pool.ThreadPool(processes=num_workers,
initializer=init_worker)
return contextlib.closing(pool) if closing else pool
class FauxPool(object):
"""Mimic a pool using for loops.
This class is used in place of proper pools when true determinism is desired
for testing or debugging.
"""
def __init__(self, *args, **kwargs):
pass
def map(self, func, iterable, chunksize=None):
return [func(i) for i in iterable]
def imap(self, func, iterable, chunksize=1):
for i in iterable:
yield func(i)
def close(self):
pass
def terminate(self):
pass
def join(self):
pass
def get_fauxpool(num_workers, init_worker=None, closing=True):
pool = FauxPool(processes=num_workers, initializer=init_worker)
return contextlib.closing(pool) if closing else pool
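# Usage sketch: all three pool flavors expose the same map/imap interface, so
# callers can swap them freely, e.g.
#   with get_fauxpool(1) as pool:
#     squares = pool.map(lambda i: i * i, range(4))  # -> [0, 1, 4, 9]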
def worker_job():
return "worker"
| 1,917 | 28.507692 | 78 | py |
models | models-master/official/recommendation/stat_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Statistics utility functions of NCF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def random_int32():
return np.random.randint(low=0, high=np.iinfo(np.int32).max, dtype=np.int32)
def permutation(args):
"""Fork safe permutation function.
This function can be called within a multiprocessing worker and give
appropriately random results.
Args:
    args: A size two tuple that will be unpacked into the size of the
      permutation and the random seed. This form is used because starmap is
      not universally available.
Returns:
A NumPy array containing a random permutation.
"""
x, seed = args
# If seed is None NumPy will seed randomly.
state = np.random.RandomState(seed=seed) # pylint: disable=no-member
output = np.arange(x, dtype=np.int32)
state.shuffle(output)
return output
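# Usage sketch: permutation((5, 42)) returns a deterministic shuffle of
# np.arange(5), while permutation((5, None)) is seeded randomly. The single
# tuple argument keeps the function compatible with pool.imap, which passes
# exactly one argument per call.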
def very_slightly_biased_randint(max_val_vector):
sample_dtype = np.uint64
out_dtype = max_val_vector.dtype
samples = np.random.randint(
low=0,
high=np.iinfo(sample_dtype).max,
size=max_val_vector.shape,
dtype=sample_dtype)
return np.mod(samples, max_val_vector.astype(sample_dtype)).astype(out_dtype)
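# The "very slight" bias comes from the modulo: samples are uniform over the
# uint64 range and reduced mod max_val, so small results are over-represented
# by a relative factor on the order of max_val / 2**64 -- negligible for item
# counts. For example (output values vary):
#   very_slightly_biased_randint(np.array([10, 1000], dtype=np.int32))
#   -> e.g. array([3, 417], dtype=int32), one draw in [0, n) per entry.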
def mask_duplicates(x, axis=1): # type: (np.ndarray, int) -> np.ndarray
"""Identify duplicates from sampling with replacement.
Args:
x: A 2D NumPy array of samples
axis: The axis along which to de-dupe.
Returns:
    A NumPy array with the same shape as x, containing one where an element
    has already appeared along the axis, else zero.
"""
if axis != 1:
raise NotImplementedError
x_sort_ind = np.argsort(x, axis=1, kind="mergesort")
sorted_x = x[np.arange(x.shape[0])[:, np.newaxis], x_sort_ind]
# compute the indices needed to map values back to their original position.
inv_x_sort_ind = np.argsort(x_sort_ind, axis=1, kind="mergesort")
# Compute the difference of adjacent sorted elements.
diffs = sorted_x[:, :-1] - sorted_x[:, 1:]
# We are only interested in whether an element is zero. Therefore left padding
# with ones to restore the original shape is sufficient.
diffs = np.concatenate(
[np.ones((diffs.shape[0], 1), dtype=diffs.dtype), diffs], axis=1)
# Duplicate values will have a difference of zero. By definition the first
# element is never a duplicate.
return np.where(diffs[np.arange(x.shape[0])[:, np.newaxis], inv_x_sort_ind],
0, 1)
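# Minimal sketch:
#   >>> mask_duplicates(np.array([[1, 2, 2, 7], [3, 3, 3, 9]]))
#   array([[0, 0, 1, 0],
#          [0, 1, 1, 0]])
# The first occurrence of each value along the axis stays 0; repeats become 1.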
| 3,076 | 32.086022 | 80 | py |
models | models-master/official/recommendation/ncf_input_pipeline.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NCF model input pipeline."""
import functools
# pylint: disable=g-bad-import-order
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.recommendation import constants as rconst
from official.recommendation import data_pipeline
from official.recommendation import movielens
def create_dataset_from_tf_record_files(input_file_pattern,
pre_batch_size,
batch_size,
is_training=True,
rebatch=False):
"""Creates dataset from (tf)records files for training/evaluation."""
if pre_batch_size != batch_size:
    raise ValueError("Pre-batch size ({}) is not equal to batch "
                     "size ({})".format(pre_batch_size, batch_size))
files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)
dataset = files.interleave(
tf.data.TFRecordDataset,
cycle_length=16,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
decode_fn = functools.partial(
data_pipeline.DatasetManager.deserialize,
batch_size=pre_batch_size,
is_training=is_training)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if rebatch:
# A workaround for TPU Pod evaluation dataset.
# TODO (b/162341937) remove once it's fixed.
dataset = dataset.unbatch()
dataset = dataset.batch(pre_batch_size)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
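# Minimal usage sketch (illustrative; the path and sizes are assumptions, and
# the records are expected to have been serialized by
# data_pipeline.DatasetManager):
# dataset = create_dataset_from_tf_record_files(
#     "/tmp/ncf/train-*.tfrecord", pre_batch_size=1024, batch_size=1024,
#     is_training=True)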
def create_dataset_from_data_producer(producer, params):
"""Return dataset online-generating data."""
def preprocess_train_input(features, labels):
"""Pre-process the training data.
This is needed because
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for DUPLICATE_MASK in training data.
Args:
features: Dictionary of features for training.
labels: Training labels.
Returns:
Processed training features.
"""
fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
features[rconst.DUPLICATE_MASK] = fake_dup_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
train_input_fn = producer.make_input_fn(is_training=True)
train_input_dataset = train_input_fn(params).map(preprocess_train_input)
def preprocess_eval_input(features):
"""Pre-process the eval data.
This is needed because:
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for VALID_PT_MASK in eval data.
Args:
features: Dictionary of features for evaluation.
Returns:
Processed evaluation features.
"""
labels = tf.cast(tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
fake_valid_pt_mask = tf.cast(
tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
eval_input_fn = producer.make_input_fn(is_training=False)
eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)
return train_input_dataset, eval_input_dataset
def create_ncf_input_data(params,
producer=None,
input_meta_data=None,
strategy=None):
"""Creates NCF training/evaluation dataset.
Args:
params: Dictionary containing parameters for train/evaluation data.
producer: Instance of BaseDataConstructor that generates data online. Must
not be None when params['train_dataset_path'] or
params['eval_dataset_path'] is not specified.
    input_meta_data: A dictionary of input metadata to be used when reading
      data from TFRecord files. Must be specified when
      params["train_dataset_path"] is specified.
    strategy: Distribution strategy used for distributed training. If
      specified, used to assert that the evaluation batch size is a multiple
      of the total number of devices used.
Returns:
(training dataset, evaluation dataset, train steps per epoch,
eval steps per epoch)
Raises:
    ValueError: If data is being generated online when using TPUs.
"""
  # NCF evaluation metric calculation logic assumes that the evaluation data
  # sample size is a multiple of (1 + number of negative samples in
  # evaluation) for each device. As such, the evaluation batch size must be a
  # multiple of (number of replicas * (1 + number of negative samples)).
num_devices = strategy.num_replicas_in_sync if strategy else 1
if (params["eval_batch_size"] % (num_devices *
(1 + rconst.NUM_EVAL_NEGATIVES))):
raise ValueError("Evaluation batch size must be divisible by {} "
"times {}".format(num_devices,
(1 + rconst.NUM_EVAL_NEGATIVES)))
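  # Illustrative arithmetic (added): with 8 replicas and NUM_EVAL_NEGATIVES
  # set to 999 (the MLPerf setting), the eval batch size must be a multiple
  # of 8 * (1 + 999) = 8000.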
if params["train_dataset_path"]:
assert params["eval_dataset_path"]
train_dataset = create_dataset_from_tf_record_files(
params["train_dataset_path"],
input_meta_data["train_prebatch_size"],
params["batch_size"],
is_training=True,
rebatch=False)
# Re-batch evaluation dataset for TPU Pods.
# TODO (b/162341937) remove once it's fixed.
eval_rebatch = (params["use_tpu"] and strategy.num_replicas_in_sync > 8)
eval_dataset = create_dataset_from_tf_record_files(
params["eval_dataset_path"],
input_meta_data["eval_prebatch_size"],
params["eval_batch_size"],
is_training=False,
rebatch=eval_rebatch)
num_train_steps = int(input_meta_data["num_train_steps"])
num_eval_steps = int(input_meta_data["num_eval_steps"])
else:
if params["use_tpu"]:
raise ValueError("TPU training does not support data producer yet. "
"Use pre-processed data.")
assert producer
# Start retrieving data from producer.
train_dataset, eval_dataset = create_dataset_from_data_producer(
producer, params)
num_train_steps = producer.train_batches_per_epoch
num_eval_steps = producer.eval_batches_per_epoch
return train_dataset, eval_dataset, num_train_steps, num_eval_steps
| 6,964 | 36.648649 | 80 | py |
models | models-master/official/recommendation/ncf_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests NCF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow as tf
from tensorflow.python.eager import context # pylint: disable=ungrouped-imports
from official.recommendation import constants as rconst
from official.recommendation import ncf_common
from official.recommendation import ncf_keras_main
from official.utils.testing import integration
NUM_TRAIN_NEG = 4
class NcfTest(tf.test.TestCase):
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
super(NcfTest, cls).setUpClass()
ncf_common.define_ncf_flags()
def setUp(self):
super().setUp()
self.top_k_old = rconst.TOP_K
self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES
rconst.NUM_EVAL_NEGATIVES = 2
def tearDown(self):
super().tearDown()
rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old
rconst.TOP_K = self.top_k_old
_BASE_END_TO_END_FLAGS = ['-batch_size', '1044', '-train_epochs', '1']
@unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
def test_end_to_end_keras_no_dist_strat(self):
integration.run_synthetic(
ncf_keras_main.main,
tmp_root=self.get_temp_dir(),
extra_flags=self._BASE_END_TO_END_FLAGS +
['-distribution_strategy', 'off'])
@unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
def test_end_to_end_keras_dist_strat(self):
integration.run_synthetic(
ncf_keras_main.main,
tmp_root=self.get_temp_dir(),
extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0'])
@unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
def test_end_to_end_keras_dist_strat_ctl(self):
flags = (
self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0'] +
['-keras_use_ctl', 'True'])
integration.run_synthetic(
ncf_keras_main.main, tmp_root=self.get_temp_dir(), extra_flags=flags)
@unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
def test_end_to_end_keras_1_gpu_dist_strat_fp16(self):
if context.num_gpus() < 1:
self.skipTest(
'{} GPUs are not available for this test. {} GPUs are available'
.format(1, context.num_gpus()))
integration.run_synthetic(
ncf_keras_main.main,
tmp_root=self.get_temp_dir(),
extra_flags=self._BASE_END_TO_END_FLAGS +
['-num_gpus', '1', '--dtype', 'fp16'])
@unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
def test_end_to_end_keras_1_gpu_dist_strat_ctl_fp16(self):
if context.num_gpus() < 1:
self.skipTest(
'{} GPUs are not available for this test. {} GPUs are available'
.format(1, context.num_gpus()))
integration.run_synthetic(
ncf_keras_main.main,
tmp_root=self.get_temp_dir(),
extra_flags=self._BASE_END_TO_END_FLAGS +
['-num_gpus', '1', '--dtype', 'fp16', '--keras_use_ctl'])
@unittest.mock.patch.object(rconst, 'SYNTHETIC_BATCHES_PER_EPOCH', 100)
def test_end_to_end_keras_2_gpu_fp16(self):
if context.num_gpus() < 2:
self.skipTest(
'{} GPUs are not available for this test. {} GPUs are available'
.format(2, context.num_gpus()))
integration.run_synthetic(
ncf_keras_main.main,
tmp_root=self.get_temp_dir(),
extra_flags=self._BASE_END_TO_END_FLAGS +
['-num_gpus', '2', '--dtype', 'fp16'])
if __name__ == '__main__':
tf.test.main()
| 4,134 | 34.042373 | 80 | py |
models | models-master/official/recommendation/ranking/task_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for task."""
from absl.testing import parameterized
import tensorflow as tf
from official.core import exp_factory
from official.recommendation.ranking import task
from official.recommendation.ranking.data import data_pipeline
class TaskTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(('dlrm_criteo', True),
('dlrm_criteo', False),
('dcn_criteo', True),
('dcn_criteo', False))
def test_task(self, config_name, is_training):
params = exp_factory.get_exp_config(config_name)
params.task.train_data.global_batch_size = 16
params.task.validation_data.global_batch_size = 16
params.task.model.vocab_sizes = [40, 12, 11, 13, 2, 5]
params.task.model.embedding_dim = 8
params.task.model.bottom_mlp = [64, 32, 8]
params.task.use_synthetic_data = True
params.task.model.num_dense_features = 5
ranking_task = task.RankingTask(params.task,
params.trainer.optimizer_config)
if is_training:
dataset = data_pipeline.train_input_fn(params.task)
else:
dataset = data_pipeline.eval_input_fn(params.task)
iterator = iter(dataset(ctx=None))
model = ranking_task.build_model()
if is_training:
ranking_task.train_step(next(iterator), model, model.optimizer,
metrics=model.metrics)
else:
ranking_task.validation_step(next(iterator), model, metrics=model.metrics)
if __name__ == '__main__':
tf.test.main()
| 2,182 | 34.209677 | 80 | py |
models | models-master/official/recommendation/ranking/task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for the Ranking model."""
import math
from typing import Dict, List, Optional, Union
import tensorflow as tf
import tensorflow_recommenders as tfrs
from official.core import base_task
from official.core import config_definitions
from official.recommendation.ranking import common
from official.recommendation.ranking.configs import config
from official.recommendation.ranking.data import data_pipeline
RuntimeConfig = config_definitions.RuntimeConfig
def _get_tpu_embedding_feature_config(
vocab_sizes: List[int],
embedding_dim: Union[int, List[int]],
table_name_prefix: str = 'embedding_table'
) -> Dict[str, tf.tpu.experimental.embedding.FeatureConfig]:
"""Returns TPU embedding feature config.
  The i'th table config will have a vocab size of vocab_sizes[i] and an
  embedding dimension of embedding_dim (if embedding_dim is an int) or
  embedding_dim[i] (if embedding_dim is a list).
Args:
vocab_sizes: List of sizes of categories/id's in the table.
embedding_dim: An integer or a list of embedding table dimensions.
table_name_prefix: a prefix for embedding tables.
Returns:
A dictionary of feature_name, FeatureConfig pairs.
"""
if isinstance(embedding_dim, List):
if len(vocab_sizes) != len(embedding_dim):
raise ValueError(
f'length of vocab_sizes: {len(vocab_sizes)} is not equal to the '
f'length of embedding_dim: {len(embedding_dim)}')
elif isinstance(embedding_dim, int):
embedding_dim = [embedding_dim] * len(vocab_sizes)
else:
    raise ValueError('embedding_dim is neither a list nor an int, got '
                     f'{type(embedding_dim)}')
feature_config = {}
for i, vocab_size in enumerate(vocab_sizes):
table_config = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=vocab_size,
dim=embedding_dim[i],
combiner='mean',
initializer=tf.initializers.TruncatedNormal(
mean=0.0, stddev=1 / math.sqrt(embedding_dim[i])),
name=table_name_prefix + '_%s' % i)
feature_config[str(i)] = tf.tpu.experimental.embedding.FeatureConfig(
table=table_config)
return feature_config
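# Illustrative example (not part of the original module):
# _get_tpu_embedding_feature_config(vocab_sizes=[40, 12], embedding_dim=8)
# returns a dict with keys '0' and '1', whose tables have vocabulary sizes
# 40 and 12, embedding dimension 8, and names 'embedding_table_0' and
# 'embedding_table_1'.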
class RankingTask(base_task.Task):
"""A task for Ranking Model."""
def __init__(self,
params: config.Task,
optimizer_config: config.OptimizationConfig,
logging_dir: Optional[str] = None,
steps_per_execution: int = 1,
name: Optional[str] = None):
"""Task initialization.
Args:
params: the RankingModel task configuration instance.
optimizer_config: Optimizer configuration instance.
logging_dir: a string pointing to where the model, summaries etc. will be
saved.
steps_per_execution: Int. Defaults to 1. The number of batches to run
during each `tf.function` call. It's used for compile/fit API.
name: the task name.
"""
super().__init__(params, logging_dir, name=name)
self._optimizer_config = optimizer_config
self._steps_per_execution = steps_per_execution
def build_inputs(self, params, input_context=None):
"""Builds classification input."""
dataset = data_pipeline.CriteoTsvReader(
file_pattern=params.input_path,
params=params,
vocab_sizes=self.task_config.model.vocab_sizes,
num_dense_features=self.task_config.model.num_dense_features,
use_synthetic_data=self.task_config.use_synthetic_data)
return dataset(input_context)
@classmethod
def create_optimizer(cls, optimizer_config: config.OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None) -> None:
"""See base class. Return None, optimizer is set in `build_model`."""
return None
def build_model(self) -> tf.keras.Model:
"""Creates Ranking model architecture and Optimizers.
The RankingModel uses different optimizers/learning rates for embedding
variables and dense variables.
Returns:
A Ranking model instance.
"""
lr_config = self.optimizer_config.lr_config
lr_callable = common.WarmUpAndPolyDecay(
batch_size=self.task_config.train_data.global_batch_size,
decay_exp=lr_config.decay_exp,
learning_rate=lr_config.learning_rate,
warmup_steps=lr_config.warmup_steps,
decay_steps=lr_config.decay_steps,
decay_start_steps=lr_config.decay_start_steps)
dense_optimizer = tf.keras.optimizers.legacy.Adam()
embedding_optimizer = tf.keras.optimizers.get(
self.optimizer_config.embedding_optimizer, use_legacy_optimizer=True)
embedding_optimizer.learning_rate = lr_callable
feature_config = _get_tpu_embedding_feature_config(
embedding_dim=self.task_config.model.embedding_dim,
vocab_sizes=self.task_config.model.vocab_sizes)
embedding_layer = tfrs.experimental.layers.embedding.PartialTPUEmbedding(
feature_config=feature_config,
optimizer=embedding_optimizer,
size_threshold=self.task_config.model.size_threshold)
if self.task_config.model.interaction == 'dot':
feature_interaction = tfrs.layers.feature_interaction.DotInteraction(
skip_gather=True)
elif self.task_config.model.interaction == 'cross':
feature_interaction = tf.keras.Sequential([
tf.keras.layers.Concatenate(),
tfrs.layers.feature_interaction.Cross()
])
else:
      raise ValueError(
          f'params.task.model.interaction {self.task_config.model.interaction} '
          'is not supported; it must be either \'dot\' or \'cross\'.')
model = tfrs.experimental.models.Ranking(
embedding_layer=embedding_layer,
bottom_stack=tfrs.layers.blocks.MLP(
units=self.task_config.model.bottom_mlp, final_activation='relu'),
feature_interaction=feature_interaction,
top_stack=tfrs.layers.blocks.MLP(
units=self.task_config.model.top_mlp, final_activation='sigmoid'),
)
optimizer = tfrs.experimental.optimizers.CompositeOptimizer([
(embedding_optimizer, lambda: model.embedding_trainable_variables),
(dense_optimizer, lambda: model.dense_trainable_variables),
])
model.compile(optimizer, steps_per_execution=self._steps_per_execution)
return model
def train_step(
self,
inputs: Dict[str, tf.Tensor],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[tf.keras.metrics.Metric]] = None) -> tf.Tensor:
"""See base class."""
# All metrics need to be passed through the RankingModel.
assert metrics == model.metrics
return model.train_step(inputs)
def validation_step(
self,
inputs: Dict[str, tf.Tensor],
model: tf.keras.Model,
metrics: Optional[List[tf.keras.metrics.Metric]] = None) -> tf.Tensor:
"""See base class."""
# All metrics need to be passed through the RankingModel.
assert metrics == model.metrics
return model.test_step(inputs)
@property
def optimizer_config(self) -> config.OptimizationConfig:
return self._optimizer_config
| 7,723 | 36.678049 | 80 | py |
models | models-master/official/recommendation/ranking/common.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and common definitions for Ranking Models."""
from absl import flags
import tensorflow as tf
from official.common import flags as tfm_flags
FLAGS = flags.FLAGS
def define_flags() -> None:
"""Defines flags for training the Ranking model."""
tfm_flags.define_flags()
FLAGS.set_default(name='experiment', value='dlrm_criteo')
FLAGS.set_default(name='mode', value='train_and_eval')
flags.DEFINE_integer(
name='seed',
default=None,
help='This value will be used to seed both NumPy and TensorFlow.')
flags.DEFINE_string(
name='profile_steps',
default='20,40',
help='Save profiling data to model dir at given range of global steps. '
'The value must be a comma separated pair of positive integers, '
'specifying the first and last step to profile. For example, '
'"--profile_steps=2,4" triggers the profiler to process 3 steps, starting'
' from the 2nd step. Note that profiler has a non-trivial performance '
'overhead, and the output file can be gigantic if profiling many steps.')
@tf.keras.utils.register_keras_serializable(package='RANKING')
class WarmUpAndPolyDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Learning rate callable for the embeddings.
  Linear warmup on [0, warmup_steps], then constant on
  [warmup_steps, decay_start_steps], and polynomial decay on
  [decay_start_steps, decay_start_steps + decay_steps].
"""
def __init__(self,
batch_size: int,
decay_exp: float = 2.0,
learning_rate: float = 40.0,
warmup_steps: int = 8000,
decay_steps: int = 12000,
decay_start_steps: int = 10000):
super(WarmUpAndPolyDecay, self).__init__()
self.batch_size = batch_size
self.decay_exp = decay_exp
self.learning_rate = learning_rate
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.decay_start_steps = decay_start_steps
def __call__(self, step):
decay_exp = self.decay_exp
learning_rate = self.learning_rate
warmup_steps = self.warmup_steps
decay_steps = self.decay_steps
decay_start_steps = self.decay_start_steps
    # Scale the learning rate linearly with the global batch size;
    # 2048 is the reference batch size.
    scale = self.batch_size / 2048
    adj_lr = learning_rate * scale
if warmup_steps == 0:
return adj_lr
warmup_lr = step / warmup_steps * adj_lr
global_step = tf.cast(step, tf.float32)
decay_steps = tf.cast(decay_steps, tf.float32)
decay_start_step = tf.cast(decay_start_steps, tf.float32)
warmup_lr = tf.cast(warmup_lr, tf.float32)
steps_since_decay_start = global_step - decay_start_step
already_decayed_steps = tf.minimum(steps_since_decay_start, decay_steps)
decay_lr = adj_lr * (
(decay_steps - already_decayed_steps) / decay_steps)**decay_exp
decay_lr = tf.maximum(0.0001, decay_lr)
lr = tf.where(
global_step < warmup_steps, warmup_lr,
tf.where(
tf.logical_and(decay_steps > 0, global_step > decay_start_step),
decay_lr, adj_lr))
lr = tf.maximum(0.01, lr)
return lr
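  # Schedule summary (added for clarity): with adj_lr = learning_rate *
  # batch_size / 2048,
  #   lr(step) = adj_lr * step / warmup_steps   for step < warmup_steps,
  #   lr(step) = adj_lr                         up to decay_start_steps,
  #   lr(step) = adj_lr * ((decay_steps - (step - decay_start_steps))
  #              / decay_steps) ** decay_exp    afterwards,
  # with the decay term floored at 1e-4 and the final value floored at 0.01.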
def get_config(self):
return {
'batch_size': self.batch_size,
'decay_exp': self.decay_exp,
'learning_rate': self.learning_rate,
'warmup_steps': self.warmup_steps,
'decay_steps': self.decay_steps,
'decay_start_steps': self.decay_start_steps
}
| 3,988 | 33.991228 | 80 | py |
models | models-master/official/recommendation/ranking/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/recommendation/ranking/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train and evaluate the Ranking model."""
from typing import Dict
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.common import distribute_utils
from official.core import base_trainer
from official.core import train_lib
from official.core import train_utils
from official.recommendation.ranking import common
from official.recommendation.ranking.task import RankingTask
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
class RankingTrainer(base_trainer.Trainer):
"""A trainer for Ranking Model.
The RankingModel has two optimizers for embedding and non embedding weights.
Overriding `train_loop_end` method to log learning rates for each optimizer.
"""
def train_loop_end(self) -> Dict[str, float]:
"""See base class."""
self.join()
logs = {}
for metric in self.train_metrics + [self.train_loss]:
logs[metric.name] = metric.result()
metric.reset_states()
for i, optimizer in enumerate(self.optimizer.optimizers):
lr_key = f'{type(optimizer).__name__}_{i}_learning_rate'
if callable(optimizer.learning_rate):
logs[lr_key] = optimizer.learning_rate(self.global_step)
else:
logs[lr_key] = optimizer.learning_rate
return logs
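# Example of the extra keys produced above (illustrative): with the default
# DLRM config the composite optimizer yields entries such as
# 'SGD_0_learning_rate' and 'Adam_1_learning_rate' alongside the metrics.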
def main(_) -> None:
"""Train and evaluate the Ranking model."""
params = train_utils.parse_configuration(FLAGS)
mode = FLAGS.mode
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
if FLAGS.seed is not None:
logging.info('Setting tf seed.')
tf.random.set_seed(FLAGS.seed)
task = RankingTask(
params=params.task,
optimizer_config=params.trainer.optimizer_config,
logging_dir=model_dir,
steps_per_execution=params.trainer.steps_per_loop,
name='RankingTask')
enable_tensorboard = params.trainer.callbacks.enable_tensorboard
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
with strategy.scope():
model = task.build_model()
def get_dataset_fn(params):
return lambda input_context: task.build_inputs(params, input_context)
train_dataset = None
if 'train' in mode:
train_dataset = strategy.distribute_datasets_from_function(
get_dataset_fn(params.task.train_data),
options=tf.distribute.InputOptions(experimental_fetch_to_device=False))
validation_dataset = None
if 'eval' in mode:
validation_dataset = strategy.distribute_datasets_from_function(
get_dataset_fn(params.task.validation_data),
options=tf.distribute.InputOptions(experimental_fetch_to_device=False))
if params.trainer.use_orbit:
with strategy.scope():
checkpoint_exporter = train_utils.maybe_create_best_ckpt_exporter(
params, model_dir)
trainer = RankingTrainer(
config=params,
task=task,
model=model,
optimizer=model.optimizer,
train='train' in mode,
evaluate='eval' in mode,
train_dataset=train_dataset,
validation_dataset=validation_dataset,
checkpoint_exporter=checkpoint_exporter)
train_lib.run_experiment(
distribution_strategy=strategy,
task=task,
mode=mode,
params=params,
model_dir=model_dir,
trainer=trainer)
else: # Compile/fit
checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer)
latest_checkpoint = tf.train.latest_checkpoint(model_dir)
if latest_checkpoint:
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=model_dir,
max_to_keep=params.trainer.max_to_keep,
step_counter=model.optimizer.iterations,
checkpoint_interval=params.trainer.checkpoint_interval)
checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)
time_callback = keras_utils.TimeHistory(
params.task.train_data.global_batch_size,
params.trainer.time_history.log_steps,
logdir=model_dir if enable_tensorboard else None)
callbacks = [checkpoint_callback, time_callback]
if enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=model_dir,
update_freq=min(1000, params.trainer.validation_interval),
profile_batch=FLAGS.profile_steps)
callbacks.append(tensorboard_callback)
num_epochs = (params.trainer.train_steps //
params.trainer.validation_interval)
current_step = model.optimizer.iterations.numpy()
initial_epoch = current_step // params.trainer.validation_interval
eval_steps = params.trainer.validation_steps if 'eval' in mode else None
if mode in ['train', 'train_and_eval']:
logging.info('Training started')
history = model.fit(
train_dataset,
initial_epoch=initial_epoch,
epochs=num_epochs,
steps_per_epoch=params.trainer.validation_interval,
validation_data=validation_dataset,
validation_steps=eval_steps,
callbacks=callbacks,
)
model.summary()
logging.info('Train history: %s', history.history)
elif mode == 'eval':
logging.info('Evaluation started')
validation_output = model.evaluate(validation_dataset, steps=eval_steps)
logging.info('Evaluation output: %s', validation_output)
else:
raise NotImplementedError('The mode is not implemented: %s' % mode)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
common.define_flags()
app.run(main)
| 6,613 | 33.628272 | 79 | py |
models | models-master/official/recommendation/ranking/train_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ranking model and associated functionality."""
import json
import os
from absl import flags
from absl.testing import parameterized
import tensorflow as tf
from official.recommendation.ranking import common
from official.recommendation.ranking import train
FLAGS = flags.FLAGS
def _get_params_override(vocab_sizes,
interaction='dot',
use_orbit=True,
strategy='mirrored'):
  # Update `data_dir` if `use_synthetic_data=False`.
data_dir = ''
return json.dumps({
'runtime': {
'distribution_strategy': strategy,
},
'task': {
'model': {
'vocab_sizes': vocab_sizes,
'embedding_dim': [8] * len(vocab_sizes),
'bottom_mlp': [64, 32, 8],
'interaction': interaction,
},
'train_data': {
'input_path': os.path.join(data_dir, 'train/*'),
'global_batch_size': 16,
},
'validation_data': {
'input_path': os.path.join(data_dir, 'eval/*'),
'global_batch_size': 16,
},
'use_synthetic_data': True,
},
'trainer': {
'use_orbit': use_orbit,
'validation_interval': 20,
'validation_steps': 20,
'train_steps': 40,
},
})
class TrainTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
self._temp_dir = self.get_temp_dir()
self._model_dir = os.path.join(self._temp_dir, 'model_dir')
tf.io.gfile.makedirs(self._model_dir)
FLAGS.model_dir = self._model_dir
FLAGS.tpu = ''
def tearDown(self):
tf.io.gfile.rmtree(self._model_dir)
super().tearDown()
@parameterized.named_parameters(
('DlrmOneDeviceCTL', 'one_device', 'dot', True),
('DlrmOneDevice', 'one_device', 'dot', False),
('DcnOneDeviceCTL', 'one_device', 'cross', True),
('DcnOneDevice', 'one_device', 'cross', False),
('DlrmTPUCTL', 'tpu', 'dot', True),
('DlrmTPU', 'tpu', 'dot', False),
('DcnTPUCTL', 'tpu', 'cross', True),
('DcnTPU', 'tpu', 'cross', False),
('DlrmMirroredCTL', 'Mirrored', 'dot', True),
('DlrmMirrored', 'Mirrored', 'dot', False),
('DcnMirroredCTL', 'Mirrored', 'cross', True),
('DcnMirrored', 'Mirrored', 'cross', False),
)
def testTrainEval(self, strategy, interaction, use_orbit=True):
# Set up simple trainer with synthetic data.
# By default the mode must be `train_and_eval`.
self.assertEqual(FLAGS.mode, 'train_and_eval')
vocab_sizes = [40, 12, 11, 13]
FLAGS.params_override = _get_params_override(vocab_sizes=vocab_sizes,
interaction=interaction,
use_orbit=use_orbit,
strategy=strategy)
train.main('unused_args')
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(self._model_dir, 'params.yaml')))
@parameterized.named_parameters(
('DlrmTPUCTL', 'tpu', 'dot', True),
('DlrmTPU', 'tpu', 'dot', False),
('DcnTPUCTL', 'tpu', 'cross', True),
('DcnTPU', 'tpu', 'cross', False),
('DlrmMirroredCTL', 'Mirrored', 'dot', True),
('DlrmMirrored', 'Mirrored', 'dot', False),
('DcnMirroredCTL', 'Mirrored', 'cross', True),
('DcnMirrored', 'Mirrored', 'cross', False),
)
def testTrainThenEval(self, strategy, interaction, use_orbit=True):
# Set up simple trainer with synthetic data.
vocab_sizes = [40, 12, 11, 13]
FLAGS.params_override = _get_params_override(vocab_sizes=vocab_sizes,
interaction=interaction,
use_orbit=use_orbit,
strategy=strategy)
default_mode = FLAGS.mode
# Training.
FLAGS.mode = 'train'
train.main('unused_args')
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(self._model_dir, 'params.yaml')))
# Evaluation.
FLAGS.mode = 'eval'
train.main('unused_args')
FLAGS.mode = default_mode
if __name__ == '__main__':
common.define_flags()
tf.test.main()
| 4,884 | 32.689655 | 74 | py |
models | models-master/official/recommendation/ranking/configs/config_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for DLRM config."""
from absl.testing import parameterized
import tensorflow as tf
from official.recommendation.ranking.configs import config
class ConfigTest(tf.test.TestCase, parameterized.TestCase):
def test_configs(self):
criteo_config = config.default_config()
self.assertIsInstance(criteo_config, config.Config)
self.assertIsInstance(criteo_config.task, config.Task)
self.assertIsInstance(criteo_config.task.model, config.ModelConfig)
self.assertIsInstance(criteo_config.task.train_data,
config.DataConfig)
self.assertIsInstance(criteo_config.task.validation_data,
config.DataConfig)
criteo_config.task.train_data.is_training = None
with self.assertRaises(KeyError):
criteo_config.validate()
if __name__ == '__main__':
tf.test.main()
| 1,464 | 34.731707 | 74 | py |
models | models-master/official/recommendation/ranking/configs/config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ranking Model configuration definition."""
import dataclasses
from typing import List, Optional, Union
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
@dataclasses.dataclass
class CallbacksConfig(hyperparams.Config):
"""Configuration for Callbacks.
Attributes:
enable_checkpoint_and_export: Whether or not to enable checkpoints as a
Callback. Defaults to True.
enable_backup_and_restore: Whether or not to add BackupAndRestore
      callback. Defaults to False.
enable_tensorboard: Whether or not to enable TensorBoard as a Callback.
Defaults to True.
enable_time_history: Whether or not to enable TimeHistory Callbacks.
Defaults to True.
"""
enable_checkpoint_and_export: bool = True
enable_backup_and_restore: bool = False
enable_tensorboard: bool = True
enable_time_history: bool = True
@dataclasses.dataclass
class LearningRateConfig(hyperparams.Config):
"""Learning rate scheduler config."""
learning_rate: float = 1.25
warmup_steps: int = 8000
decay_steps: int = 30000
decay_start_steps: int = 70000
decay_exp: float = 2
@dataclasses.dataclass
class OptimizationConfig(hyperparams.Config):
"""Embedding Optimizer config."""
lr_config: LearningRateConfig = dataclasses.field(
default_factory=LearningRateConfig
)
embedding_optimizer: str = 'SGD'
@dataclasses.dataclass
class DataConfig(hyperparams.Config):
"""Dataset config for training and evaluation."""
input_path: str = ''
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
shuffle_buffer_size: int = 10000
cycle_length: int = 10
sharding: bool = True
num_shards_per_host: int = 8
@dataclasses.dataclass
class ModelConfig(hyperparams.Config):
"""Configuration for training.
Attributes:
num_dense_features: Number of dense features.
vocab_sizes: Vocab sizes for each of the sparse features. The order agrees
with the order of the input data.
embedding_dim: An integer or a list of embedding table dimensions.
If it's an integer then all tables will have the same embedding dimension.
      If it's a list then its length should match the length of `vocab_sizes`.
    size_threshold: A threshold for table sizes, below which a Keras
      embedding layer is used and above which a TPU embedding layer is used.
      If it's -1 then only Keras embedding layers will be used for all tables;
      if it's 0 then only TPU embedding layers will be used.
bottom_mlp: The sizes of hidden layers for bottom MLP applied to dense
features.
top_mlp: The sizes of hidden layers for top MLP.
    interaction: Interaction can be one of the following:
      'dot', 'cross'.
"""
num_dense_features: int = 13
vocab_sizes: List[int] = dataclasses.field(default_factory=list)
embedding_dim: Union[int, List[int]] = 8
size_threshold: int = 50_000
bottom_mlp: List[int] = dataclasses.field(default_factory=list)
top_mlp: List[int] = dataclasses.field(default_factory=list)
interaction: str = 'dot'
@dataclasses.dataclass
class Loss(hyperparams.Config):
"""Configuration for Loss.
Attributes:
    label_smoothing: The amount of label smoothing to apply to the
      Binary Crossentropy loss; 0.0 disables smoothing.
"""
label_smoothing: float = 0.0
@dataclasses.dataclass
class Task(hyperparams.Config):
"""The model config."""
init_checkpoint: str = ''
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=True)
)
validation_data: DataConfig = dataclasses.field(
default_factory=lambda: DataConfig(is_training=False)
)
loss: Loss = dataclasses.field(default_factory=Loss)
use_synthetic_data: bool = False
@dataclasses.dataclass
class TimeHistoryConfig(hyperparams.Config):
"""Configuration for the TimeHistory callback.
Attributes:
log_steps: Interval of steps between logging of batch level stats.
"""
log_steps: Optional[int] = None
@dataclasses.dataclass
class TrainerConfig(cfg.TrainerConfig):
"""Configuration for training.
Attributes:
train_steps: The number of steps used to train.
validation_steps: The number of steps used to eval.
    validation_interval: The number of training steps to run between
      evaluations.
callbacks: An instance of CallbacksConfig.
use_orbit: Whether to use orbit library with custom training loop or
compile/fit API.
enable_metrics_in_training: Whether to enable metrics during training.
time_history: Config of TimeHistory callback.
    optimizer_config: An `OptimizationConfig` instance for the embedding
      optimizer.
"""
train_steps: int = 0
# Sets validation steps to be -1 to evaluate the entire dataset.
validation_steps: int = -1
validation_interval: int = 70000
callbacks: CallbacksConfig = dataclasses.field(
default_factory=CallbacksConfig
)
use_orbit: bool = False
enable_metrics_in_training: bool = True
time_history: TimeHistoryConfig = dataclasses.field(
default_factory=lambda: TimeHistoryConfig(log_steps=5000)
)
optimizer_config: OptimizationConfig = dataclasses.field(
default_factory=OptimizationConfig
)
NUM_TRAIN_EXAMPLES = 4195197692
NUM_EVAL_EXAMPLES = 89137318
train_batch_size = 16384
eval_batch_size = 16384
steps_per_epoch = NUM_TRAIN_EXAMPLES // train_batch_size
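# With the Criteo defaults above: 4195197692 // 16384 = 256054 steps per epoch.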
vocab_sizes = [
39884406, 39043, 17289, 7420, 20263, 3, 7120, 1543, 63, 38532951,
2953546, 403346, 10, 2208, 11938, 155, 4, 976, 14, 39979771, 25641295,
39664984, 585935, 12972, 108, 36
]
@dataclasses.dataclass
class Config(hyperparams.Config):
"""Configuration to train the RankingModel.
By default it configures DLRM model on criteo dataset.
Attributes:
runtime: A `RuntimeConfig` instance.
task: `Task` instance.
trainer: A `TrainerConfig` instance.
"""
runtime: cfg.RuntimeConfig = dataclasses.field(
default_factory=cfg.RuntimeConfig
)
task: Task = dataclasses.field(
default_factory=lambda: Task( # pylint: disable=g-long-lambda
model=ModelConfig(
embedding_dim=8,
vocab_sizes=vocab_sizes,
bottom_mlp=[64, 32, 8],
top_mlp=[64, 32, 1],
),
loss=Loss(label_smoothing=0.0),
train_data=DataConfig(
is_training=True, global_batch_size=train_batch_size
),
validation_data=DataConfig(
is_training=False, global_batch_size=eval_batch_size
),
)
)
trainer: TrainerConfig = dataclasses.field(
default_factory=lambda: TrainerConfig( # pylint: disable=g-long-lambda
train_steps=2 * steps_per_epoch,
validation_interval=steps_per_epoch,
validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size,
enable_metrics_in_training=True,
optimizer_config=OptimizationConfig(),
)
)
restrictions: dataclasses.InitVar[Optional[List[str]]] = None
def default_config() -> Config:
return Config(
runtime=cfg.RuntimeConfig(),
task=Task(
model=ModelConfig(
embedding_dim=8,
vocab_sizes=vocab_sizes,
bottom_mlp=[64, 32, 4],
top_mlp=[64, 32, 1]),
loss=Loss(label_smoothing=0.0),
train_data=DataConfig(
global_batch_size=train_batch_size,
is_training=True,
sharding=True),
validation_data=DataConfig(
global_batch_size=eval_batch_size,
is_training=False,
sharding=False)),
trainer=TrainerConfig(
train_steps=2 * steps_per_epoch,
validation_interval=steps_per_epoch,
validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size,
enable_metrics_in_training=True,
optimizer_config=OptimizationConfig()),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
@exp_factory.register_config_factory('dlrm_criteo')
def dlrm_criteo_tb_config() -> Config:
return Config(
runtime=cfg.RuntimeConfig(),
task=Task(
model=ModelConfig(
num_dense_features=13,
vocab_sizes=vocab_sizes,
bottom_mlp=[512, 256, 64],
embedding_dim=64,
top_mlp=[1024, 1024, 512, 256, 1],
interaction='dot'),
loss=Loss(label_smoothing=0.0),
train_data=DataConfig(
global_batch_size=train_batch_size,
is_training=True,
sharding=True),
validation_data=DataConfig(
global_batch_size=eval_batch_size,
is_training=False,
sharding=False)),
trainer=TrainerConfig(
train_steps=steps_per_epoch,
validation_interval=steps_per_epoch // 2,
validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size,
enable_metrics_in_training=True,
optimizer_config=OptimizationConfig()),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
@exp_factory.register_config_factory('dcn_criteo')
def dcn_criteo_tb_config() -> Config:
return Config(
runtime=cfg.RuntimeConfig(),
task=Task(
model=ModelConfig(
num_dense_features=13,
vocab_sizes=vocab_sizes,
bottom_mlp=[512, 256, 64],
embedding_dim=64,
top_mlp=[1024, 1024, 512, 256, 1],
interaction='cross'),
loss=Loss(label_smoothing=0.0),
train_data=DataConfig(
global_batch_size=train_batch_size,
is_training=True,
sharding=True),
validation_data=DataConfig(
global_batch_size=eval_batch_size,
is_training=False,
sharding=False)),
trainer=TrainerConfig(
train_steps=steps_per_epoch,
validation_interval=steps_per_epoch // 2,
validation_steps=NUM_EVAL_EXAMPLES // eval_batch_size,
enable_metrics_in_training=True,
optimizer_config=OptimizationConfig()),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
| 11,077 | 33.086154 | 80 | py |
models | models-master/official/recommendation/ranking/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/recommendation/ranking/data/data_pipeline.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pipeline for the Ranking model.
This module defines various input datasets for the Ranking model.
"""
from typing import List
import tensorflow as tf
from official.recommendation.ranking.configs import config
class CriteoTsvReader:
"""Input reader callable for pre-processed Criteo data.
Raw Criteo data is assumed to be preprocessed in the following way:
1. Missing values are replaced with zeros.
2. Negative values are replaced with zeros.
3. Integer features are transformed by log(x+1) and are hence tf.float32.
  4. Categorical data is bucketized and is hence tf.int32.
"""
def __init__(self,
file_pattern: str,
params: config.DataConfig,
num_dense_features: int,
vocab_sizes: List[int],
use_synthetic_data: bool = False):
self._file_pattern = file_pattern
self._params = params
self._num_dense_features = num_dense_features
self._vocab_sizes = vocab_sizes
self._use_synthetic_data = use_synthetic_data
def __call__(self, ctx: tf.distribute.InputContext) -> tf.data.Dataset:
params = self._params
# Per replica batch size.
batch_size = ctx.get_per_replica_batch_size(
params.global_batch_size) if ctx else params.global_batch_size
if self._use_synthetic_data:
return self._generate_synthetic_data(ctx, batch_size)
@tf.function
def _parse_fn(example: tf.Tensor):
"""Parser function for pre-processed Criteo TSV records."""
label_defaults = [[0.0]]
dense_defaults = [
[0.0] for _ in range(self._num_dense_features)
]
num_sparse_features = len(self._vocab_sizes)
categorical_defaults = [
[0] for _ in range(num_sparse_features)
]
record_defaults = label_defaults + dense_defaults + categorical_defaults
fields = tf.io.decode_csv(
example, record_defaults, field_delim='\t', na_value='-1')
num_labels = 1
label = tf.reshape(fields[0], [batch_size, 1])
features = {}
num_dense = len(dense_defaults)
dense_features = []
offset = num_labels
for idx in range(num_dense):
dense_features.append(fields[idx + offset])
features['dense_features'] = tf.stack(dense_features, axis=1)
offset += num_dense
features['sparse_features'] = {}
for idx in range(num_sparse_features):
features['sparse_features'][str(idx)] = fields[idx + offset]
return features, label
filenames = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)
# Shard the full dataset according to host number.
# Each host will get 1 / num_of_hosts portion of the data.
if params.sharding and ctx and ctx.num_input_pipelines > 1:
filenames = filenames.shard(ctx.num_input_pipelines,
ctx.input_pipeline_id)
num_shards_per_host = 1
if params.sharding:
num_shards_per_host = params.num_shards_per_host
def make_dataset(shard_index):
filenames_for_shard = filenames.shard(num_shards_per_host, shard_index)
dataset = tf.data.TextLineDataset(filenames_for_shard)
if params.is_training:
dataset = dataset.repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.map(_parse_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
indices = tf.data.Dataset.range(num_shards_per_host)
dataset = indices.interleave(
map_func=make_dataset,
cycle_length=params.cycle_length,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def _generate_synthetic_data(self, ctx: tf.distribute.InputContext,
batch_size: int) -> tf.data.Dataset:
"""Creates synthetic data based on the parameter batch size.
Args:
ctx: Input Context
batch_size: per replica batch size.
Returns:
The synthetic dataset.
"""
params = self._params
num_dense = self._num_dense_features
num_replicas = ctx.num_replicas_in_sync if ctx else 1
    # The synthetic dataset size is the same for training and evaluation.
    dataset_size = 1000 * batch_size * num_replicas
dense_tensor = tf.random.uniform(
shape=(dataset_size, num_dense), maxval=1.0, dtype=tf.float32)
sparse_tensors = []
for size in self._vocab_sizes:
sparse_tensors.append(
tf.random.uniform(
shape=(dataset_size,), maxval=int(size), dtype=tf.int32))
sparse_tensor_elements = {
str(i): sparse_tensors[i] for i in range(len(sparse_tensors))
}
# the mean is in [0, 1] interval.
dense_tensor_mean = tf.math.reduce_mean(dense_tensor, axis=1)
sparse_tensors = tf.stack(sparse_tensors, axis=-1)
sparse_tensors_mean = tf.math.reduce_sum(sparse_tensors, axis=1)
# the mean is in [0, 1] interval.
sparse_tensors_mean = tf.cast(sparse_tensors_mean, dtype=tf.float32)
sparse_tensors_mean /= sum(self._vocab_sizes)
# the label is in [0, 1] interval.
label_tensor = (dense_tensor_mean + sparse_tensors_mean) / 2.0
# Using the threshold 0.5 to convert to 0/1 labels.
label_tensor = tf.cast(label_tensor + 0.5, tf.int32)
input_elem = {'dense_features': dense_tensor,
'sparse_features': sparse_tensor_elements}, label_tensor
dataset = tf.data.Dataset.from_tensor_slices(input_elem)
dataset = dataset.cache()
if params.is_training:
dataset = dataset.repeat()
return dataset.batch(batch_size, drop_remainder=True)
def train_input_fn(params: config.Task) -> CriteoTsvReader:
"""Returns callable object of batched training examples.
Args:
params: hyperparams to create input pipelines.
Returns:
CriteoTsvReader callable for training dataset.
"""
return CriteoTsvReader(
file_pattern=params.train_data.input_path,
params=params.train_data,
vocab_sizes=params.model.vocab_sizes,
num_dense_features=params.model.num_dense_features,
use_synthetic_data=params.use_synthetic_data)
def eval_input_fn(params: config.Task) -> CriteoTsvReader:
"""Returns callable object of batched eval examples.
Args:
params: hyperparams to create input pipelines.
Returns:
CriteoTsvReader callable for eval dataset.
"""
return CriteoTsvReader(
file_pattern=params.validation_data.input_path,
params=params.validation_data,
vocab_sizes=params.model.vocab_sizes,
num_dense_features=params.model.num_dense_features,
use_synthetic_data=params.use_synthetic_data)
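# Minimal usage sketch (illustrative; assumes a populated config.Task, as in
# the unit tests):
# dataset = train_input_fn(task)(ctx=None)
# features, labels = next(iter(dataset))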
| 7,319 | 33.528302 | 78 | py |
models | models-master/official/recommendation/ranking/data/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/recommendation/ranking/data/data_pipeline_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for data_pipeline."""
from absl.testing import parameterized
import tensorflow as tf
from official.recommendation.ranking.configs import config
from official.recommendation.ranking.data import data_pipeline
class DataPipelineTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(('Train', True),
('Eval', False))
def testSyntheticDataPipeline(self, is_training):
task = config.Task(
model=config.ModelConfig(
embedding_dim=4,
num_dense_features=8,
vocab_sizes=[40, 12, 11, 13, 2, 5],
bottom_mlp=[64, 32, 4],
top_mlp=[64, 32, 1]),
train_data=config.DataConfig(global_batch_size=16),
validation_data=config.DataConfig(global_batch_size=16),
use_synthetic_data=True)
num_dense_features = task.model.num_dense_features
num_sparse_features = len(task.model.vocab_sizes)
batch_size = task.train_data.global_batch_size
if is_training:
dataset = data_pipeline.train_input_fn(task)
else:
dataset = data_pipeline.eval_input_fn(task)
dataset_iter = iter(dataset(ctx=None))
# Consume full batches and validate shapes.
for _ in range(10):
features, label = next(dataset_iter)
dense_features = features['dense_features']
sparse_features = features['sparse_features']
self.assertEqual(dense_features.shape, [batch_size, num_dense_features])
self.assertLen(sparse_features, num_sparse_features)
for _, val in sparse_features.items():
self.assertEqual(val.shape, [batch_size])
self.assertEqual(label.shape, [batch_size])
if __name__ == '__main__':
tf.test.main()
| 2,335 | 35.5 | 78 | py |
models | models-master/official/recommendation/ranking/preprocessing/shard_rebalancer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rebalance a set of CSV/TFRecord shards to a target number of files.
"""
import argparse
import datetime
import os
import apache_beam as beam
import tensorflow as tf
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path",
default=None,
required=True,
help="Input path.")
parser.add_argument(
"--output_path",
default=None,
required=True,
help="Output path.")
parser.add_argument(
"--num_output_files",
type=int,
default=256,
help="Number of output file shards.")
parser.add_argument(
"--filetype",
default="tfrecord",
help="File type, needs to be one of {tfrecord, csv}.")
parser.add_argument(
"--project",
default=None,
help="ID (not name) of your project. Ignored by DirectRunner")
parser.add_argument(
"--runner",
help="Runner for Apache Beam, needs to be one of "
"{DirectRunner, DataflowRunner}.",
default="DirectRunner")
parser.add_argument(
"--region",
default=None,
help="region")
args = parser.parse_args()
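# Example invocation (illustrative paths):
# python shard_rebalancer.py --input_path="gs://bucket/in/part-*" \
#     --output_path=gs://bucket/out/part --num_output_files=256 \
#     --filetype=tfrecord --runner=DirectRunner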
def rebalance_data_shards():
"""Rebalances data shards."""
def csv_pipeline(pipeline: beam.Pipeline):
"""Rebalances CSV dataset.
Args:
pipeline: Beam pipeline object.
"""
_ = (
pipeline
| beam.io.ReadFromText(args.input_path)
| beam.io.WriteToText(args.output_path,
num_shards=args.num_output_files))
def tfrecord_pipeline(pipeline: beam.Pipeline):
"""Rebalances TFRecords dataset.
Args:
pipeline: Beam pipeline object.
"""
example_coder = beam.coders.ProtoCoder(tf.train.Example)
_ = (
pipeline
| beam.io.ReadFromTFRecord(args.input_path, coder=example_coder)
| beam.io.WriteToTFRecord(args.output_path, file_name_suffix="tfrecord",
coder=example_coder,
num_shards=args.num_output_files))
job_name = (
f"shard-rebalancer-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}")
# set up Beam pipeline.
options = {
"staging_location": os.path.join(args.output_path, "tmp", "staging"),
"temp_location": os.path.join(args.output_path, "tmp"),
"job_name": job_name,
"project": args.project,
"save_main_session": True,
"region": args.region,
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
with beam.Pipeline(args.runner, options=opts) as pipeline:
if args.filetype == "tfrecord":
tfrecord_pipeline(pipeline)
elif args.filetype == "csv":
csv_pipeline(pipeline)
if __name__ == "__main__":
rebalance_data_shards()
| 3,253 | 27.051724 | 80 | py |
models | models-master/official/recommendation/ranking/preprocessing/criteo_preprocess.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX beam preprocessing pipeline for Criteo data.
Preprocessing util for criteo data. Transformations:
1. Fill missing features with zeros.
2. Set negative integer features to zeros.
3. Normalize integer features using log(x+1).
4. For categorical features (hex), convert to integer and take the value
modulo the max_vocab_size value.
Usage:
For raw Criteo data, this script should be run twice.
First run should set vocab_gen_mode to true. This run is used to generate
vocabulary files in the temp_dir location.
Second run should set vocab_gen_mode to false. It is necessary to point to the
same temp_dir used during the first run.
"""
import argparse
import datetime
import os
from absl import logging
import apache_beam as beam
import numpy as np
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
from tfx_bsl.public import tfxio
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path",
default=None,
required=True,
help="Input path. Be sure to set this to cover all data, to ensure "
"that sparse vocabs are complete.")
parser.add_argument(
"--output_path",
default=None,
required=True,
help="Output path.")
parser.add_argument(
"--temp_dir",
default=None,
required=True,
help="Directory to store temporary metadata. Important because vocab "
"dictionaries will be stored here. Co-located with data, ideally.")
parser.add_argument(
    "--csv_delimeter",
    default="\t",
    help="Delimiter string for input and output.")
parser.add_argument(
    "--vocab_gen_mode",
    action="store_true",
    default=False,
    help="If set, process the full dataset and do not write CSV output. In "
    "this mode, see temp_dir for vocab files. input_path should cover all "
    "data, e.g. train, test, eval.")
parser.add_argument(
"--runner",
help="Runner for Apache Beam, needs to be one of {DirectRunner, "
"DataflowRunner}.",
default="DirectRunner")
parser.add_argument(
"--project",
default=None,
help="ID of your project. Ignored by DirectRunner.")
parser.add_argument(
"--region",
default=None,
help="Region. Ignored by DirectRunner.")
parser.add_argument(
    "--max_vocab_size",
    type=int,
    default=10_000_000,
    help="Max index range; categorical features are converted to integers "
    "and taken modulo max_vocab_size.")
args = parser.parse_args()
NUM_NUMERIC_FEATURES = 13
NUMERIC_FEATURE_KEYS = [
f"int-feature-{x + 1}" for x in range(NUM_NUMERIC_FEATURES)]
CATEGORICAL_FEATURE_KEYS = [
"categorical-feature-%d" % x for x in range(NUM_NUMERIC_FEATURES + 1, 40)]
LABEL_KEY = "clicked"
# Data is first preprocessed in pure Apache Beam using numpy.
# This removes missing values and hexadecimal-encoded values.
# For the TF schema, we can thus specify the schema as FixedLenFeature
# for TensorFlow Transform.
FEATURE_SPEC = dict([(name, tf.io.FixedLenFeature([], dtype=tf.int64))
for name in CATEGORICAL_FEATURE_KEYS] +
[(name, tf.io.FixedLenFeature([], dtype=tf.float32))
for name in NUMERIC_FEATURE_KEYS] +
[(LABEL_KEY, tf.io.FixedLenFeature([], tf.float32))])
INPUT_METADATA = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec(FEATURE_SPEC))
def apply_vocab_fn(inputs):
"""Preprocessing fn for sparse features.
Applies vocab to bucketize sparse features. This function operates using
previously-created vocab files.
Pre-condition: Full vocab has been materialized.
Args:
inputs: Input features to transform.
Returns:
Output dict with transformed features.
"""
outputs = {}
outputs[LABEL_KEY] = inputs[LABEL_KEY]
for key in NUMERIC_FEATURE_KEYS:
outputs[key] = inputs[key]
for idx, key in enumerate(CATEGORICAL_FEATURE_KEYS):
vocab_fn = os.path.join(
args.temp_dir, "tftransform_tmp", "feature_{}_vocab".format(idx))
outputs[key] = tft.apply_vocabulary(inputs[key], vocab_fn)
return outputs
def compute_vocab_fn(inputs):
"""Preprocessing fn for sparse features.
This function computes unique IDs for the sparse features. We rely on implicit
behavior which writes the vocab files to the vocab_filename specified in
tft.compute_and_apply_vocabulary.
Pre-condition: Sparse features have been converted to integer and mod'ed with
args.max_vocab_size.
Args:
inputs: Input features to transform.
Returns:
Output dict with transformed features.
"""
outputs = {}
outputs[LABEL_KEY] = inputs[LABEL_KEY]
for key in NUMERIC_FEATURE_KEYS:
outputs[key] = inputs[key]
for idx, key in enumerate(CATEGORICAL_FEATURE_KEYS):
outputs[key] = tft.compute_and_apply_vocabulary(
x=inputs[key],
vocab_filename="feature_{}_vocab".format(idx))
return outputs
class FillMissing(beam.DoFn):
"""Fills missing elements with zero string value."""
def process(self, element):
elem_list = element.split(args.csv_delimeter)
out_list = []
for val in elem_list:
new_val = "0" if not val else val
out_list.append(new_val)
yield (args.csv_delimeter).join(out_list)
class NegsToZeroLog(beam.DoFn):
"""For int features, sets negative values to zero and takes log(x+1)."""
def process(self, element):
elem_list = element.split(args.csv_delimeter)
out_list = []
for i, val in enumerate(elem_list):
if i > 0 and i <= NUM_NUMERIC_FEATURES:
new_val = "0" if int(val) < 0 else val
new_val = np.log(int(new_val) + 1)
new_val = str(new_val)
else:
new_val = val
out_list.append(new_val)
yield (args.csv_delimeter).join(out_list)
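# Worked example of the DoFn above, assuming illustrative tab-separated input:
# the row "1\t-2\t15\t..." keeps the label "1", maps the negative integer
# feature -2 to log(0 + 1) = 0.0, and maps 15 to log(15 + 1) ~= 2.7725887.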
class HexToIntModRange(beam.DoFn):
  """For categorical features, converts hex values to ints modulo max value."""
def process(self, element):
elem_list = element.split(args.csv_delimeter)
out_list = []
for i, val in enumerate(elem_list):
if i > NUM_NUMERIC_FEATURES:
new_val = int(val, 16) % args.max_vocab_size
else:
new_val = val
out_list.append(str(new_val))
yield str.encode((args.csv_delimeter).join(out_list))
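# Worked example of the DoFn above, with an illustrative hex value: "ff"
# parses as int("ff", 16) == 255, so with the default --max_vocab_size of
# 10_000_000 the emitted categorical id is 255 % 10_000_000 == 255.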
def transform_data(data_path, output_path):
"""Preprocesses Criteo data.
Two processing modes are supported. Raw data will require two passes.
If full vocab files already exist, only one pass is necessary.
Args:
data_path: File(s) to read.
output_path: Path to which output CSVs are written, if necessary.
"""
preprocessing_fn = compute_vocab_fn if args.vocab_gen_mode else apply_vocab_fn
gcp_project = args.project
region = args.region
job_name = (f"criteo-preprocessing-"
f"{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}")
# set up Beam pipeline.
pipeline_options = None
if args.runner == "DataflowRunner":
options = {
"staging_location": os.path.join(output_path, "tmp", "staging"),
"temp_location": os.path.join(output_path, "tmp"),
"job_name": job_name,
"project": gcp_project,
"save_main_session": True,
"region": region,
"setup_file": "./setup.py",
}
pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)
elif args.runner == "DirectRunner":
pipeline_options = beam.options.pipeline_options.DirectOptions(
direct_num_workers=os.cpu_count(),
direct_running_mode="multi_threading")
with beam.Pipeline(args.runner, options=pipeline_options) as pipeline:
with tft_beam.Context(temp_dir=args.temp_dir):
processed_lines = (
pipeline
# Read in TSV data.
| beam.io.ReadFromText(data_path, coder=beam.coders.StrUtf8Coder())
# Fill in missing elements with the defaults (zeros).
| "FillMissing" >> beam.ParDo(FillMissing())
# For numerical features, set negatives to zero. Then take log(x+1).
| "NegsToZeroLog" >> beam.ParDo(NegsToZeroLog())
# For categorical features, mod the values with vocab size.
| "HexToIntModRange" >> beam.ParDo(HexToIntModRange()))
# CSV reader: List the cols in order, as dataset schema is not ordered.
ordered_columns = [LABEL_KEY
] + NUMERIC_FEATURE_KEYS + CATEGORICAL_FEATURE_KEYS
csv_tfxio = tfxio.BeamRecordCsvTFXIO(
physical_format="text",
column_names=ordered_columns,
delimiter=args.csv_delimeter,
schema=INPUT_METADATA.schema)
converted_data = (
processed_lines
| "DecodeData" >> csv_tfxio.BeamSource())
raw_dataset = (converted_data, csv_tfxio.TensorAdapterConfig())
# The TFXIO output format is chosen for improved performance.
transformed_dataset, _ = (
raw_dataset | tft_beam.AnalyzeAndTransformDataset(
preprocessing_fn, output_record_batches=False))
# Transformed metadata is not necessary for encoding.
transformed_data, transformed_metadata = transformed_dataset
if not args.vocab_gen_mode:
# Write to CSV.
transformed_csv_coder = tft.coders.CsvCoder(
ordered_columns, transformed_metadata.schema,
delimiter=args.csv_delimeter)
_ = (
transformed_data
| "EncodeDataCsv" >> beam.Map(transformed_csv_coder.encode)
| "WriteDataCsv" >> beam.io.WriteToText(output_path))
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
transform_data(data_path=args.input_path,
output_path=args.output_path)
| 10,351 | 32.286174 | 80 | py |
models | models-master/official/recommendation/ranking/preprocessing/setup.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration for criteo dataset preprocessing.
This is used while running Tensorflow transform on Cloud Dataflow.
"""
import setuptools
version = "0.1.0"
if __name__ == "__main__":
setuptools.setup(
name="criteo_preprocessing",
version=version,
install_requires=["tensorflow-transform"],
packages=setuptools.find_packages(),
)
| 978 | 30.580645 | 74 | py |
models | models-master/official/utils/hyperparams_flags.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common flags for importing hyperparameters."""
from absl import flags
from official.utils.flags import core as flags_core
FLAGS = flags.FLAGS
def define_gin_flags():
"""Define common gin configurable flags."""
flags.DEFINE_multi_string('gin_file', None,
'List of paths to the config files.')
flags.DEFINE_multi_string(
'gin_param', None, 'Newline separated list of Gin parameter bindings.')
def define_common_hparams_flags():
"""Define the common flags across models."""
flags.DEFINE_string(
'model_dir',
default=None,
      help=('The directory where the model and training/evaluation summaries '
            'are stored.'))
flags.DEFINE_integer(
'train_batch_size', default=None, help='Batch size for training.')
flags.DEFINE_integer(
'eval_batch_size', default=None, help='Batch size for evaluation.')
flags.DEFINE_string(
'precision',
default=None,
help=('Precision to use; one of: {bfloat16, float32}'))
flags.DEFINE_string(
'config_file',
default=None,
help=('A YAML file which specifies overrides. Note that this file can be '
'used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, the one in '
            '`--params_override` takes precedence.'))
flags.DEFINE_string(
'params_override',
default=None,
      help=('A YAML/JSON string or a YAML file which specifies additional '
            'overrides over the default parameters and those specified in '
            '`--config_file`. Note that this is supposed to be used only to '
            'override the model parameters, but not the parameters like TPU '
            'specific flags. One canonical use case of `--config_file` and '
            '`--params_override` is that users first define a template config '
            'file using `--config_file`, then use `--params_override` to '
            'adjust a minimal set of tuning parameters, for example setting '
            'up a different `train_batch_size`. '
            'The final override order of parameters: default_model_params --> '
            'params from config_file --> params in params_override. '
            'See also the help message of `--config_file`.'))
  flags.DEFINE_integer('save_checkpoint_freq', None,
                       'Number of steps between checkpoint saves.')
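# A hedged illustration of the `--config_file`/`--params_override` precedence
# documented above; the config file name and values are hypothetical:
#
#   --config_file=my_experiment.yaml \
#   --params_override='{"train_batch_size": 64, "precision": "bfloat16"}'
#
# Here the values from --params_override win over any values set in
# my_experiment.yaml, which in turn win over the Python defaults.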
def initialize_common_flags():
"""Define the common flags across models."""
define_common_hparams_flags()
flags_core.define_device(tpu=True)
flags_core.define_base(
num_gpu=True, model_dir=False, data_dir=False, batch_size=False)
flags_core.define_distribution(worker_hosts=True, task_index=True)
flags_core.define_performance(all_reduce_alg=True, num_packs=True)
# Reset the default value of num_gpus to zero.
FLAGS.num_gpus = 0
flags.DEFINE_string(
      'strategy_type', 'mirrored', 'Type of distribution strategy. '
      'One of mirrored, tpu and multiworker.')
def strategy_flags_dict():
"""Returns TPU and/or GPU related flags in a dictionary."""
return {
'distribution_strategy': FLAGS.strategy_type,
# TPUStrategy related flags.
'tpu': FLAGS.tpu,
# MultiWorkerMirroredStrategy related flags.
'all_reduce_alg': FLAGS.all_reduce_alg,
'worker_hosts': FLAGS.worker_hosts,
'task_index': FLAGS.task_index,
# MirroredStrategy and OneDeviceStrategy
'num_gpus': FLAGS.num_gpus,
'num_packs': FLAGS.num_packs,
}
def hparam_flags_dict():
"""Returns model params related flags in a dictionary."""
return {
'data_dir': FLAGS.data_dir,
'model_dir': FLAGS.model_dir,
'train_batch_size': FLAGS.train_batch_size,
'eval_batch_size': FLAGS.eval_batch_size,
'precision': FLAGS.precision,
'config_file': FLAGS.config_file,
'params_override': FLAGS.params_override,
}
| 4,607 | 36.16129 | 80 | py |
models | models-master/official/utils/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/utils/testing/mock_task.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock task for testing."""
import dataclasses
import numpy as np
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling.hyperparams import base_config
class MockModel(tf.keras.Model):
def __init__(self, network):
super().__init__()
self.network = network
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
outputs = self.network(inputs)
self.add_loss(tf.reduce_mean(outputs))
return outputs
@dataclasses.dataclass
class MockTaskConfig(cfg.TaskConfig):
pass
@base_config.bind(MockTaskConfig)
class MockTask(base_task.Task):
"""Mock task object for testing."""
def __init__(self, params=None, logging_dir=None, name=None):
super().__init__(params=params, logging_dir=logging_dir, name=name)
def build_model(self, *arg, **kwargs):
inputs = tf.keras.layers.Input(shape=(2,), name="random", dtype=tf.float32)
outputs = tf.keras.layers.Dense(
1, bias_initializer=tf.keras.initializers.Ones(), name="dense_0")(
inputs)
network = tf.keras.Model(inputs=inputs, outputs=outputs)
return MockModel(network)
def build_metrics(self, training: bool = True):
del training
return [tf.keras.metrics.Accuracy(name="acc")]
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
logs = super().validation_step(inputs, model, metrics)
logs["counter"] = tf.constant(1, dtype=tf.float32)
return logs
def build_inputs(self, params):
def generate_data(_):
x = tf.zeros(shape=(2,), dtype=tf.float32)
label = tf.zeros([1], dtype=tf.int32)
return x, label
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
generate_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset.prefetch(buffer_size=1).batch(2, drop_remainder=True)
def aggregate_logs(self, state, step_outputs):
if state is None:
state = {}
for key, value in step_outputs.items():
if key not in state:
state[key] = []
state[key].append(
np.concatenate([np.expand_dims(v.numpy(), axis=0) for v in value]))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
for k, v in aggregated_logs.items():
aggregated_logs[k] = np.sum(np.stack(v, axis=0))
return aggregated_logs
@exp_factory.register_config_factory("mock")
def mock_experiment() -> cfg.ExperimentConfig:
config = cfg.ExperimentConfig(
task=MockTaskConfig(), trainer=cfg.TrainerConfig())
return config
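# A hedged usage sketch for tests; it assumes only the "mock" factory
# registered above:
#
#   config = exp_factory.get_exp_config("mock")
#   task = MockTask(params=config.task)
#   model = task.build_model()
#   dataset = task.build_inputs(config.task.train_data)
#   logs = task.train_step(
#       next(iter(dataset)), model,
#       optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
#       metrics=task.build_metrics())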
| 3,302 | 31.067961 | 100 | py |
models | models-master/official/utils/testing/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/utils/testing/integration.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper code to run complete models from within python."""
import os
import shutil
import sys
import tempfile
from absl import flags
from absl.testing import flagsaver
from official.utils.flags import core as flags_core
@flagsaver.flagsaver
def run_synthetic(main,
tmp_root,
extra_flags=None,
synth=True,
train_epochs=1,
epochs_between_evals=1):
"""Performs a minimal run of a model.
This function is intended to test for syntax errors throughout a model. A
very limited run is performed using synthetic data.
Args:
main: The primary function used to exercise a code path. Generally this
function is "<MODULE>.main(argv)".
tmp_root: Root path for the temp directory created by the test class.
extra_flags: Additional flags passed by the caller of this function.
synth: Use synthetic data.
train_epochs: Value of the --train_epochs flag.
epochs_between_evals: Value of the --epochs_between_evals flag.
"""
extra_flags = [] if extra_flags is None else extra_flags
model_dir = tempfile.mkdtemp(dir=tmp_root)
args = [sys.argv[0], "--model_dir", model_dir] + extra_flags
if synth:
args.append("--use_synthetic_data")
if train_epochs is not None:
args.extend(["--train_epochs", str(train_epochs)])
if epochs_between_evals is not None:
args.extend(["--epochs_between_evals", str(epochs_between_evals)])
try:
flags_core.parse_flags(argv=args)
main(flags.FLAGS)
finally:
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
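# A hedged usage sketch from a model's smoke test; the imported module and
# flag values are illustrative:
#
#   from my_project import trainer  # Hypothetical module with main(flags_obj).
#   run_synthetic(trainer.main, tmp_root="/tmp",
#                 extra_flags=["--batch_size", "4"])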
| 2,220 | 30.28169 | 77 | py |
models | models-master/official/utils/docs/build_tfm_api_docs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tool to generate api_docs for tensorflow_models/official library.
Example:
$> pip install -U git+https://github.com/tensorflow/docs
$> python build_nlp_api_docs.py --output_dir=/tmp/api_docs
"""
import pathlib
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import parser
from tensorflow_docs.api_generator import public_api
from tensorflow_docs.api_generator.pretty_docs import base_page
from tensorflow_docs.api_generator.pretty_docs import function_page
import tensorflow_models as tfm
FLAGS = flags.FLAGS
flags.DEFINE_string('output_dir', None, 'Where to write the resulting docs to.')
flags.DEFINE_string(
'code_url_prefix',
'https://github.com/tensorflow/models/blob/master/tensorflow_models',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', '/api_docs/python',
'Path prefix in the _toc.yaml')
PROJECT_SHORT_NAME = 'tfm'
PROJECT_FULL_NAME = 'TensorFlow Modeling Library'
class ExpFactoryInfo(function_page.FunctionPageInfo):
"""Customize the page for the experiment factory."""
def collect_docs(self):
super().collect_docs()
self.doc.docstring_parts.append(self.make_factory_options_table())
def make_factory_options_table(self):
lines = [
'',
'Allowed values for `exp_name`:',
'',
# The indent is important here, it keeps the site's markdown parser
# from switching to HTML mode.
' <table>\n',
'<th><code>exp_name</code></th><th>Description</th>',
]
reference_resolver = self.parser_config.reference_resolver
api_tree = self.parser_config.api_tree
for name, fn in sorted(tfm.core.exp_factory._REGISTERED_CONFIGS.items()): # pylint: disable=protected-access
fn_api_node = api_tree.node_for_object(fn)
if fn_api_node is None:
location = parser.get_defined_in(self.py_object, self.parser_config)
link = base_page.small_source_link(location, name)
else:
link = reference_resolver.python_link(name, fn_api_node.full_name)
doc = fn.__doc__
if doc:
doc = doc.splitlines()[0]
else:
doc = ''
lines.append(f'<tr><td>{link}</td><td>{doc}</td></tr>')
lines.append('</table>')
return '\n'.join(lines)
def hide_module_model_and_layer_methods():
"""Hide methods and properties defined in the base classes of Keras layers.
We hide all methods and properties of the base classes, except:
- `__init__` is always documented.
- `call` is always documented, as it can carry important information for
complex layers.
"""
module_contents = list(tf.Module.__dict__.items())
model_contents = list(tf.keras.Model.__dict__.items())
layer_contents = list(tf.keras.layers.Layer.__dict__.items())
for name, obj in module_contents + layer_contents + model_contents:
if name == '__init__':
# Always document __init__.
continue
if name == 'call':
# Always document `call`.
if hasattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS): # pylint: disable=protected-access
delattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS) # pylint: disable=protected-access
continue
# Otherwise, exclude from documentation.
if isinstance(obj, property):
obj = obj.fget
if isinstance(obj, (staticmethod, classmethod)):
obj = obj.__func__
try:
doc_controls.do_not_doc_in_subclasses(obj)
except AttributeError:
pass
def custom_filter(path, parent, children):
  """Filters doc contents; top-level `tfm` packages are left unfiltered."""
  if len(path) <= 2:
# Don't filter the contents of the top level `tfm.vision` package.
return children
else:
return public_api.explicit_package_contents_filter(path, parent, children)
def gen_api_docs(code_url_prefix, site_path, output_dir, project_short_name,
project_full_name, search_hints):
"""Generates api docs for the tensorflow docs package."""
hide_module_model_and_layer_methods()
del tfm.nlp.layers.MultiHeadAttention
del tfm.nlp.layers.EinsumDense
doc_controls.set_custom_page_builder_cls(tfm.core.exp_factory.get_exp_config,
ExpFactoryInfo)
url_parts = code_url_prefix.strip('/').split('/')
url_parts = url_parts[:url_parts.index('tensorflow_models')]
url_parts.append('official')
official_url_prefix = '/'.join(url_parts)
tfm_base_dir = pathlib.Path(tfm.__file__).parent
# The `layers` submodule (and others) are actually defined in the `official`
# package. Find the path to `official`.
official_base_dir = [
p for p in pathlib.Path(tfm.vision.layers.__file__).parents
if p.name == 'official'
][0]
doc_generator = generate_lib.DocGenerator(
root_title=project_full_name,
py_modules=[(project_short_name, tfm)],
base_dir=[tfm_base_dir, official_base_dir],
code_url_prefix=[
code_url_prefix,
official_url_prefix,
],
search_hints=search_hints,
site_path=site_path,
callbacks=[custom_filter],
)
doc_generator.build(output_dir)
logging.info('Output docs to: %s', output_dir)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
gen_api_docs(
code_url_prefix=FLAGS.code_url_prefix,
site_path=FLAGS.site_path,
output_dir=FLAGS.output_dir,
project_short_name=PROJECT_SHORT_NAME,
project_full_name=PROJECT_FULL_NAME,
search_hints=FLAGS.search_hints)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
app.run(main)
| 6,437 | 31.515152 | 114 | py |
models | models-master/official/utils/docs/build_orbit_api_docs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tool to generate api_docs for tensorflow_models/official library.
Example:
$> pip install -U git+https://github.com/tensorflow/docs
$> python build_orbit_api_docs.py --output_dir=/tmp/api_docs
"""
from absl import app
from absl import flags
from absl import logging
import orbit
import tensorflow as tf
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
FLAGS = flags.FLAGS
flags.DEFINE_string('output_dir', None, 'Where to write the resulting docs to.')
flags.DEFINE_string('code_url_prefix',
'https://github.com/tensorflow/models/blob/master/orbit',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', '/api_docs/python',
'Path prefix in the _toc.yaml')
PROJECT_SHORT_NAME = 'orbit'
PROJECT_FULL_NAME = 'Orbit'
def hide_module_model_and_layer_methods():
"""Hide methods and properties defined in the base classes of Keras layers.
We hide all methods and properties of the base classes, except:
- `__init__` is always documented.
- `call` is always documented, as it can carry important information for
complex layers.
"""
module_contents = list(tf.Module.__dict__.items())
model_contents = list(tf.keras.Model.__dict__.items())
layer_contents = list(tf.keras.layers.Layer.__dict__.items())
for name, obj in module_contents + layer_contents + model_contents:
if name == '__init__':
# Always document __init__.
continue
if name == 'call':
# Always document `call`.
if hasattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS): # pylint: disable=protected-access
delattr(obj, doc_controls._FOR_SUBCLASS_IMPLEMENTERS) # pylint: disable=protected-access
continue
# Otherwise, exclude from documentation.
if isinstance(obj, property):
obj = obj.fget
if isinstance(obj, (staticmethod, classmethod)):
obj = obj.__func__
try:
doc_controls.do_not_doc_in_subclasses(obj)
except AttributeError:
pass
def gen_api_docs(code_url_prefix, site_path, output_dir, project_short_name,
project_full_name, search_hints):
"""Generates api docs for the tensorflow docs package."""
doc_generator = generate_lib.DocGenerator(
root_title=project_full_name,
py_modules=[(project_short_name, orbit)],
code_url_prefix=code_url_prefix,
search_hints=search_hints,
site_path=site_path,
callbacks=[public_api.explicit_package_contents_filter],
)
doc_generator.build(output_dir)
logging.info('Output docs to: %s', output_dir)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
gen_api_docs(
code_url_prefix=FLAGS.code_url_prefix,
site_path=FLAGS.site_path,
output_dir=FLAGS.output_dir,
project_short_name=PROJECT_SHORT_NAME,
project_full_name=PROJECT_FULL_NAME,
search_hints=FLAGS.search_hints)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
app.run(main)
| 3,850 | 31.091667 | 99 | py |
models | models-master/official/utils/docs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/utils/misc/model_helpers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Model Helper functions."""
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.misc import model_helpers
class PastStopThresholdTest(tf.test.TestCase):
"""Tests for past_stop_threshold."""
def setUp(self):
super(PastStopThresholdTest, self).setUp()
tf.compat.v1.disable_eager_execution()
def test_past_stop_threshold(self):
"""Tests for normal operating conditions."""
self.assertTrue(model_helpers.past_stop_threshold(0.54, 1))
self.assertTrue(model_helpers.past_stop_threshold(54, 100))
self.assertFalse(model_helpers.past_stop_threshold(0.54, 0.1))
self.assertFalse(model_helpers.past_stop_threshold(-0.54, -1.5))
self.assertTrue(model_helpers.past_stop_threshold(-0.54, 0))
self.assertTrue(model_helpers.past_stop_threshold(0, 0))
self.assertTrue(model_helpers.past_stop_threshold(0.54, 0.54))
def test_past_stop_threshold_none_false(self):
"""Tests that check None returns false."""
self.assertFalse(model_helpers.past_stop_threshold(None, -1.5))
self.assertFalse(model_helpers.past_stop_threshold(None, None))
self.assertFalse(model_helpers.past_stop_threshold(None, 1.5))
# Zero should be okay, though.
self.assertTrue(model_helpers.past_stop_threshold(0, 1.5))
def test_past_stop_threshold_not_number(self):
"""Tests for error conditions."""
with self.assertRaises(ValueError):
model_helpers.past_stop_threshold('str', 1)
with self.assertRaises(ValueError):
model_helpers.past_stop_threshold('str', tf.constant(5))
with self.assertRaises(ValueError):
model_helpers.past_stop_threshold('str', 'another')
with self.assertRaises(ValueError):
model_helpers.past_stop_threshold(0, None)
with self.assertRaises(ValueError):
model_helpers.past_stop_threshold(0.7, 'str')
with self.assertRaises(ValueError):
model_helpers.past_stop_threshold(tf.constant(4), None)
class SyntheticDataTest(tf.test.TestCase):
"""Tests for generate_synthetic_data."""
  def test_generate_synthetic_data(self):
input_element, label_element = tf.compat.v1.data.make_one_shot_iterator(
model_helpers.generate_synthetic_data(
input_shape=tf.TensorShape([5]),
input_value=123,
input_dtype=tf.float32,
label_shape=tf.TensorShape([]),
label_value=456,
label_dtype=tf.int32)).get_next()
with self.session() as sess:
      for _ in range(5):
        inp, lab = sess.run((input_element, label_element))
        self.assertAllClose(inp, [123., 123., 123., 123., 123.])
        self.assertEqual(lab, 456)
def test_generate_only_input_data(self):
d = model_helpers.generate_synthetic_data(
input_shape=tf.TensorShape([4]),
input_value=43.5,
input_dtype=tf.float32)
element = tf.compat.v1.data.make_one_shot_iterator(d).get_next()
self.assertFalse(isinstance(element, tuple))
with self.session() as sess:
inp = sess.run(element)
self.assertAllClose(inp, [43.5, 43.5, 43.5, 43.5])
def test_generate_nested_data(self):
d = model_helpers.generate_synthetic_data(
input_shape={
'a': tf.TensorShape([2]),
'b': {
'c': tf.TensorShape([3]),
'd': tf.TensorShape([])
}
},
input_value=1.1)
element = tf.compat.v1.data.make_one_shot_iterator(d).get_next()
self.assertIn('a', element)
self.assertIn('b', element)
    self.assertEqual(len(element['b']), 2)
self.assertIn('c', element['b'])
self.assertIn('d', element['b'])
self.assertNotIn('c', element)
with self.session() as sess:
inp = sess.run(element)
self.assertAllClose(inp['a'], [1.1, 1.1])
self.assertAllClose(inp['b']['c'], [1.1, 1.1, 1.1])
self.assertAllClose(inp['b']['d'], 1.1)
if __name__ == '__main__':
tf.test.main()
| 4,549 | 34.546875 | 76 | py |
models | models-master/official/utils/misc/keras_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for the Keras implementations of models."""
import multiprocessing
import os
import time
from absl import logging
import tensorflow as tf
from tensorflow.python.eager import monitoring
global_batch_size_gauge = monitoring.IntGauge(
'/tensorflow/training/global_batch_size', 'TF training global batch size')
first_batch_time_gauge = monitoring.IntGauge(
'/tensorflow/training/first_batch',
    'TF training start/end time for first batch (unix epoch time in us).',
'type')
first_batch_start_time = first_batch_time_gauge.get_cell('start')
first_batch_end_time = first_batch_time_gauge.get_cell('end')
class BatchTimestamp(object):
"""A structure to store batch time stamp."""
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, log_steps, initial_step=0, logdir=None):
"""Callback for logging performance.
Args:
batch_size: Total batch size.
log_steps: Interval of steps between logging of batch level stats.
initial_step: Optional, initial step.
logdir: Optional directory to write TensorBoard summaries.
"""
# TODO(wcromar): remove this parameter and rely on `logs` parameter of
# on_train_batch_end()
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.last_log_step = initial_step
self.steps_before_epoch = initial_step
self.steps_in_epoch = 0
self.start_time = None
global_batch_size_gauge.get_cell().set(batch_size)
if logdir:
self.summary_writer = tf.summary.create_file_writer(logdir)
else:
self.summary_writer = None
# Logs start of step 1 then end of each step based on log_steps interval.
self.timestamp_log = []
# Records the time each epoch takes to run from start to finish of epoch.
self.epoch_runtime_log = []
@property
def global_steps(self):
"""The current 1-indexed global step."""
return self.steps_before_epoch + self.steps_in_epoch
@property
def average_steps_per_second(self):
"""The average training steps per second across all epochs."""
return self.global_steps / sum(self.epoch_runtime_log)
@property
def average_examples_per_second(self):
"""The average number of training examples per second across all epochs."""
return self.average_steps_per_second * self.batch_size
def get_examples_per_sec(self, warmup=1):
"""Calculates examples/sec through timestamp_log and skip warmup period."""
# First entry in timestamp_log is the start of the step 1. The rest of the
# entries are the end of each step recorded.
time_log = self.timestamp_log
seconds = time_log[-1].timestamp - time_log[warmup].timestamp
steps = time_log[-1].batch_index - time_log[warmup].batch_index
return self.batch_size * steps / seconds
def get_startup_time(self, start_time_sec):
return self.timestamp_log[0].timestamp - start_time_sec
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
if self.summary_writer:
self.summary_writer.flush()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_batch_begin(self, batch, logs=None):
if not self.start_time:
self.start_time = time.time()
if not first_batch_start_time.value():
first_batch_start_time.set(int(self.start_time * 1000000))
# Record the timestamp of the first global step
if not self.timestamp_log:
self.timestamp_log.append(
BatchTimestamp(self.global_steps, self.start_time))
def on_batch_end(self, batch, logs=None):
"""Records elapse time of the batch and calculates examples per second."""
if not first_batch_end_time.value():
first_batch_end_time.set(int(time.time() * 1000000))
self.steps_in_epoch = batch + 1
steps_since_last_log = self.global_steps - self.last_log_step
if steps_since_last_log >= self.log_steps:
now = time.time()
elapsed_time = now - self.start_time
steps_per_second = steps_since_last_log / elapsed_time
examples_per_second = steps_per_second * self.batch_size
self.timestamp_log.append(BatchTimestamp(self.global_steps, now))
logging.info(
'TimeHistory: %.2f seconds, %.2f examples/second between steps %d '
'and %d', elapsed_time, examples_per_second, self.last_log_step,
self.global_steps)
if self.summary_writer:
with self.summary_writer.as_default():
tf.summary.scalar('steps_per_second', steps_per_second,
self.global_steps)
tf.summary.scalar('examples_per_second', examples_per_second,
self.global_steps)
self.last_log_step = self.global_steps
self.start_time = None
def on_epoch_end(self, epoch, logs=None):
epoch_run_time = time.time() - self.epoch_start
self.epoch_runtime_log.append(epoch_run_time)
self.steps_before_epoch += self.steps_in_epoch
self.steps_in_epoch = 0
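# A hedged usage sketch for the TimeHistory callback above; the model,
# dataset, and values are illustrative:
#
#   time_callback = TimeHistory(batch_size=256, log_steps=100,
#                               logdir="/tmp/tb_logs")
#   model.fit(train_dataset, epochs=2, callbacks=[time_callback])
#   print("avg examples/sec:", time_callback.average_examples_per_second)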
class SimpleCheckpoint(tf.keras.callbacks.Callback):
"""Keras callback to save tf.train.Checkpoints."""
def __init__(self, checkpoint_manager):
super(SimpleCheckpoint, self).__init__()
self.checkpoint_manager = checkpoint_manager
def on_epoch_end(self, epoch, logs=None):
step_counter = self.checkpoint_manager._step_counter.numpy() # pylint: disable=protected-access
self.checkpoint_manager.save(checkpoint_number=step_counter)
def set_session_config(enable_xla=False):
"""Sets the session config."""
if enable_xla:
tf.config.optimizer.set_jit(True)
# TODO(hongkuny): remove set_config_v2 globally.
set_config_v2 = set_session_config
def set_gpu_thread_mode_and_count(gpu_thread_mode, datasets_num_private_threads,
num_gpus, per_gpu_thread_count):
"""Set GPU thread mode and count, and adjust dataset threads count."""
cpu_count = multiprocessing.cpu_count()
logging.info('Logical CPU cores: %s', cpu_count)
  # Allocate a private thread pool for each GPU to schedule and launch kernels.
per_gpu_thread_count = per_gpu_thread_count or 2
os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
logging.info('TF_GPU_THREAD_COUNT: %s', os.environ['TF_GPU_THREAD_COUNT'])
logging.info('TF_GPU_THREAD_MODE: %s', os.environ['TF_GPU_THREAD_MODE'])
# Limit data preprocessing threadpool to CPU cores minus number of total GPU
# private threads and memory copy threads.
total_gpu_thread_count = per_gpu_thread_count * num_gpus
num_runtime_threads = num_gpus
if not datasets_num_private_threads:
datasets_num_private_threads = min(
cpu_count - total_gpu_thread_count - num_runtime_threads, num_gpus * 8)
logging.info('Set datasets_num_private_threads to %s',
datasets_num_private_threads)
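# Worked example of the fallback above, with illustrative counts: given 64
# logical cores, 8 GPUs, and the default 2 private threads per GPU, the
# dataset threadpool gets min(64 - 16 - 8, 8 * 8) = min(40, 64) = 40 threads.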
| 7,783 | 35.716981 | 100 | py |
models | models-master/official/utils/misc/model_helpers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous functions that can be called by models."""
import numbers
from absl import logging
import tensorflow as tf
from tensorflow.python.util import nest
# pylint:disable=logging-format-interpolation
def past_stop_threshold(stop_threshold, eval_metric):
"""Return a boolean representing whether a model should be stopped.
Args:
stop_threshold: float, the threshold above which a model should stop
training.
eval_metric: float, the current value of the relevant metric to check.
Returns:
True if training should stop, False otherwise.
Raises:
ValueError: if either stop_threshold or eval_metric is not a number
"""
if stop_threshold is None:
return False
if not isinstance(stop_threshold, numbers.Number):
raise ValueError("Threshold for checking stop conditions must be a number.")
if not isinstance(eval_metric, numbers.Number):
raise ValueError("Eval metric being checked against stop conditions "
"must be a number.")
if eval_metric >= stop_threshold:
logging.info("Stop threshold of {} was passed with metric value {}.".format(
stop_threshold, eval_metric))
return True
return False
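# A hedged usage sketch inside a train/eval loop; the flag object and metric
# name are illustrative:
#
#   eval_results = model.evaluate(eval_dataset, return_dict=True)
#   if past_stop_threshold(flags_obj.stop_threshold, eval_results["accuracy"]):
#     break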
def generate_synthetic_data(input_shape,
input_value=0,
input_dtype=None,
label_shape=None,
label_value=0,
label_dtype=None):
"""Create a repeating dataset with constant values.
Args:
input_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of
the input data.
input_value: Value of each input element.
input_dtype: Input dtype. If None, will be inferred by the input value.
label_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of
the label data.
    label_value: Value of each label element.
    label_dtype: Label dtype. If None, will be inferred by the label value.
Returns:
Dataset of tensors or tuples of tensors (if label_shape is set).
"""
# TODO(kathywu): Replace with SyntheticDataset once it is in contrib.
element = input_element = nest.map_structure(
lambda s: tf.constant(input_value, input_dtype, s), input_shape)
if label_shape:
label_element = nest.map_structure(
lambda s: tf.constant(label_value, label_dtype, s), label_shape)
element = (input_element, label_element)
return tf.data.Dataset.from_tensors(element).repeat()
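# A hedged usage sketch producing fake image-classification batches; the
# shapes and values are illustrative:
#
#   dataset = generate_synthetic_data(
#       input_shape=tf.TensorShape([224, 224, 3]),
#       input_value=127.5,
#       input_dtype=tf.float32,
#       label_shape=tf.TensorShape([]),
#       label_value=1,
#       label_dtype=tf.int32).batch(32)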
def apply_clean(flags_obj):
if flags_obj.clean and tf.io.gfile.exists(flags_obj.model_dir):
logging.info("--clean flag set. Removing existing model dir:"
" {}".format(flags_obj.model_dir))
tf.io.gfile.rmtree(flags_obj.model_dir)
| 3,360 | 34.378947 | 80 | py |
models | models-master/official/utils/misc/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/utils/flags/_conventions.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Central location for shared argparse convention definitions."""
import codecs
import functools
import sys
from absl import app as absl_app
from absl import flags
# This codifies help string conventions and makes it easy to update them if
# necessary. Currently the only major effect is that help bodies start on the
# line after flags are listed. All flag definitions should wrap the text bodies
# with help wrap when calling DEFINE_*.
_help_wrap = functools.partial(
flags.text_wrap, length=80, indent="", firstline_indent="\n")
# Pretty formatting causes issues when utf-8 is not installed on a system.
def _stdout_utf8():
try:
codecs.lookup("utf-8")
except LookupError:
return False
return getattr(sys.stdout, "encoding", "") == "UTF-8"
if _stdout_utf8():
help_wrap = _help_wrap
else:
def help_wrap(text, *args, **kwargs):
return _help_wrap(text, *args, **kwargs).replace(u"\ufeff", u"")
# Replace None with h to also allow -h
absl_app.HelpshortFlag.SHORT_NAME = "h"
| 1,618 | 30.745098 | 79 | py |
models | models-master/official/utils/flags/core.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public interface for flag definition.
See _example.py for detailed instructions on defining flags.
"""
import sys
from six.moves import shlex_quote
from absl import app as absl_app
from absl import flags
from official.utils.flags import _base
from official.utils.flags import _benchmark
from official.utils.flags import _conventions
from official.utils.flags import _device
from official.utils.flags import _distribution
from official.utils.flags import _misc
from official.utils.flags import _performance
def set_defaults(**kwargs):
for key, value in kwargs.items():
flags.FLAGS.set_default(name=key, value=value)
def parse_flags(argv=None):
"""Reset flags and reparse. Currently only used in testing."""
flags.FLAGS.unparse_flags()
absl_app.parse_flags_with_usage(argv or sys.argv)
def register_key_flags_in_core(f):
"""Defines a function in core.py, and registers its key flags.
absl uses the location of a flags.declare_key_flag() to determine the context
in which a flag is key. By making all declares in core, this allows model
main functions to call flags.adopt_module_key_flags() on core and correctly
chain key flags.
Args:
f: The function to be wrapped
Returns:
The "core-defined" version of the input function.
"""
def core_fn(*args, **kwargs):
key_flags = f(*args, **kwargs)
[flags.declare_key_flag(fl) for fl in key_flags] # pylint: disable=expression-not-assigned
return core_fn
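# A hedged sketch of the key-flag chaining described above, as used from a
# model's main module (the call site is illustrative):
#
#   from official.utils.flags import core as flags_core
#   flags_core.define_base(train_epochs=True)  # Declares key flags in core.
#   flags.adopt_module_key_flags(flags_core)   # Re-exports them as key flags.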
define_base = register_key_flags_in_core(_base.define_base)
# We have define_base_eager for compatibility, since it used to be a separate
# function from define_base.
define_base_eager = define_base
define_log_steps = register_key_flags_in_core(_benchmark.define_log_steps)
define_benchmark = register_key_flags_in_core(_benchmark.define_benchmark)
define_device = register_key_flags_in_core(_device.define_device)
define_image = register_key_flags_in_core(_misc.define_image)
define_performance = register_key_flags_in_core(_performance.define_performance)
define_distribution = register_key_flags_in_core(
_distribution.define_distribution)
help_wrap = _conventions.help_wrap
get_num_gpus = _base.get_num_gpus
get_tf_dtype = _performance.get_tf_dtype
get_loss_scale = _performance.get_loss_scale
DTYPE_MAP = _performance.DTYPE_MAP
require_cloud_storage = _device.require_cloud_storage
def _get_nondefault_flags_as_dict():
"""Returns the nondefault flags as a dict from flag name to value."""
nondefault_flags = {}
for flag_name in flags.FLAGS:
flag_value = getattr(flags.FLAGS, flag_name)
if (flag_name != flags.FLAGS[flag_name].short_name and
flag_value != flags.FLAGS[flag_name].default):
nondefault_flags[flag_name] = flag_value
return nondefault_flags
def get_nondefault_flags_as_str():
"""Returns flags as a string that can be passed as command line arguments.
E.g., returns: "--batch_size=256 --use_synthetic_data" for the following code
block:
```
flags.FLAGS.batch_size = 256
flags.FLAGS.use_synthetic_data = True
print(get_nondefault_flags_as_str())
```
Only flags with nondefault values are returned, as passing default flags as
command line arguments has no effect.
Returns:
A string with the flags, that can be passed as command line arguments to a
program to use the flags.
"""
nondefault_flags = _get_nondefault_flags_as_dict()
flag_strings = []
for name, value in sorted(nondefault_flags.items()):
if isinstance(value, bool):
flag_str = '--{}'.format(name) if value else '--no{}'.format(name)
elif isinstance(value, list):
flag_str = '--{}={}'.format(name, ','.join(value))
else:
flag_str = '--{}={}'.format(name, value)
flag_strings.append(flag_str)
return ' '.join(shlex_quote(flag_str) for flag_str in flag_strings)
| 4,427 | 32.801527 | 95 | py |
models | models-master/official/utils/flags/_misc.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Misc flags."""
from absl import flags
from official.utils.flags._conventions import help_wrap
def define_image(data_format=True):
"""Register image specific flags.
Args:
data_format: Create a flag to specify image axis convention.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if data_format:
flags.DEFINE_enum(
name="data_format",
short_name="df",
default=None,
enum_values=["channels_first", "channels_last"],
help=help_wrap(
"A flag to override the data format used in the model. "
"channels_first provides a performance boost on GPU but is not "
"always compatible with CPU. If left unspecified, the data format "
"will be chosen automatically based on whether TensorFlow was "
"built for CPU or GPU."))
key_flags.append("data_format")
return key_flags
| 1,541 | 30.469388 | 79 | py |
models | models-master/official/utils/flags/flags_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the shared flag definitions in official.utils.flags."""
import unittest
from absl import flags
import tensorflow as tf
from official.utils.flags import core as flags_core # pylint: disable=g-bad-import-order
def define_flags():
flags_core.define_base(
clean=True,
num_gpu=False,
stop_threshold=True,
hooks=True,
train_epochs=True,
epochs_between_evals=True)
flags_core.define_performance(
num_parallel_calls=True,
inter_op=True,
intra_op=True,
loss_scale=True,
synthetic_data=True,
dtype=True)
flags_core.define_image()
flags_core.define_benchmark()
class BaseTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(BaseTester, cls).setUpClass()
define_flags()
def test_default_setting(self):
"""Test to ensure fields exist and defaults can be set."""
defaults = dict(
data_dir="dfgasf",
model_dir="dfsdkjgbs",
train_epochs=534,
epochs_between_evals=15,
batch_size=256,
hooks=["LoggingTensorHook"],
num_parallel_calls=18,
inter_op_parallelism_threads=5,
intra_op_parallelism_threads=10,
data_format="channels_first")
flags_core.set_defaults(**defaults)
flags_core.parse_flags()
for key, value in defaults.items():
assert flags.FLAGS.get_flag_value(name=key, default=None) == value
def test_benchmark_setting(self):
defaults = dict(
hooks=["LoggingMetricHook"],
benchmark_log_dir="/tmp/12345",
gcp_project="project_abc",
)
flags_core.set_defaults(**defaults)
flags_core.parse_flags()
for key, value in defaults.items():
assert flags.FLAGS.get_flag_value(name=key, default=None) == value
def test_booleans(self):
"""Test to ensure boolean flags trigger as expected."""
flags_core.parse_flags([__file__, "--use_synthetic_data"])
assert flags.FLAGS.use_synthetic_data
def test_parse_dtype_info(self):
flags_core.parse_flags([__file__, "--dtype", "fp16"])
self.assertEqual(flags_core.get_tf_dtype(flags.FLAGS), tf.float16)
self.assertEqual(
flags_core.get_loss_scale(flags.FLAGS, default_for_fp16=2), 2)
flags_core.parse_flags([__file__, "--dtype", "fp16", "--loss_scale", "5"])
self.assertEqual(
flags_core.get_loss_scale(flags.FLAGS, default_for_fp16=2), 5)
flags_core.parse_flags(
[__file__, "--dtype", "fp16", "--loss_scale", "dynamic"])
self.assertEqual(
flags_core.get_loss_scale(flags.FLAGS, default_for_fp16=2), "dynamic")
flags_core.parse_flags([__file__, "--dtype", "fp32"])
self.assertEqual(flags_core.get_tf_dtype(flags.FLAGS), tf.float32)
self.assertEqual(
flags_core.get_loss_scale(flags.FLAGS, default_for_fp16=2), 1)
flags_core.parse_flags([__file__, "--dtype", "fp32", "--loss_scale", "5"])
self.assertEqual(
flags_core.get_loss_scale(flags.FLAGS, default_for_fp16=2), 5)
with self.assertRaises(SystemExit):
flags_core.parse_flags([__file__, "--dtype", "int8"])
with self.assertRaises(SystemExit):
flags_core.parse_flags(
[__file__, "--dtype", "fp16", "--loss_scale", "abc"])
def test_get_nondefault_flags_as_str(self):
defaults = dict(
clean=True,
data_dir="abc",
hooks=["LoggingTensorHook"],
stop_threshold=1.5,
use_synthetic_data=False)
flags_core.set_defaults(**defaults)
flags_core.parse_flags()
expected_flags = ""
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.clean = False
expected_flags += "--noclean"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.data_dir = "xyz"
expected_flags += " --data_dir=xyz"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.hooks = ["aaa", "bbb", "ccc"]
expected_flags += " --hooks=aaa,bbb,ccc"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.stop_threshold = 3.
expected_flags += " --stop_threshold=3.0"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
flags.FLAGS.use_synthetic_data = True
expected_flags += " --use_synthetic_data"
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
    # Assert that explicitly setting a flag to its default value does not
    # cause it to appear in the string.
flags.FLAGS.use_synthetic_data = False
expected_flags = expected_flags[:-len(" --use_synthetic_data")]
self.assertEqual(flags_core.get_nondefault_flags_as_str(), expected_flags)
if __name__ == "__main__":
unittest.main()
| 5,302 | 31.533742 | 89 | py |
models | models-master/official/utils/flags/_base.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags which will be nearly universal across models."""
from absl import flags
import tensorflow as tf
from official.utils.flags._conventions import help_wrap
def define_base(data_dir=True,
model_dir=True,
clean=False,
train_epochs=False,
epochs_between_evals=False,
stop_threshold=False,
batch_size=True,
num_gpu=False,
hooks=False,
export_dir=False,
distribution_strategy=False,
run_eagerly=False):
"""Register base flags.
Args:
data_dir: Create a flag for specifying the input data directory.
model_dir: Create a flag for specifying the model file directory.
clean: Create a flag for removing the model_dir.
train_epochs: Create a flag to specify the number of training epochs.
epochs_between_evals: Create a flag to specify the frequency of testing.
stop_threshold: Create a flag to specify a threshold accuracy or other eval
metric which should trigger the end of training.
batch_size: Create a flag to specify the batch size.
num_gpu: Create a flag to specify the number of GPUs used.
hooks: Create a flag to specify hooks for logging.
export_dir: Create a flag to specify where a SavedModel should be exported.
distribution_strategy: Create a flag to specify which Distribution Strategy
to use.
    run_eagerly: Create a flag to specify whether to run eagerly, op by op.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if data_dir:
flags.DEFINE_string(
name="data_dir",
short_name="dd",
default="/tmp",
help=help_wrap("The location of the input data."))
key_flags.append("data_dir")
if model_dir:
flags.DEFINE_string(
name="model_dir",
short_name="md",
default="/tmp",
help=help_wrap("The location of the model checkpoint files."))
key_flags.append("model_dir")
if clean:
flags.DEFINE_boolean(
name="clean",
default=False,
help=help_wrap("If set, model_dir will be removed if it exists."))
key_flags.append("clean")
if train_epochs:
flags.DEFINE_integer(
name="train_epochs",
short_name="te",
default=1,
help=help_wrap("The number of epochs used to train."))
key_flags.append("train_epochs")
if epochs_between_evals:
flags.DEFINE_integer(
name="epochs_between_evals",
short_name="ebe",
default=1,
help=help_wrap("The number of training epochs to run between "
"evaluations."))
key_flags.append("epochs_between_evals")
if stop_threshold:
flags.DEFINE_float(
name="stop_threshold",
short_name="st",
default=None,
help=help_wrap("If passed, training will stop at the earlier of "
"train_epochs and when the evaluation metric is "
"greater than or equal to stop_threshold."))
if batch_size:
flags.DEFINE_integer(
name="batch_size",
short_name="bs",
default=32,
help=help_wrap("Batch size for training and evaluation. When using "
"multiple gpus, this is the global batch size for "
"all devices. For example, if the batch size is 32 "
"and there are 4 GPUs, each GPU will get 8 examples on "
"each step."))
key_flags.append("batch_size")
if num_gpu:
flags.DEFINE_integer(
name="num_gpus",
short_name="ng",
default=1,
help=help_wrap("How many GPUs to use at each worker with the "
"DistributionStrategies API. The default is 1."))
if run_eagerly:
flags.DEFINE_boolean(
name="run_eagerly",
default=False,
help="Run the model op by op without building a model function.")
if hooks:
flags.DEFINE_list(
name="hooks",
short_name="hk",
default="LoggingTensorHook",
help=help_wrap(
u"A list of (case insensitive) strings to specify the names of "
u"training hooks. Example: `--hooks ProfilerHook,"
u"ExamplesPerSecondHook`\n See hooks_helper "
u"for details."))
key_flags.append("hooks")
if export_dir:
flags.DEFINE_string(
name="export_dir",
short_name="ed",
default=None,
help=help_wrap("If set, a SavedModel serialization of the model will "
"be exported to this directory at the end of training. "
"See the README for more details and relevant links."))
key_flags.append("export_dir")
if distribution_strategy:
flags.DEFINE_string(
name="distribution_strategy",
short_name="ds",
default="mirrored",
        help=help_wrap("The Distribution Strategy to use for training. "
                       "Accepted values are 'off', 'default', 'one_device', "
"'mirrored', 'parameter_server', 'collective', "
"case insensitive. 'off' means not to use "
"Distribution Strategy; 'default' means to choose "
"from `MirroredStrategy` or `OneDeviceStrategy` "
"according to the number of GPUs."))
return key_flags
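# Usage sketch (illustrative, not part of this module): register the common
# flags once at startup, then read them after parsing. The argv below is a
# made-up example.
#
#   define_base(clean=True, train_epochs=True, hooks=True)
#   flags.FLAGS(["program", "--train_epochs", "5", "--noclean"])
#   assert flags.FLAGS.train_epochs == 5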
def get_num_gpus(flags_obj):
"""Treat num_gpus=-1 as 'use all'."""
if flags_obj.num_gpus != -1:
return flags_obj.num_gpus
from tensorflow.python.client import device_lib # pylint: disable=g-import-not-at-top
local_device_protos = device_lib.list_local_devices()
return sum([1 for d in local_device_protos if d.device_type == "GPU"])
| 6,395 | 34.932584 | 88 | py |
models | models-master/official/utils/flags/_performance.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Register flags for optimizing performance."""
import multiprocessing
from absl import flags # pylint: disable=g-bad-import-order
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.flags._conventions import help_wrap
# Map string to TensorFlow dtype
DTYPE_MAP = {
"fp16": tf.float16,
"bf16": tf.bfloat16,
"fp32": tf.float32,
}
def get_tf_dtype(flags_obj):
if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite":
# If the graph_rewrite is used, we build the graph with fp32, and let the
# graph rewrite change ops to fp16.
return tf.float32
return DTYPE_MAP[flags_obj.dtype]
def get_loss_scale(flags_obj, default_for_fp16):
dtype = get_tf_dtype(flags_obj)
if flags_obj.loss_scale == "dynamic":
return flags_obj.loss_scale
elif flags_obj.loss_scale is not None:
return float(flags_obj.loss_scale)
elif dtype == tf.float32 or dtype == tf.bfloat16:
    return 1  # No loss scaling is needed for fp32 or bf16
else:
assert dtype == tf.float16
return default_for_fp16
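# Resolution sketch (illustrative; mirrors the semantics exercised in the
# flags test above):
#   --dtype=fp16                       -> default_for_fp16 (a number or "dynamic")
#   --dtype=fp16 --loss_scale=5        -> 5.0
#   --dtype=fp16 --loss_scale=dynamic  -> "dynamic"
#   --dtype=fp32 (or bf16)             -> 1, since no loss scaling is needed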
def define_performance(num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=False,
max_train_steps=False,
dtype=False,
all_reduce_alg=False,
num_packs=False,
tf_gpu_thread_mode=False,
datasets_num_private_threads=False,
datasets_num_parallel_batches=False,
fp16_implementation=False,
loss_scale=False,
tf_data_experimental_slack=False,
enable_xla=False,
training_dataset_cache=False):
"""Register flags for specifying performance tuning arguments.
Args:
num_parallel_calls: Create a flag to specify parallelism of data loading.
inter_op: Create a flag to allow specification of inter op threads.
intra_op: Create a flag to allow specification of intra op threads.
synthetic_data: Create a flag to allow the use of synthetic data.
    max_train_steps: Create a flag to allow specification of the maximum
      number of training steps.
    dtype: Create flags for specifying dtype.
    all_reduce_alg: If set, forces a specific algorithm for multi-gpu.
    num_packs: If set, provides the number of packs for MirroredStrategy's
      cross device ops.
    tf_gpu_thread_mode: gpu_private triggers use of a private thread pool.
datasets_num_private_threads: Number of private threads for datasets.
datasets_num_parallel_batches: Determines how many batches to process in
parallel when using map and batch from tf.data.
fp16_implementation: Create fp16_implementation flag.
loss_scale: Controls the loss scaling, normally for mixed-precision
training. Can only be turned on if dtype is also True.
tf_data_experimental_slack: Determines whether to enable tf.data's
`experimental_slack` option.
enable_xla: Determines if XLA (auto clustering) is turned on.
training_dataset_cache: Whether to cache the training dataset on workers.
Typically used to improve training performance when training data is in
remote storage and can fit into worker memory.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if num_parallel_calls:
flags.DEFINE_integer(
name="num_parallel_calls",
short_name="npc",
default=multiprocessing.cpu_count(),
help=help_wrap("The number of records that are processed in parallel "
"during input processing. This can be optimized per "
"data set but for generally homogeneous data sets, "
"should be approximately the number of available CPU "
"cores. (default behavior)"))
if inter_op:
flags.DEFINE_integer(
name="inter_op_parallelism_threads",
short_name="inter",
default=0,
help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. "
"See TensorFlow config.proto for details."))
if intra_op:
flags.DEFINE_integer(
name="intra_op_parallelism_threads",
short_name="intra",
default=0,
help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. "
"See TensorFlow config.proto for details."))
if synthetic_data:
flags.DEFINE_bool(
name="use_synthetic_data",
short_name="synth",
default=False,
help=help_wrap(
"If set, use fake data (zeroes) instead of a real dataset. "
"This mode is useful for performance debugging, as it removes "
"input processing steps, but will not learn anything."))
if max_train_steps:
flags.DEFINE_integer(
name="max_train_steps",
short_name="mts",
default=None,
help=help_wrap(
"The model will stop training if the global_step reaches this "
"value. If not set, training will run until the specified number "
"of epochs have run as usual. It is generally recommended to set "
"--train_epochs=1 when using this flag."))
if dtype:
flags.DEFINE_enum(
name="dtype",
short_name="dt",
default="fp32",
enum_values=DTYPE_MAP.keys(),
help=help_wrap("The TensorFlow datatype used for calculations. "
"For 16-bit dtypes, variables and certain ops will "
"still be float32 for numeric stability."))
if loss_scale:
flags.DEFINE_string(
name="loss_scale",
short_name="ls",
default=None,
        help=help_wrap(
            "The amount to scale the loss by when --dtype=fp16. This can be "
            "an int/float or the string 'dynamic'. Before gradients are "
            "computed, the loss is multiplied by the loss scale, making all "
            "gradients loss_scale times larger. To adjust for this, "
            "gradients are divided by the loss scale before being applied to "
            "variables. This is mathematically equivalent to training "
            "without a loss scale, but the loss scale helps avoid some "
            "intermediate gradients from underflowing to zero. The default "
            "is 'dynamic', which dynamically determines the optimal loss "
            "scale during training."))
# pylint: disable=unused-variable
@flags.validator(
flag_name="loss_scale",
message="loss_scale should be a positive int/float or the string "
"'dynamic'.")
def _check_loss_scale(loss_scale):
"""Validator to check the loss scale flag is valid."""
if loss_scale is None:
return True # null case is handled in get_loss_scale()
if loss_scale == "dynamic":
return True
try:
loss_scale = float(loss_scale)
except ValueError:
return False
return loss_scale > 0
# pylint: enable=unused-variable
if fp16_implementation:
flags.DEFINE_enum(
name="fp16_implementation",
default="keras",
enum_values=("keras", "graph_rewrite"),
help=help_wrap(
"When --dtype=fp16, how fp16 should be implemented. This has no "
"impact on correctness. 'keras' uses the "
"tf.keras.mixed_precision API. 'graph_rewrite' uses the "
"tf.compat.v1.mixed_precision."
"enable_mixed_precision_graph_rewrite API."))
@flags.multi_flags_validator(
["fp16_implementation", "dtype", "loss_scale"])
def _check_fp16_implementation(flags_dict):
"""Validator to check fp16_implementation flag is valid."""
if (flags_dict["fp16_implementation"] == "graph_rewrite" and
flags_dict["dtype"] != "fp16"):
raise flags.ValidationError("--fp16_implementation should not be "
"specified unless --dtype=fp16")
return True
if all_reduce_alg:
flags.DEFINE_string(
name="all_reduce_alg",
short_name="ara",
default=None,
        help=help_wrap("Defines the algorithm to use for performing all-reduce. "
"When specified with MirroredStrategy for single "
"worker, this controls "
"tf.contrib.distribute.AllReduceCrossTowerOps. When "
"specified with MultiWorkerMirroredStrategy, this "
"controls "
"tf.distribute.experimental.CollectiveCommunication; "
"valid options are `ring` and `nccl`."))
if num_packs:
flags.DEFINE_integer(
name="num_packs",
default=1,
help=help_wrap("Sets `num_packs` in the cross device ops used in "
"MirroredStrategy. For details, see "
"tf.distribute.NcclAllReduce."))
if tf_gpu_thread_mode:
flags.DEFINE_string(
name="tf_gpu_thread_mode",
short_name="gt_mode",
default=None,
help=help_wrap(
"Whether and how the GPU device uses its own threadpool."))
flags.DEFINE_integer(
name="per_gpu_thread_count",
short_name="pgtc",
default=0,
help=help_wrap("The number of threads to use for GPU. Only valid when "
"tf_gpu_thread_mode is not global."))
if datasets_num_private_threads:
flags.DEFINE_integer(
name="datasets_num_private_threads",
default=None,
        help=help_wrap(
            "Number of threads for a private threadpool created for all "
            "datasets computation."))
if datasets_num_parallel_batches:
flags.DEFINE_integer(
name="datasets_num_parallel_batches",
default=None,
help=help_wrap(
"Determines how many batches to process in parallel when using "
"map and batch from tf.data."))
if training_dataset_cache:
flags.DEFINE_boolean(
name="training_dataset_cache",
default=False,
help=help_wrap(
"Determines whether to cache the training dataset on workers. "
"Typically used to improve training performance when training "
"data is in remote storage and can fit into worker memory."))
if tf_data_experimental_slack:
flags.DEFINE_boolean(
name="tf_data_experimental_slack",
default=False,
help=help_wrap(
"Whether to enable tf.data's `experimental_slack` option."))
if enable_xla:
flags.DEFINE_boolean(
name="enable_xla",
default=False,
help="Whether to enable XLA auto jit compilation")
return key_flags
| 11,566 | 38.210169 | 80 | py |
models | models-master/official/utils/flags/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/utils/flags/_device.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for managing compute devices. Currently only contains TPU flags."""
from absl import flags
from absl import logging
from official.utils.flags._conventions import help_wrap
def require_cloud_storage(flag_names):
"""Register a validator to check directory flags.
Args:
flag_names: An iterable of strings containing the names of flags to be
checked.
"""
msg = "TPU requires GCS path for {}".format(", ".join(flag_names))
@flags.multi_flags_validator(["tpu"] + flag_names, message=msg)
def _path_check(flag_values): # pylint: disable=missing-docstring
if flag_values["tpu"] is None:
return True
valid_flags = True
for key in flag_names:
if not flag_values[key].startswith("gs://"):
logging.error("%s must be a GCS path.", key)
valid_flags = False
return valid_flags
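# Usage sketch (illustrative): after defining the tpu and model_dir flags, a
# caller can fail fast when --tpu is set but --model_dir is not a GCS path.
#
#   require_cloud_storage(["model_dir"])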
def define_device(tpu=True):
"""Register device specific flags.
Args:
tpu: Create flags to specify TPU operation.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if tpu:
flags.DEFINE_string(
name="tpu",
default=None,
        help=help_wrap(
            "The Cloud TPU to use for training. This should be either the "
            "name used when creating the Cloud TPU, or a "
            "grpc://ip.address.of.tpu:8470 url. Passing `local` will use "
            "the CPU of the local instance instead. (Good for debugging.)"))
key_flags.append("tpu")
flags.DEFINE_string(
name="tpu_zone",
default=None,
        help=help_wrap(
            "[Optional] GCE zone where the Cloud TPU is located. If not "
            "specified, we will attempt to automatically detect the zone "
            "from metadata."))
flags.DEFINE_string(
name="tpu_gcp_project",
default=None,
help=help_wrap(
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE "
"project from metadata."))
flags.DEFINE_integer(
name="num_tpu_shards",
default=8,
help=help_wrap("Number of shards (TPU chips)."))
return key_flags
| 2,826 | 30.065934 | 80 | py |
models | models-master/official/utils/flags/_distribution.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags related to distributed execution."""
from absl import flags
import tensorflow as tf
from official.utils.flags._conventions import help_wrap
def define_distribution(worker_hosts=True, task_index=True):
"""Register distributed execution flags.
Args:
worker_hosts: Create a flag for specifying comma-separated list of workers.
task_index: Create a flag for specifying index of task.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if worker_hosts:
flags.DEFINE_string(
name='worker_hosts',
default=None,
help=help_wrap(
'Comma-separated list of worker ip:port pairs for running '
'multi-worker models with DistributionStrategy. The user would '
'start the program on each host with identical value for this '
'flag.'))
if task_index:
flags.DEFINE_integer(
name='task_index',
default=-1,
help=help_wrap('If multi-worker training, the task_index of this '
'worker.'))
return key_flags
| 1,694 | 30.981132 | 79 | py |
models | models-master/official/utils/flags/_benchmark.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for benchmarking models."""
from absl import flags
from official.utils.flags._conventions import help_wrap
def define_log_steps():
flags.DEFINE_integer(
name="log_steps",
default=100,
help="Frequency with which to log timing information with TimeHistory.")
return []
def define_benchmark(benchmark_log_dir=True, bigquery_uploader=True):
"""Register benchmarking flags.
Args:
benchmark_log_dir: Create a flag to specify location for benchmark logging.
bigquery_uploader: Create flags for uploading results to BigQuery.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
flags.DEFINE_enum(
name="benchmark_logger_type",
default="BaseBenchmarkLogger",
enum_values=["BaseBenchmarkLogger", "BenchmarkFileLogger"],
help=help_wrap("The type of benchmark logger to use. Defaults to using "
"BaseBenchmarkLogger which logs to STDOUT. Different "
"loggers will require other flags to be able to work."))
flags.DEFINE_string(
name="benchmark_test_id",
short_name="bti",
default=None,
      help=help_wrap("The unique test ID of the benchmark run. It could be "
                     "the combination of key parameters. It is hardware "
                     "independent and could be used to compare the "
                     "performance between different test runs. This flag is "
                     "designed for human consumption, and does not have any "
                     "impact within the system."))
define_log_steps()
if benchmark_log_dir:
flags.DEFINE_string(
name="benchmark_log_dir",
short_name="bld",
default=None,
help=help_wrap("The location of the benchmark logging."))
if bigquery_uploader:
flags.DEFINE_string(
name="gcp_project",
short_name="gp",
default=None,
help=help_wrap(
"The GCP project name where the benchmark will be uploaded."))
flags.DEFINE_string(
name="bigquery_data_set",
short_name="bds",
default="test_benchmark",
help=help_wrap(
"The Bigquery dataset name where the benchmark will be uploaded."))
flags.DEFINE_string(
name="bigquery_run_table",
short_name="brt",
default="benchmark_run",
help=help_wrap("The Bigquery table name where the benchmark run "
"information will be uploaded."))
flags.DEFINE_string(
name="bigquery_run_status_table",
short_name="brst",
default="benchmark_run_status",
help=help_wrap("The Bigquery table name where the benchmark run "
"status information will be uploaded."))
flags.DEFINE_string(
name="bigquery_metric_table",
short_name="bmt",
default="benchmark_metric",
help=help_wrap("The Bigquery table name where the benchmark metric "
"information will be uploaded."))
@flags.multi_flags_validator(
["benchmark_logger_type", "benchmark_log_dir"],
message="--benchmark_logger_type=BenchmarkFileLogger will require "
"--benchmark_log_dir being set")
def _check_benchmark_log_dir(flags_dict):
benchmark_logger_type = flags_dict["benchmark_logger_type"]
if benchmark_logger_type == "BenchmarkFileLogger":
return flags_dict["benchmark_log_dir"]
return True
return key_flags
| 4,082 | 33.601695 | 80 | py |
models | models-master/official/modeling/tf_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common TF utilities."""
import functools
import inspect
import six
import tensorflow as tf
from tensorflow.python.util import deprecation
from official.modeling import activations
@deprecation.deprecated(
None,
"tf.keras.layers.Layer supports multiple positional args and kwargs as "
"input tensors. pack/unpack inputs to override __call__ is no longer "
"needed.")
def pack_inputs(inputs):
"""Pack a list of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
    A tuple of tensors. If any input is None, it is replaced with a special
    constant tensor.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if x is None:
outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
else:
outputs.append(x)
return tuple(outputs)
@deprecation.deprecated(
None,
"tf.keras.layers.Layer supports multiple positional args and kwargs as "
"input tensors. pack/unpack inputs to override __call__ is no longer "
"needed.")
def unpack_inputs(inputs):
  """Unpacks a tuple of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
    A tuple of tensors. If any input is a special constant tensor, it is
    replaced with None.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if is_special_none_tensor(x):
outputs.append(None)
else:
outputs.append(x)
x = tuple(outputs)
# To trick the very pointless 'unbalanced-tuple-unpacking' pylint check
# from triggering.
if len(x) == 1:
return x[0]
return tuple(outputs)
def is_special_none_tensor(tensor):
"""Checks if a tensor is a special None Tensor."""
return tensor.shape.ndims == 0 and tensor.dtype == tf.int32
def get_activation(identifier, use_keras_layer=False, **kwargs):
"""Maps an identifier to a Python function, e.g., "relu" => `tf.nn.relu`.
  It checks the string first, and if it is one of the customized activations
  not in TF, the corresponding activation will be returned. For non-customized
  activation names and callable identifiers, it always falls back to
  tf.keras.activations.get.
  Prefers using keras layers when use_keras_layer=True. Currently it supports
  'relu', 'linear', 'identity', 'swish', 'sigmoid', 'relu6', 'leaky_relu',
  'hard_swish', 'hard_sigmoid', 'mish', and 'gelu'.
Args:
identifier: String name of the activation function or callable.
use_keras_layer: If True, use keras layer if identifier is allow-listed.
**kwargs: Keyword arguments to use to instantiate an activation function.
Available only for 'leaky_relu' and 'gelu' when using keras layers.
For example: get_activation('leaky_relu', use_keras_layer=True, alpha=0.1)
Returns:
A Python function corresponding to the activation function or a keras
activation layer when use_keras_layer=True.
"""
if isinstance(identifier, six.string_types):
identifier = str(identifier).lower()
if use_keras_layer:
keras_layer_allowlist = {
"relu": "relu",
"linear": "linear",
"identity": "linear",
"swish": "swish",
"sigmoid": "sigmoid",
"relu6": tf.nn.relu6,
"leaky_relu": functools.partial(tf.nn.leaky_relu, **kwargs),
"hard_swish": activations.hard_swish,
"hard_sigmoid": activations.hard_sigmoid,
"mish": activations.mish,
"gelu": functools.partial(tf.nn.gelu, **kwargs),
}
if identifier in keras_layer_allowlist:
return tf.keras.layers.Activation(keras_layer_allowlist[identifier])
name_to_fn = {
"gelu": activations.gelu,
"simple_swish": activations.simple_swish,
"hard_swish": activations.hard_swish,
"relu6": activations.relu6,
"hard_sigmoid": activations.hard_sigmoid,
"identity": activations.identity,
"mish": activations.mish,
}
if identifier in name_to_fn:
return tf.keras.activations.get(name_to_fn[identifier])
return tf.keras.activations.get(identifier)
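# Usage sketch (illustrative): both string paths described in the docstring
# above. The `approximate` kwarg is forwarded to tf.nn.gelu.
#
#   mish_fn = get_activation("mish")  # customized activation function
#   gelu_layer = get_activation("gelu", use_keras_layer=True, approximate=True)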
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
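# Usage sketch (illustrative): inside a tf.function with a partially dynamic
# signature, static dims come back as Python ints and dynamic dims as scalar
# tensors.
#
#   @tf.function(input_signature=[tf.TensorSpec([None, 128], tf.float32)])
#   def reshape_back(x):
#     batch, width = get_shape_list(x, expected_rank=2)
#     # `width` is the Python int 128; `batch` is a scalar tf.Tensor.
#     return tf.reshape(x, [batch, width])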
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
raise ValueError(
"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not "
"equal to the expected tensor rank `%s`" %
(name, actual_rank, str(tensor.shape), str(expected_rank)))
def safe_mean(losses):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total = tf.reduce_sum(losses)
num_elements = tf.cast(tf.size(losses), dtype=losses.dtype)
return tf.math.divide_no_nan(total, num_elements)
def get_replica_id():
"""Gets replica id depending on the environment."""
context = tf.distribute.get_replica_context()
if context is not None:
return context.replica_id_in_sync_group
else:
raise RuntimeError("Unknown replica context. The `get_replica_id` method "
"relies on TF 2.x tf.distribute API.")
def cross_replica_concat(value, axis, name="cross_replica_concat"):
"""Concatenates the given `value` across (GPU/TPU) cores, along `axis`.
In general, each core ("replica") will pass a
replica-specific value as `value` (corresponding to some element of a
data-parallel computation taking place across replicas).
The resulting concatenated `Tensor` will have the same shape as `value` for
all dimensions except `axis`, where it will be larger by a factor of the
number of replicas. It will also have the same `dtype` as `value`.
The position of a given replica's `value` within the resulting concatenation
is determined by that replica's replica ID. For
example:
With `value` for replica 0 given as
0 0 0
0 0 0
and `value` for replica 1 given as
1 1 1
1 1 1
the resulting concatenation along axis 0 will be
0 0 0
0 0 0
1 1 1
1 1 1
and this result will be identical across all replicas.
Note that this API only works in TF2 with `tf.distribute`.
Args:
value: The `Tensor` to concatenate across replicas. Each replica will have a
different value for this `Tensor`, and these replica-specific values will
be concatenated.
axis: The axis along which to perform the concatenation as a Python integer
(not a `Tensor`). E.g., `axis=0` to concatenate along the batch dimension.
name: A name for the operation (used to create a name scope).
Returns:
The result of concatenating `value` along `axis` across replicas.
Raises:
RuntimeError: when the batch (0-th) dimension is None.
"""
with tf.name_scope(name):
context = tf.distribute.get_replica_context()
    # Typically this could be hit only if the tensor is derived from a
    # dataset with finite epochs and drop_remainder=False, where the last
    # batch could be of a different batch size, making dim-0 dynamic in
    # shape.
if value.shape.as_list()[0] is None:
raise RuntimeError(f"{value} has unknown batch.")
return context.all_gather(value, axis=axis)
def clone_initializer(initializer):
  # Keras initializer is going to be stateless, which means reusing the same
  # initializer will produce the same init value when the shapes are the same.
if isinstance(initializer, tf.keras.initializers.Initializer):
return initializer.__class__.from_config(initializer.get_config())
# When the input is string/dict or other serialized configs, caller will
# create a new keras Initializer instance based on that, and we don't need to
# do anything
return initializer
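# Usage sketch (illustrative): clone a shared initializer instance per layer
# so two layers with the same kernel shape do not get identical init values.
#
#   init = tf.keras.initializers.GlorotUniform()
#   dense_a = tf.keras.layers.Dense(4, kernel_initializer=clone_initializer(init))
#   dense_b = tf.keras.layers.Dense(4, kernel_initializer=clone_initializer(init))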
def serialize_keras_object(obj):
if hasattr(tf.keras.utils, "legacy"):
return tf.keras.utils.legacy.serialize_keras_object(obj)
else:
return tf.keras.utils.serialize_keras_object(obj)
def deserialize_keras_object(
config, module_objects=None, custom_objects=None, printable_module_name=None
):
if hasattr(tf.keras.utils, "legacy"):
return tf.keras.utils.legacy.deserialize_keras_object(
config, custom_objects, module_objects, printable_module_name
)
else:
return tf.keras.utils.deserialize_keras_object(
config, custom_objects, module_objects, printable_module_name
)
def serialize_layer(layer, use_legacy_format=False):
if (
"use_legacy_format"
in inspect.getfullargspec(tf.keras.layers.serialize).args
):
return tf.keras.layers.serialize(layer, use_legacy_format=use_legacy_format)
else:
return tf.keras.layers.serialize(layer)
def serialize_initializer(initializer, use_legacy_format=False):
if (
"use_legacy_format"
in inspect.getfullargspec(tf.keras.initializers.serialize).args
):
return tf.keras.initializers.serialize(
initializer, use_legacy_format=use_legacy_format
)
else:
return tf.keras.initializers.serialize(initializer)
def serialize_regularizer(regularizer, use_legacy_format=False):
if (
"use_legacy_format"
in inspect.getfullargspec(tf.keras.regularizers.serialize).args
):
return tf.keras.regularizers.serialize(
regularizer, use_legacy_format=use_legacy_format
)
else:
return tf.keras.regularizers.serialize(regularizer)
def serialize_constraint(constraint, use_legacy_format=False):
if (
"use_legacy_format"
in inspect.getfullargspec(tf.keras.constraints.serialize).args
):
return tf.keras.constraints.serialize(
constraint, use_legacy_format=use_legacy_format
)
else:
return tf.keras.constraints.serialize(constraint)
def serialize_activation(activation, use_legacy_format=False):
if (
"use_legacy_format"
in inspect.getfullargspec(tf.keras.activations.serialize).args
):
return tf.keras.activations.serialize(
activation, use_legacy_format=use_legacy_format
)
else:
return tf.keras.activations.serialize(activation)
| 12,146 | 31.565684 | 80 | py |
models | models-master/official/modeling/performance.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to training performance."""
from absl import logging
import tensorflow as tf
def configure_optimizer(optimizer,
use_float16=False,
loss_scale=None,
use_graph_rewrite=None):
"""Configures optimizer object with performance options."""
if use_graph_rewrite is not None:
logging.warning('`use_graph_rewrite` is deprecated inside '
'`configure_optimizer`. Please remove the usage.')
del use_graph_rewrite
if use_float16:
if loss_scale in (None, 'dynamic'):
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer)
else:
# loss_scale is a number. We interpret that as a fixed loss scale.
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
optimizer, dynamic=False, initial_scale=loss_scale)
return optimizer
def set_mixed_precision_policy(dtype, loss_scale=None):
"""Sets the global `tf.keras.mixed_precision.Policy`."""
# TODO(b/191894773): Remove loss_scale argument
assert loss_scale is None, (
'The loss_scale argument must be None. The argument exists for '
'historical reasons and will be removed soon.')
if dtype == tf.float16:
tf.keras.mixed_precision.set_global_policy('mixed_float16')
elif dtype == tf.bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
elif dtype == tf.float32:
tf.keras.mixed_precision.set_global_policy('float32')
else:
raise ValueError('Unexpected dtype: %s' % dtype)
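# Typical wiring (illustrative; this mirrors grad_utils_test.py): set the
# global policy first, then wrap the optimizer so fp16 training uses a
# dynamic loss scale.
#
#   set_mixed_precision_policy(tf.float16)
#   optimizer = configure_optimizer(
#       tf.keras.optimizers.SGD(0.1), use_float16=True)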
| 2,159 | 39 | 74 | py |
models | models-master/official/modeling/grad_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grad_utils."""
import tensorflow as tf
from official.modeling import grad_utils
from official.modeling import performance
class GradUtilsTest(tf.test.TestCase):
def test_minimize(self):
optimizer = tf.keras.optimizers.SGD(0.1)
with tf.GradientTape() as tape:
model = tf.keras.layers.Dense(2)
outputs = model(tf.zeros((2, 2), tf.float32))
loss = tf.reduce_mean(outputs)
grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss,
model.trainable_variables)
def test_minimize_fp16(self):
optimizer = performance.configure_optimizer(
tf.keras.optimizers.SGD(0.1), use_float16=True)
performance.set_mixed_precision_policy(tf.float16)
with tf.GradientTape() as tape:
model = tf.keras.layers.Dense(2)
outputs = model(tf.zeros((2, 2), tf.float16))
loss = tf.reduce_mean(outputs)
grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss,
model.trainable_variables)
# Test other fp16 settings.
def _clip_by_global_norm(grads_and_vars):
grads, tvars = list(zip(*grads_and_vars))
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
return zip(grads, tvars)
with tf.GradientTape() as tape:
model = tf.keras.layers.Dense(2)
outputs = model(tf.zeros((2, 2), tf.float16))
loss = tf.reduce_mean(outputs)
optimizer = performance.configure_optimizer(
tf.keras.optimizers.SGD(0.1), use_float16=True, loss_scale=128)
grad_utils.minimize_using_explicit_allreduce(
tape,
optimizer,
loss,
model.trainable_variables,
pre_allreduce_callbacks=[_clip_by_global_norm],
post_allreduce_callbacks=[_clip_by_global_norm])
def test_set_mixed_precision_policy(self):
performance.set_mixed_precision_policy(tf.float16)
performance.set_mixed_precision_policy(tf.bfloat16)
performance.set_mixed_precision_policy(tf.float32)
with self.assertRaises(ValueError):
performance.set_mixed_precision_policy(tf.int32)
if __name__ == '__main__':
tf.test.main()
| 2,786 | 34.730769 | 75 | py |
models | models-master/official/modeling/grad_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some gradient util functions to help users writing custom training loop."""
from absl import logging
import tensorflow as tf
def _filter_grads(grads_and_vars):
"""Filter out iterable with grad equal to None."""
grads_and_vars = tuple(grads_and_vars)
if not grads_and_vars:
return grads_and_vars
filtered = []
vars_with_empty_grads = []
for grad, var in grads_and_vars:
if grad is None:
vars_with_empty_grads.append(var)
else:
filtered.append((grad, var))
filtered = tuple(filtered)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([v.name for _, v in grads_and_vars],))
if vars_with_empty_grads:
logging.warning(
("Gradients do not exist for variables %s when minimizing the loss."),
([v.name for v in vars_with_empty_grads]))
return filtered
def _filter_and_allreduce_gradients(grads_and_vars,
allreduce_precision="float32",
bytes_per_pack=0):
"""Filter None grads and then allreduce gradients in specified precision.
  This util function is used when users intend to explicitly allreduce
gradients and customize gradients operations before and after allreduce.
The allreduced gradients are then passed to optimizer.apply_gradients(
experimental_aggregate_gradients=False).
Args:
grads_and_vars: gradients and variables pairs.
allreduce_precision: Whether to allreduce gradients in float32 or float16.
bytes_per_pack: A non-negative integer. Breaks collective operations into
packs of certain size. If it's zero, all gradients are in one pack.
Returns:
pairs of allreduced non-None gradients and variables.
"""
filtered_grads_and_vars = _filter_grads(grads_and_vars)
(grads, variables) = zip(*filtered_grads_and_vars)
if allreduce_precision == "float16":
grads = [tf.cast(grad, "float16") for grad in grads]
hints = tf.distribute.experimental.CommunicationOptions(
bytes_per_pack=bytes_per_pack)
allreduced_grads = tf.distribute.get_strategy( # pylint: disable=protected-access
).extended._replica_ctx_all_reduce(tf.distribute.ReduceOp.SUM, grads, hints)
if allreduce_precision == "float16":
allreduced_grads = [tf.cast(grad, "float32") for grad in allreduced_grads]
return allreduced_grads, variables
def _run_callbacks(callbacks, grads_and_vars):
for callback in callbacks:
grads_and_vars = callback(grads_and_vars)
return grads_and_vars
def minimize_using_explicit_allreduce(tape,
optimizer,
loss,
trainable_variables,
pre_allreduce_callbacks=None,
post_allreduce_callbacks=None,
allreduce_bytes_per_pack=0):
"""Minimizes loss for one step by updating `trainable_variables`.
This explicitly performs gradient allreduce, instead of relying on implicit
allreduce in optimizer.apply_gradients(). If training using FP16 mixed
precision, explicit allreduce will aggregate gradients in FP16 format.
For TPU and GPU training using FP32, explicit allreduce will aggregate
gradients in FP32 format.
Args:
tape: An instance of `tf.GradientTape`.
optimizer: An instance of `tf.keras.optimizers.Optimizer`.
loss: the loss tensor.
trainable_variables: A list of model Variables.
    pre_allreduce_callbacks: A list of callback functions that take gradient
      and model variable pairs as input, manipulate them, and return new
      gradient and model variable pairs. The callback functions will be
      invoked in the list order and before gradients are allreduced. With
      mixed precision training, the pre_allreduce_callbacks will be applied
      on scaled_gradients. Default is no callbacks.
    post_allreduce_callbacks: A list of callback functions that take
      gradient and model variable pairs as input, manipulate them, and
      return new gradient and model variable pairs. The callback functions
      will be invoked in the list order and right before gradients are
      applied to variables for updates. Default is no callbacks.
allreduce_bytes_per_pack: A non-negative integer. Breaks collective
operations into packs of certain size. If it's zero, all gradients are
in one pack.
"""
if isinstance(optimizer,
tf.keras.mixed_precision.LossScaleOptimizer):
# FP16 GPU code path
with tape:
scaled_loss = optimizer.get_scaled_loss(loss)
scaled_grads = tape.gradient(scaled_loss, trainable_variables)
grads_and_vars = zip(scaled_grads, trainable_variables)
if pre_allreduce_callbacks:
grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars)
(allreduced_scaled_grads,
filtered_training_vars) = _filter_and_allreduce_gradients(
grads_and_vars,
allreduce_precision="float16",
bytes_per_pack=allreduce_bytes_per_pack)
allreduced_unscaled_grads = optimizer.get_unscaled_gradients(
allreduced_scaled_grads)
grads_and_vars = zip(allreduced_unscaled_grads, filtered_training_vars)
else:
# TPU or FP32 GPU code path
grads = tape.gradient(loss, trainable_variables)
grads_and_vars = zip(grads, trainable_variables)
if pre_allreduce_callbacks:
grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars)
(allreduced_grads,
filtered_training_vars) = _filter_and_allreduce_gradients(
grads_and_vars,
allreduce_precision="float32",
bytes_per_pack=allreduce_bytes_per_pack)
grads_and_vars = zip(allreduced_grads, filtered_training_vars)
if post_allreduce_callbacks:
grads_and_vars = _run_callbacks(post_allreduce_callbacks, grads_and_vars)
optimizer.apply_gradients(
grads_and_vars, experimental_aggregate_gradients=False)
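# Usage sketch (illustrative; adapted from grad_utils_test.py, where `model`
# is a small Dense layer): compute the loss under a GradientTape, then let
# this helper allreduce and apply the gradients.
#
#   optimizer = tf.keras.optimizers.SGD(0.1)
#   with tf.GradientTape() as tape:
#     outputs = model(tf.zeros((2, 2), tf.float32))
#     loss = tf.reduce_mean(outputs)
#   minimize_using_explicit_allreduce(tape, optimizer, loss,
#                                     model.trainable_variables)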
| 6,730 | 43.282895 | 84 | py |
models | models-master/official/modeling/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/modeling/tf_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_utils."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.modeling import tf_utils
def all_strategy_combinations():
return combinations.combine(
strategy=[
strategy_combinations.cloud_tpu_strategy,
# TODO(b/285797201):disable multi-gpu tests due to hanging.
# strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode='eager',
)
class TFUtilsTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_cross_replica_concat(self, strategy):
num_cores = strategy.num_replicas_in_sync
shape = (2, 3, 4)
def concat(axis):
@tf.function
def function():
replica_value = tf.fill(shape, tf_utils.get_replica_id())
return tf_utils.cross_replica_concat(replica_value, axis=axis)
return function
def expected(axis):
values = [np.full(shape, i) for i in range(num_cores)]
return np.concatenate(values, axis=axis)
per_replica_results = strategy.run(concat(axis=0))
replica_0_result = per_replica_results.values[0].numpy()
for value in per_replica_results.values[1:]:
self.assertAllClose(value.numpy(), replica_0_result)
self.assertAllClose(replica_0_result, expected(axis=0))
replica_0_result = strategy.run(concat(axis=1)).values[0].numpy()
self.assertAllClose(replica_0_result, expected(axis=1))
replica_0_result = strategy.run(concat(axis=2)).values[0].numpy()
self.assertAllClose(replica_0_result, expected(axis=2))
@combinations.generate(all_strategy_combinations())
def test_cross_replica_concat_gradient(self, strategy):
num_cores = strategy.num_replicas_in_sync
shape = (10, 5)
@tf.function
def function():
replica_value = tf.random.normal(shape)
with tf.GradientTape() as tape:
tape.watch(replica_value)
concat_value = tf_utils.cross_replica_concat(replica_value, axis=0)
output = tf.reduce_sum(concat_value)
return tape.gradient(output, replica_value)
per_replica_gradients = strategy.run(function)
for gradient in per_replica_gradients.values:
self.assertAllClose(gradient, num_cores * tf.ones(shape))
@parameterized.parameters(('relu', True), ('relu', False),
('leaky_relu', False), ('leaky_relu', True),
('mish', True), ('mish', False), ('gelu', True))
def test_get_activations(self, name, use_keras_layer):
fn = tf_utils.get_activation(name, use_keras_layer)
self.assertIsNotNone(fn)
@combinations.generate(all_strategy_combinations())
def test_get_leaky_relu_layer(self, strategy):
@tf.function
def forward(x):
fn = tf_utils.get_activation(
'leaky_relu', use_keras_layer=True, alpha=0.1)
return strategy.run(fn, args=(x,)).values[0]
got = forward(tf.constant([-1]))
self.assertAllClose(got, tf.constant([-0.1]))
if __name__ == '__main__':
tf.test.main()
| 3,766 | 33.559633 | 76 | py |
models | models-master/official/modeling/privacy/ops_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ops."""
from unittest import mock
import tensorflow as tf
from official.modeling.privacy import ops
class OpsTest(tf.test.TestCase):
def test_clip_l2_norm(self):
x = tf.constant([4.0, 3.0])
y = tf.constant([[12.0]])
tensors = [(x, x), (y, y)]
clipped = ops.clip_l2_norm(tensors, 1.0)
for a, b in zip(clipped, tensors):
      self.assertAllClose(a[0], b[0] / 13.0)  # sqrt(4^2 + 3^2 + 12^2) = 13
self.assertAllClose(a[1], b[1])
@mock.patch.object(tf.random,
'normal',
autospec=True)
def test_add_noise(self, mock_random):
x = tf.constant([0.0, 0.0])
y = tf.constant([[0.0]])
tensors = [(x, x), (y, y)]
mock_random.side_effect = [tf.constant([1.0, 1.0]), tf.constant([[1.0]])]
added = ops.add_noise(tensors, 10.0)
for a, b in zip(added, tensors):
self.assertAllClose(a[0], b[0] + 1.0)
self.assertAllClose(a[1], b[1])
_, kwargs = mock_random.call_args
self.assertEqual(kwargs['stddev'], 10.0)
if __name__ == '__main__':
tf.test.main()
| 1,684 | 30.792453 | 77 | py |
models | models-master/official/modeling/privacy/configs_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for configs."""
import tensorflow as tf
from official.modeling.privacy import configs
class ConfigsTest(tf.test.TestCase):
def test_clipping_norm_default(self):
clipping_norm = configs.DifferentialPrivacyConfig().clipping_norm
self.assertEqual(100000000.0, clipping_norm)
def test_noise_multiplier_default(self):
noise_multiplier = configs.DifferentialPrivacyConfig().noise_multiplier
self.assertEqual(0.0, noise_multiplier)
def test_config(self):
dp_config = configs.DifferentialPrivacyConfig(
clipping_norm=1.0,
noise_multiplier=1.0,
)
self.assertEqual(1.0, dp_config.clipping_norm)
self.assertEqual(1.0, dp_config.noise_multiplier)
if __name__ == '__main__':
tf.test.main()
| 1,359 | 31.380952 | 75 | py |
models | models-master/official/modeling/privacy/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/modeling/privacy/ops.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops for differential privacy (gradient) transforms."""
from typing import List, Tuple
import warnings
import tensorflow as tf
def clip_l2_norm(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],
l2_norm_clip: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:
"""DEPRECATED Clip gradients by global norm.
Args:
    grads_vars: List of tuples of gradients and their corresponding variables.
    l2_norm_clip: Float for the differential privacy clipping norm.
  Returns:
    List of clipped gradients and their corresponding variables.
"""
warnings.warn("`clip_l2_norm` deprecated.",
DeprecationWarning)
gradients = []
variables = []
for (g, v) in grads_vars:
gradients.append(g)
variables.append(v)
clipped_gradients = tf.clip_by_global_norm(gradients, l2_norm_clip)[0]
return list(zip(clipped_gradients, variables))
def add_noise(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],
noise_stddev: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:
"""DEPRECATED Add noise to gradients.
Args:
    grads_vars: List of tuples of gradients and their corresponding variables.
    noise_stddev: Standard deviation of the Gaussian noise added to each
      gradient.
  Returns:
    List of noised gradients and their corresponding variables.
"""
warnings.warn("`add_noise` deprecated.", DeprecationWarning)
ret = []
for (g, v) in grads_vars:
noise = tf.random.normal(tf.shape(g), stddev=noise_stddev)
ret.append((g + noise, v))
return ret
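# Combined usage sketch (illustrative DP-SGD-style step; both helpers are
# deprecated, and `grads_vars`/`optimizer` are stand-ins for values produced
# by a real training loop):
#
#   clipped = clip_l2_norm(grads_vars, l2_norm_clip=1.0)
#   noised = add_noise(clipped, noise_stddev=0.1)
#   optimizer.apply_gradients(noised)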
| 2,044 | 30.953125 | 75 | py |
models | models-master/official/modeling/privacy/configs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs for differential privacy."""
import dataclasses
from official.modeling.hyperparams import base_config
@dataclasses.dataclass
class DifferentialPrivacyConfig(base_config.Config):
# Applied to the gradients
# Setting to a large number so nothing is clipped.
  clipping_norm: float = 100000000.0  # 10^8
noise_multiplier: float = 0.0
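# Construction sketch (illustrative; mirrors configs_test.py): override the
# permissive defaults for an actual differentially private run.
#
#   dp_config = DifferentialPrivacyConfig(
#       clipping_norm=1.0, noise_multiplier=1.0)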
| 960 | 34.592593 | 74 | py |
models | models-master/official/modeling/fast_training/progressive/train_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the progressive train_lib."""
import os
from typing import Optional
from absl import flags
from absl.testing import parameterized
import dataclasses
import orbit
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import optimization
from official.modeling.hyperparams import params_dict
from official.modeling.fast_training.progressive import policies
from official.modeling.fast_training.progressive import train_lib
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib
from official.utils.testing import mock_task
FLAGS = flags.FLAGS
tfm_flags.define_flags()
@dataclasses.dataclass
class ProgTaskConfig(cfg.TaskConfig):
pass
@task_factory.register_task_cls(ProgTaskConfig)
class ProgMockTask(policies.ProgressivePolicy, mock_task.MockTask):
"""Progressive task for testing."""
  def __init__(
      self, params: cfg.TaskConfig, logging_dir: Optional[str] = None):
mock_task.MockTask.__init__(
self, params=params, logging_dir=logging_dir)
policies.ProgressivePolicy.__init__(self)
def num_stages(self):
return 2
def num_steps(self, stage_id):
return 2 if stage_id == 0 else 4
def get_model(self, stage_id, old_model=None):
del stage_id, old_model
return self.build_model()
def get_optimizer(self, stage_id):
"""Build optimizer for each stage."""
params = optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.01,
'end_learning_rate': 0.0,
'power': 1.0,
'decay_steps': 10,
},
},
'warmup': {
'polynomial': {
'power': 1,
'warmup_steps': 2,
},
'type': 'polynomial',
}
})
opt_factory = optimization.OptimizerFactory(params)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
return optimizer
def get_train_dataset(self, stage_id):
del stage_id
strategy = tf.distribute.get_strategy()
return orbit.utils.make_distributed_dataset(
strategy, self.build_inputs, None)
def get_eval_dataset(self, stage_id):
del stage_id
strategy = tf.distribute.get_strategy()
return orbit.utils.make_distributed_dataset(
strategy, self.build_inputs, None)
class TrainTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TrainTest, self).setUp()
self._test_config = {
'trainer': {
'checkpoint_interval': 10,
'steps_per_loop': 10,
'summary_interval': 10,
'train_steps': 10,
'validation_steps': 5,
'validation_interval': 10,
'continuous_eval_timeout': 1,
'optimizer_config': {
'optimizer': {
'type': 'sgd',
},
'learning_rate': {
'type': 'constant'
}
}
},
}
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
flag_mode=['train', 'eval', 'train_and_eval'],
run_post_eval=[True, False]))
def test_end_to_end(self, distribution_strategy, flag_mode, run_post_eval):
model_dir = self.get_temp_dir()
experiment_config = cfg.ExperimentConfig(
trainer=prog_trainer_lib.ProgressiveTrainerConfig(),
task=ProgTaskConfig())
experiment_config = params_dict.override_params_dict(
experiment_config, self._test_config, is_strict=False)
with distribution_strategy.scope():
task = task_factory.get_task(experiment_config.task,
logging_dir=model_dir)
_, logs = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=flag_mode,
params=experiment_config,
model_dir=model_dir,
run_post_eval=run_post_eval)
if run_post_eval:
self.assertNotEmpty(logs)
else:
self.assertEmpty(logs)
if flag_mode == 'eval':
return
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'checkpoint')))
# Tests continuous evaluation.
_, logs = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='continuous_eval',
params=experiment_config,
model_dir=model_dir,
run_post_eval=run_post_eval)
print(logs)
if __name__ == '__main__':
tf.test.main()
| 5,745 | 30.228261 | 83 | py |
models | models-master/official/modeling/fast_training/progressive/utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util classes and functions."""
from absl import logging
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.trackable import autotrackable
class VolatileTrackable(autotrackable.AutoTrackable):
"""A util class to keep Trackables that might change instances."""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def reassign_trackable(self, **kwargs):
for k, v in kwargs.items():
delattr(self, k) # untrack this object
setattr(self, k, v) # track the new object
class CheckpointWithHooks(tf.train.Checkpoint):
"""Same as tf.train.Checkpoint but supports hooks.
In progressive training, use this class instead of tf.train.Checkpoint.
Since the network architecture changes during progressive training, we need to
  prepare something (like switching to the correct architecture) before
  loading the checkpoint. This class supports a hook that will be executed
  before checkpoint loading.
"""
def __init__(self, before_load_hook, **kwargs):
self._before_load_hook = before_load_hook
super(CheckpointWithHooks, self).__init__(**kwargs)
# override
def read(self, save_path, options=None):
self._before_load_hook(save_path)
logging.info('Ran before_load_hook.')
    return super(CheckpointWithHooks, self).read(
        save_path=save_path, options=options)
| 1,989 | 33.912281 | 80 | py |
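A short sketch of the two utilities above. The hook below only prints the path; a real hook would rebuild the model for the stage recorded in the checkpoint.

import tensorflow as tf
from official.modeling.fast_training.progressive import utils

# VolatileTrackable lets a checkpoint keep tracking a slot whose object is
# replaced over time (e.g. the model changes between progressive stages).
holder = utils.VolatileTrackable(model=tf.Variable(1.0))
ckpt = utils.CheckpointWithHooks(
    before_load_hook=lambda path: print('about to read:', path),
    volatiles=holder)
holder.reassign_trackable(model=tf.Variable(2.0))  # ckpt follows the swap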
models | models-master/official/modeling/fast_training/progressive/policies.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base ProgressivePolicy definition for progressive training.
To write a progressive model, subclass ProgressivePolicy and implement its
abstract methods to handle each training stage.
"""
import abc
import dataclasses
from typing import Any, Mapping
from absl import logging
import six
import tensorflow as tf
from official.common import streamz_counters
from official.modeling.fast_training.progressive import utils
from official.modeling.hyperparams import base_config
@dataclasses.dataclass
class ProgressiveConfig(base_config.Config):
pass
@six.add_metaclass(abc.ABCMeta)
class ProgressivePolicy:
"""The APIs for handling progressive training stages.
Attributes:
cur_model: The model for the current progressive training stage.
cur_train_dataset: The train dataset function for the current stage.
cur_eval_dataset: The eval dataset function for the current stage.
cur_optimizer: The optimizer for the current stage.
cur_checkpoint_items: Items to be saved in and restored from checkpoints,
for the progressive trainer.
is_last_stage: Whether it is currently in the last stage.
Interfaces:
    is_stage_advancing: Returns whether progressive training is advancing to
      the next stage.
update_pt_stage: Update progressive training stage.
"""
def __init__(self):
"""Initialize stage policy."""
self._cur_train_dataset = None
self._cur_eval_dataset = None
self._volatiles = utils.VolatileTrackable(optimizer=None, model=None)
stage_id = 0
self._stage_id = tf.Variable(
stage_id,
trainable=False,
dtype=tf.int64,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
shape=[])
self._volatiles.reassign_trackable(
optimizer=self.get_optimizer(stage_id),
model=self.get_model(stage_id, old_model=None)) # pytype: disable=wrong-arg-types # typed-keras
streamz_counters.progressive_policy_creation_counter.get_cell(
).increase_by(1)
def compute_stage_id(self, global_step: int) -> int:
for stage_id in range(self.num_stages()):
global_step -= self.num_steps(stage_id)
if global_step < 0:
return stage_id
    logging.error('Global step %d matches no progressive stage; '
                  'defaulting to the last stage.', global_step)
return self.num_stages() - 1
@abc.abstractmethod
def num_stages(self) -> int:
"""Return the total number of progressive stages."""
pass
@abc.abstractmethod
def num_steps(self, stage_id: int) -> int:
"""Return the total number of steps in this stage."""
pass
@abc.abstractmethod
def get_model(self,
stage_id: int,
old_model: tf.keras.Model = None) -> tf.keras.Model: # pytype: disable=annotation-type-mismatch # typed-keras
"""Return model for this stage. For initialization, `old_model` = None."""
pass
@abc.abstractmethod
def get_optimizer(self, stage_id: int) -> tf.keras.optimizers.Optimizer:
"""Return optimizer for this stage."""
pass
@abc.abstractmethod
def get_train_dataset(self, stage_id: int) -> tf.data.Dataset:
"""Return training Dataset for this stage."""
pass
@abc.abstractmethod
def get_eval_dataset(self, stage_id: int) -> tf.data.Dataset:
"""Return evaluation Dataset for this stage."""
pass
@property
def cur_model(self) -> tf.keras.Model:
return self._volatiles.model
@property
def cur_train_dataset(self) -> tf.data.Dataset:
if self._cur_train_dataset is None:
self._cur_train_dataset = self.get_train_dataset(self._stage_id.numpy())
return self._cur_train_dataset
@property
def cur_eval_dataset(self) -> tf.data.Dataset:
if self._cur_eval_dataset is None:
self._cur_eval_dataset = self.get_eval_dataset(self._stage_id.numpy())
return self._cur_eval_dataset
@property
def cur_optimizer(self) -> tf.keras.optimizers.Optimizer:
return self._volatiles.optimizer
@property
def is_last_stage(self) -> bool:
stage_id = self._stage_id.numpy()
return stage_id >= self.num_stages() - 1
@property
def cur_checkpoint_items(self) -> Mapping[str, Any]:
return dict(stage_id=self._stage_id, volatiles=self._volatiles)
def is_stage_advancing(self, global_step: int) -> bool:
old_stage_id = self._stage_id.numpy()
new_stage_id = self.compute_stage_id(global_step)
return old_stage_id != new_stage_id
def update_pt_stage(self, global_step: int, pass_old_model=True) -> None:
"""Update progressive training internal status.
Call this after a training loop ends.
Args:
global_step: an integer scalar of the current global step.
      pass_old_model: whether to pass the old_model to the get_model()
        function. This is set to False if the old_model is irrelevant (e.g.,
        just a default model from stage 0).
"""
old_stage_id = self._stage_id.numpy()
new_stage_id = self.compute_stage_id(global_step)
logging.info('Switching stage from %d to %d', old_stage_id, new_stage_id)
# Update stage id.
self._stage_id.assign(new_stage_id)
# Update dataset function.
self._cur_train_dataset = None
self._cur_eval_dataset = None
# Update optimizer and model.
new_optimizer = self.get_optimizer(new_stage_id)
self._volatiles.reassign_trackable(optimizer=new_optimizer)
new_model = self.get_model(
new_stage_id, old_model=self.cur_model if pass_old_model else None)
self._volatiles.reassign_trackable(model=new_model)
| 6,138 | 33.296089 | 127 | py |
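The stage arithmetic in compute_stage_id can be mirrored standalone; below is a sketch with the same two-stage schedule used by the tests in this directory (2 steps, then 4).

def compute_stage_id(global_step, steps_per_stage=(2, 4)):
  """Standalone mirror of ProgressivePolicy.compute_stage_id (illustrative)."""
  for stage_id, num_steps in enumerate(steps_per_stage):
    global_step -= num_steps
    if global_step < 0:
      return stage_id
  return len(steps_per_stage) - 1  # past the schedule: clamp to the last stage

assert [compute_stage_id(s) for s in range(7)] == [0, 0, 1, 1, 1, 1, 1]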
models | models-master/official/modeling/fast_training/progressive/train_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM progressive training driver library.
Compared to the common training driver, the only difference is that we use
prog_trainer_lib.ProgressiveTrainer instead of the base trainer.
"""
# pytype: disable=attribute-error
import os
from typing import Any, Mapping, Tuple
# Import libraries
from absl import logging
import orbit
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions
from official.core import train_lib as base_train_lib
from official.modeling.fast_training.progressive import trainer as prog_trainer_lib
def run_experiment(distribution_strategy: tf.distribute.Strategy,
task: base_task.Task,
mode: str,
params: config_definitions.ExperimentConfig,
model_dir: str,
run_post_eval: bool = False,
save_summary: bool = True) \
-> Tuple[tf.keras.Model, Mapping[str, Any]]:
"""Runs train/eval configured by the experiment params.
Args:
    distribution_strategy: A distribution strategy.
task: A Task instance.
mode: A 'str', specifying the mode. Can be 'train', 'eval', 'train_and_eval'
or 'continuous_eval'.
params: ExperimentConfig instance.
model_dir: A 'str', a path to store model checkpoints and summaries.
run_post_eval: Whether to run post eval once after training, metrics logs
are returned.
save_summary: Whether to save train and validation summary.
Returns:
A 2-tuple of (model, eval_logs).
model: `tf.keras.Model` instance.
eval_logs: returns eval metrics logs when run_post_eval is set to True,
otherwise, returns {}.
"""
with distribution_strategy.scope():
logging.info('Running progressive trainer.')
trainer = prog_trainer_lib.ProgressiveTrainer(
params, task, ckpt_dir=model_dir,
train='train' in mode,
evaluate=('eval' in mode) or run_post_eval,
checkpoint_exporter=base_train_lib.maybe_create_best_ckpt_exporter(
params, model_dir))
if trainer.checkpoint:
checkpoint_manager = tf.train.CheckpointManager(
trainer.checkpoint,
directory=model_dir,
max_to_keep=params.trainer.max_to_keep,
step_counter=trainer.global_step,
checkpoint_interval=params.trainer.checkpoint_interval,
init_fn=trainer.initialize)
else:
checkpoint_manager = None
controller = orbit.Controller(
strategy=distribution_strategy,
trainer=trainer if 'train' in mode else None,
evaluator=trainer,
global_step=trainer.global_step,
steps_per_loop=params.trainer.steps_per_loop,
checkpoint_manager=checkpoint_manager,
summary_dir=os.path.join(model_dir, 'train') if (save_summary) else None,
eval_summary_dir=os.path.join(model_dir, 'validation') if
(save_summary) else None,
summary_interval=params.trainer.summary_interval if
(save_summary) else None)
logging.info('Starts to execute mode: %s', mode)
with distribution_strategy.scope():
if mode == 'train':
controller.train(steps=params.trainer.train_steps)
elif mode == 'train_and_eval':
controller.train_and_evaluate(
train_steps=params.trainer.train_steps,
eval_steps=params.trainer.validation_steps,
eval_interval=params.trainer.validation_interval)
elif mode == 'eval':
controller.evaluate(steps=params.trainer.validation_steps)
elif mode == 'continuous_eval':
      def timeout_fn():
        return trainer.global_step.numpy() >= params.trainer.train_steps
controller.evaluate_continuously(
steps=params.trainer.validation_steps,
timeout=params.trainer.continuous_eval_timeout,
timeout_fn=timeout_fn)
else:
raise NotImplementedError('The mode is not implemented: %s' % mode)
if run_post_eval:
with distribution_strategy.scope():
return trainer.model, trainer.evaluate(
tf.convert_to_tensor(params.trainer.validation_steps))
else:
return trainer.model, {}
| 4,740 | 36.330709 | 83 | py |
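A hedged call-site sketch, mirroring train.py below and the unit test above; `params` and `model_dir` are assumed to come from flag parsing and are not defined here.

# Sketch only -- assumes `params`, `model_dir` and a registered task config.
#   distribution_strategy = tf.distribute.get_strategy()
#   with distribution_strategy.scope():
#     task = task_factory.get_task(params.task, logging_dir=model_dir)
#   model, eval_logs = train_lib.run_experiment(
#       distribution_strategy=distribution_strategy,
#       task=task,
#       mode='train_and_eval',
#       params=params,
#       model_dir=model_dir,
#       run_post_eval=True)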
models | models-master/official/modeling/fast_training/progressive/trainer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the progressive trainer."""
# pylint: disable=g-direct-tensorflow-import
import os
from absl.testing import parameterized
import orbit
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import config_definitions as cfg
from official.modeling import optimization
from official.modeling.fast_training.progressive import policies
from official.modeling.fast_training.progressive import trainer as trainer_lib
from official.nlp.configs import bert
from official.utils.testing import mock_task
def all_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],)
def get_exp_config():
return cfg.ExperimentConfig(
task=cfg.TaskConfig(
model=bert.PretrainerConfig()),
trainer=trainer_lib.ProgressiveTrainerConfig(
export_checkpoint=True,
export_checkpoint_interval=1,
export_only_final_stage_ckpt=False))
class TestPolicy(policies.ProgressivePolicy, mock_task.MockTask):
"""Just for testing purposes."""
def __init__(self, strategy, task_config, change_train_dataset=True):
self._strategy = strategy
self._change_train_dataset = change_train_dataset
self._my_train_dataset = None
mock_task.MockTask.__init__(self, params=task_config, logging_dir=None)
policies.ProgressivePolicy.__init__(self)
def num_stages(self) -> int:
return 2
def num_steps(self, stage_id: int) -> int:
return 2 if stage_id == 0 else 4
def get_model(self,
stage_id: int,
old_model: tf.keras.Model) -> tf.keras.Model:
del stage_id, old_model
return self.build_model()
def get_optimizer(self, stage_id: int) -> tf.keras.optimizers.Optimizer:
optimizer_type = 'sgd' if stage_id == 0 else 'adamw'
optimizer_config = cfg.OptimizationConfig({
'optimizer': {'type': optimizer_type},
'learning_rate': {'type': 'constant'}})
opt_factory = optimization.OptimizerFactory(optimizer_config)
return opt_factory.build_optimizer(opt_factory.build_learning_rate())
def get_train_dataset(self, stage_id: int) -> tf.data.Dataset:
if not self._change_train_dataset and self._my_train_dataset:
return self._my_train_dataset
if self._strategy:
self._my_train_dataset = orbit.utils.make_distributed_dataset(
self._strategy,
self._build_inputs,
stage_id)
else:
self._my_train_dataset = self._build_inputs(stage_id)
return self._my_train_dataset
def get_eval_dataset(self, stage_id: int) -> tf.data.Dataset:
if self._strategy:
return orbit.utils.make_distributed_dataset(
self._strategy,
self._build_inputs,
stage_id)
return self._build_inputs(stage_id)
def _build_inputs(self, stage_id):
def dummy_data(_):
batch_size = 2 if stage_id == 0 else 1
x = tf.zeros(shape=(batch_size, 2), dtype=tf.float32)
label = tf.zeros(shape=(batch_size, 1), dtype=tf.float32)
return x, label
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
return dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
class TrainerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TrainerTest, self).setUp()
self._config = get_exp_config()
def create_test_trainer(self, distribution, model_dir, change_train_dataset):
trainer = trainer_lib.ProgressiveTrainer(
self._config,
prog_task=TestPolicy(
distribution, self._config.task, change_train_dataset),
ckpt_dir=model_dir)
return trainer
@combinations.generate(all_strategy_combinations())
def test_checkpointing(self, distribution):
model_dir = self.get_temp_dir()
ckpt_file = os.path.join(model_dir, 'ckpt')
with distribution.scope():
trainer = self.create_test_trainer(distribution, model_dir, True)
self.assertFalse(trainer._task.is_last_stage)
trainer.train(tf.convert_to_tensor(4, dtype=tf.int32))
self.assertTrue(trainer._task.is_last_stage)
trainer.checkpoint.save(ckpt_file)
trainer = self.create_test_trainer(distribution, model_dir, True)
self.assertFalse(trainer._task.is_last_stage)
trainer.checkpoint.restore(ckpt_file + '-1')
self.assertTrue(trainer._task.is_last_stage)
@combinations.generate(all_strategy_combinations())
def test_train_dataset(self, distribution):
model_dir = self.get_temp_dir()
with distribution.scope():
trainer = self.create_test_trainer(distribution, model_dir, True)
# Using dataset of stage == 0
train_iter = tf.nest.map_structure(iter, trainer.train_dataset)
train_data = train_iter.next()[0]
if distribution.num_replicas_in_sync > 1:
train_data = train_data.values[0]
self.assertEqual(train_data.shape[0], 2)
trainer.train(tf.convert_to_tensor(4, dtype=tf.int32))
# Using dataset of stage == 1
train_iter = tf.nest.map_structure(iter, trainer.train_dataset)
train_data = train_iter.next()[0]
if distribution.num_replicas_in_sync > 1:
train_data = train_data.values[0]
self.assertEqual(train_data.shape[0], 1)
with self.assertRaises(SyntaxError):
trainer.train_dataset = None
@combinations.generate(all_strategy_combinations())
def test_train_dataset_no_switch(self, distribution):
model_dir = self.get_temp_dir()
with distribution.scope():
trainer = self.create_test_trainer(distribution, model_dir, False)
trainer.train(tf.convert_to_tensor(2, dtype=tf.int32))
# _train_iter is not reset since the dataset is not changed.
self.assertIsNotNone(trainer._train_iter)
with distribution.scope():
trainer = self.create_test_trainer(distribution, model_dir, True)
trainer.train(tf.convert_to_tensor(2, dtype=tf.int32))
# _train_iter is reset since the dataset changed.
self.assertIsNone(trainer._train_iter)
class TrainerWithMaskedLMTaskTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TrainerWithMaskedLMTaskTest, self).setUp()
self._config = get_exp_config()
def create_test_trainer(self, distribution):
trainer = trainer_lib.ProgressiveTrainer(
self._config,
prog_task=TestPolicy(distribution, self._config.task),
ckpt_dir=self.get_temp_dir())
return trainer
@combinations.generate(all_strategy_combinations())
def test_trainer_train(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(distribution)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_validate(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(distribution)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('validation_loss', logs)
self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mixed_precision_dtype=['float32', 'bfloat16', 'float16'],
loss_scale=[None, 'dynamic', 128, 256],
))
def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
config = cfg.ExperimentConfig(
task=cfg.TaskConfig(
model=bert.PretrainerConfig()),
runtime=cfg.RuntimeConfig(
mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
trainer=trainer_lib.ProgressiveTrainerConfig(
export_checkpoint=True,
export_checkpoint_interval=1,
export_only_final_stage_ckpt=False))
task = TestPolicy(None, config.task)
trainer = trainer_lib.ProgressiveTrainer(config, task, self.get_temp_dir())
if mixed_precision_dtype != 'float16':
self.assertIsInstance(
trainer.optimizer,
(tf.keras.optimizers.SGD, tf.keras.optimizers.legacy.SGD))
elif mixed_precision_dtype == 'float16' and loss_scale is None:
self.assertIsInstance(
trainer.optimizer,
(tf.keras.optimizers.SGD, tf.keras.optimizers.legacy.SGD))
metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', metrics)
if __name__ == '__main__':
tf.test.main()
| 9,288 | 37.226337 | 80 | py |
models | models-master/official/modeling/fast_training/progressive/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM binary for the progressive trainer."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_utils
from official.modeling import performance
from official.modeling.fast_training.progressive import train_lib
FLAGS = flags.FLAGS
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 2,596 | 36.1 | 80 | py |
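A typical launch of this binary, assuming the standard TFM flags defined by tfm_flags.define_flags(); the experiment name and paths below are placeholders.

# Hypothetical invocation (flag names from official/common/flags.py):
#   python3 -m official.modeling.fast_training.progressive.train \
#     --experiment=<registered_experiment_name> \
#     --mode=train_and_eval \
#     --model_dir=/tmp/prog_model \
#     --config_file=/path/to/overrides.yaml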
models | models-master/official/modeling/fast_training/progressive/trainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Progressive Trainer implementation.
The trainer implements the Orbit `StandardTrainer` and
`StandardEvaluator` interfaces. Trainers inside this project should be
interchangeable and independent of model architectures and tasks.
"""
import dataclasses
import os
from typing import Any, Optional
# Import libraries
from absl import logging
import gin
import orbit
import tensorflow as tf
from official.core import base_task
from official.core import base_trainer as trainer_lib
from official.core import config_definitions
from official.modeling.fast_training.progressive import policies
from official.modeling.fast_training.progressive import utils
ExperimentConfig = config_definitions.ExperimentConfig
@dataclasses.dataclass
class ProgressiveTrainerConfig(config_definitions.TrainerConfig):
"""Configuration for progressive trainer.
Attributes:
progressive: A task-specific config. Users can subclass ProgressiveConfig
and define any task-specific settings in their subclass.
export_checkpoint: A bool. Whether to export checkpoints in non-progressive
manner (without the volatiles wrapper) such that your down-stream tasks
can load checkpoints from a progressive trainer as if it is a regular
checkpoint.
    export_checkpoint_interval: An int. The number of steps between exporting
checkpoints. If None (by default), will use the same value as
TrainerConfig.checkpoint_interval.
export_max_to_keep: The maximum number of exported checkpoints to keep.
If None (by default), will use the same value as
TrainerConfig.max_to_keep.
export_only_final_stage_ckpt: A bool. Whether to just export checkpoints
during the final progressive training stage. In other words, whether to
not export small, partial models. In many cases, it is not meaningful to
finetune a small, partial model in down-stream tasks.
"""
progressive: Optional[policies.ProgressiveConfig] = None
export_checkpoint: bool = True
export_checkpoint_interval: Optional[int] = None
export_max_to_keep: Optional[int] = None
export_only_final_stage_ckpt: bool = True
@gin.configurable
class ProgressiveTrainer(trainer_lib.Trainer):
"""Implements the progressive trainer shared for TensorFlow models."""
def __init__(
self,
config: ExperimentConfig,
prog_task: base_task.Task, # also implemented ProgressivePolicy.
ckpt_dir: str = '',
train: bool = True,
evaluate: bool = True,
checkpoint_exporter: Any = None):
"""Initialize common trainer for TensorFlow models.
Args:
config: An `ExperimentConfig` instance specifying experiment config.
prog_task: An instance both implemented policies.ProgressivePolicy and
base_task.Task.
ckpt_dir: Checkpoint directory.
train: bool, whether or not this trainer will be used for training.
default to True.
evaluate: bool, whether or not this trainer will be used for evaluation.
default to True.
checkpoint_exporter: an object that has the `maybe_export_checkpoint`
interface.
"""
# Gets the current distribution strategy. If not inside any strategy scope,
# it gets a single-replica no-op strategy.
self._strategy = tf.distribute.get_strategy()
self._config = config
self._runtime_options = trainer_lib.get_runtime_options(config)
self._task = prog_task
# Directory for non-progressive checkpoint
self._export_ckpt_dir = os.path.join(ckpt_dir, 'exported_ckpts')
tf.io.gfile.makedirs(self._export_ckpt_dir)
self._export_ckpt_manager = None
# Receive other checkpoint export, e.g, best checkpoint exporter.
# TODO(lehou): unify the checkpoint exporting logic, although the default
# setting does not use checkpoint_exporter.
self._checkpoint_exporter = checkpoint_exporter
self._global_step = orbit.utils.create_global_step()
self._checkpoint = utils.CheckpointWithHooks(
before_load_hook=self._update_pt_stage_from_ckpt,
global_step=self.global_step,
**self._task.cur_checkpoint_items)
self._train_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
self._validation_loss = tf.keras.metrics.Mean(
'validation_loss', dtype=tf.float32)
self._train_metrics = self.task.build_metrics(
training=True) + self.model.metrics
self._validation_metrics = self.task.build_metrics(
training=False) + self.model.metrics
if train:
orbit.StandardTrainer.__init__(
self,
None, # Manage train_dataset by ourselves, not by StandardTrainer.
options=orbit.StandardTrainerOptions(
use_tf_while_loop=config.trainer.train_tf_while_loop,
use_tf_function=config.trainer.train_tf_function))
if evaluate:
orbit.StandardEvaluator.__init__(
self,
None, # Manage train_dataset by ourselves, not by StandardEvaluator.
options=orbit.StandardEvaluatorOptions(
use_tf_function=config.trainer.eval_tf_function))
@property
def model(self):
return self._task.cur_model
@property
def optimizer(self):
return self._task.cur_optimizer
# override
@property
def train_dataset(self):
"""Overriding StandardTrainer.train_dataset."""
return self._task.cur_train_dataset
# override
@train_dataset.setter
def train_dataset(self, _):
    raise SyntaxError('Please do not set train_dataset. Progressive training '
                      'relies on the progressive policy to manage the train '
                      'dataset.')
# override
@property
def eval_dataset(self):
"""Overriding StandardEvaluator.eval_dataset."""
return self._task.cur_eval_dataset
# override
@eval_dataset.setter
def eval_dataset(self, _):
    raise SyntaxError('Please do not set eval_dataset. Progressive training '
                      'relies on the progressive policy to manage the eval '
                      'dataset.')
def train_loop_end(self):
"""See base class."""
logs = {}
for metric in self.train_metrics + [self.train_loss]:
logs[metric.name] = metric.result()
metric.reset_states()
if callable(self.optimizer.learning_rate):
logs['learning_rate'] = self.optimizer.learning_rate(
self.optimizer.iterations)
else:
logs['learning_rate'] = self.optimizer.learning_rate
self._maybe_export_non_progressive_checkpoint(self._export_ckpt_dir)
if self._task.is_stage_advancing(self.global_step.numpy()):
old_train_dataset = self.train_dataset
# Update progressive properties
self._task.update_pt_stage(self.global_step.numpy())
# Setting `self._train_loop_fn` and `self._eval_loop_fn` to None will
# rebuild the train and eval functions with the updated model.
self._train_loop_fn = None
self._eval_loop_fn = None
if self.train_dataset != old_train_dataset:
# Setting `self._train_iter` to None will rebuild the dataset iterator.
self._train_iter = None
# Setting `self._export_ckpt_manager` to None will rebuild the checkpoint
# for exporting.
self._export_ckpt_manager = None
return logs
def _update_pt_stage_from_ckpt(self, ckpt_file):
"""Update stage properties based on the global_step variable in a ckpt file.
    Before loading variables from a checkpoint file, we need to go to the
    correct stage and build the corresponding model and optimizer, to make sure
    that we restore variables of the right model and optimizer.
Args:
ckpt_file: Checkpoint file that will be restored/read from.
"""
if not ckpt_file:
return
ckpt = tf.train.Checkpoint(global_step=self.global_step)
ckpt.read(ckpt_file).expect_partial().assert_existing_objects_matched()
if self._task.is_stage_advancing(self.global_step.numpy()):
old_train_dataset = self.train_dataset
# Update progressive properties
self._task.update_pt_stage(self.global_step.numpy(), pass_old_model=False)
# Setting `self._train_loop_fn` and `self._eval_loop_fn` to None will
# rebuild the train and eval functions with the updated model.
self._train_loop_fn = None
self._eval_loop_fn = None
if self.train_dataset != old_train_dataset:
# Setting `self._train_iter` to None will rebuild the dataset iterator.
self._train_iter = None
# Setting `self._export_ckpt_manager` to None will rebuild the checkpoint
# for exporting.
self._export_ckpt_manager = None
def _maybe_export_non_progressive_checkpoint(self, export_ckpt_dir):
"""Export checkpoints in non-progressive format.
This basically removes the wrapping of self._task.cur_checkpoint_items
-- just save the model, optimizer, etc., directly.
    The purpose is to let your downstream tasks use these checkpoints.
Args:
export_ckpt_dir: A str. folder of exported checkpoints.
"""
if not self.config.trainer.export_checkpoint:
logging.info('Not exporting checkpoints.')
return
if not self._task.is_last_stage and (
self.config.trainer.export_only_final_stage_ckpt):
logging.info('Not exporting checkpoints until the last stage.')
return
if self._export_ckpt_manager is None:
# Create a checkpoint object just now, to make sure we use
# progressive_policy.cur_model and progressive_policy.cur_optimizer of the
# current stage.
if hasattr(self.model, 'checkpoint_items'):
checkpoint_items = self.model.checkpoint_items
else:
checkpoint_items = {}
checkpoint = tf.train.Checkpoint(
global_step=self.global_step,
model=self.model,
optimizer=self.optimizer,
**checkpoint_items)
max_to_keep = self.config.trainer.export_max_to_keep or (
self.config.trainer.max_to_keep)
checkpoint_interval = self.config.trainer.export_checkpoint_interval or (
self.config.trainer.checkpoint_interval)
self._export_ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=export_ckpt_dir,
checkpoint_name='ckpt',
step_counter=self.global_step,
max_to_keep=max_to_keep,
checkpoint_interval=checkpoint_interval,
)
# Make sure we export the last checkpoint.
last_checkpoint = (
self.global_step.numpy() == self._config.trainer.train_steps)
checkpoint_path = self._export_ckpt_manager.save(
checkpoint_number=self.global_step.numpy(),
check_interval=not last_checkpoint)
if checkpoint_path:
logging.info('Checkpoints exported: %s.', checkpoint_path)
| 11,293 | 37.284746 | 80 | py |
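A sketch of configuring checkpoint export, mirroring get_exp_config in trainer_test.py above; the interval value is arbitrary.

from official.modeling.fast_training.progressive import trainer as trainer_lib

trainer_config = trainer_lib.ProgressiveTrainerConfig(
    export_checkpoint=True,
    export_checkpoint_interval=1000,  # defaults to checkpoint_interval
    export_only_final_stage_ckpt=False)  # also export small, partial models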
models | models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stacking model horizontally."""
from absl import logging
import numpy as np
import tensorflow as tf
def expand_vector(v: np.ndarray) -> np.ndarray:
"""Expands a vector with batch dimensions.
  Each element is repeated twice along the last axis: [1, 2] -> [1, 1, 2, 2].
Args:
v: A vector with shape [..., a].
Returns:
A vector with shape [..., 2 * a].
"""
return np.repeat(v, 2, axis=-1)
def expand_1_axis(w: np.ndarray,
epsilon: float,
axis: int) -> np.ndarray:
"""Expands either the first dimension or the last dimension of w.
If `axis = 0`, the following constraint will be satisfied:
matmul(x, w) ==
matmul(expand_vector(x), expand_1_axis(w, epsilon=0.1, axis=0))
If `axis = -1`, the following constraint will be satisfied if `epsilon = 0.0`:
expand_vector(matmul(x, w)) ==
2 * matmul(x, expand_1_axis(w, epsilon=0.0, axis=-1))
Args:
w: Numpy array of shape [a_0, a_1, ..., a_i-1, a_i].
epsilon: Symmetric Noise added to expanded tensor.
axis: Must be either 0 or -1.
Returns:
Expanded numpy array.
"""
assert axis in (0, -1), (
"Only support expanding the first or the last dimension. "
"Got: {}".format(axis))
rank = len(w.shape)
d_w = np.random.normal(np.zeros_like(w), np.fabs(w) * epsilon, w.shape)
d_w = np.repeat(d_w, 2, axis=axis)
sign_flip = np.array([1, -1])
for _ in range(rank - 1):
sign_flip = np.expand_dims(sign_flip, axis=-1 if axis == 0 else 0)
sign_flip = np.tile(sign_flip,
[w.shape[0]] + [1] * (rank - 2) + [w.shape[-1]])
d_w *= sign_flip
w_expand = (np.repeat(w, 2, axis=axis) + d_w) / 2
return w_expand
def expand_2_axes(w: np.ndarray,
epsilon: float) -> np.ndarray:
"""Expands the first dimension and the last dimension of w.
The following constraint will be satisfied:
expand_vector(matmul(x, w)) == matmul(expand_vector(x), expand_2_axes(w))
Args:
w: Numpy array of shape [a_0, a_1, ..., a_i-1, a_i].
epsilon: Symmetric Noise added to expanded tensor.
Returns:
Expanded numpy array.
"""
rank = len(w.shape)
d_w = np.random.normal(np.zeros_like(w), np.fabs(w) * epsilon, w.shape)
d_w = np.repeat(np.repeat(d_w, 2, axis=0), 2, axis=-1)
sign_flip = np.array([1, -1])
for _ in range(rank - 1):
sign_flip = np.expand_dims(sign_flip, axis=-1)
sign_flip = np.tile(sign_flip,
[w.shape[0]] + [1] * (rank - 2) + [w.shape[-1] * 2])
d_w *= sign_flip
w_expand = (np.repeat(np.repeat(w, 2, axis=0), 2, axis=-1) + d_w) / 2
return w_expand
def var_to_var(var_from: tf.Variable,
var_to: tf.Variable,
epsilon: float):
"""Expands a variable to another variable.
Assume the shape of `var_from` is (a, b, ..., y, z), the shape of `var_to`
can be (a, ..., z * 2), (a * 2, ..., z * 2), (a * 2, ..., z)
If the shape of `var_to` is (a, ..., 2 * z):
For any x, tf.matmul(x, var_to) ~= expand_vector(tf.matmul(x, var_from)) / 2
  Note that there will be noise added to the left-hand side, if epsilon != 0.
If the shape of `var_to` is (2 * a, ..., z):
For any x, tf.matmul(expand_vector(x), var_to) == tf.matmul(x, var_from)
If the shape of `var_to` is (2 * a, ..., 2 * z):
For any x, tf.matmul(expand_vector(x), var_to) ==
      expand_vector(tf.matmul(x, var_from))
Args:
var_from: input variable to expand.
var_to: output variable.
epsilon: the noise ratio that will be added, when splitting `var_from`.
"""
shape_from = var_from.shape
shape_to = var_to.shape
if shape_from == shape_to:
var_to.assign(var_from)
elif len(shape_from) == 1 and len(shape_to) == 1:
var_to.assign(expand_vector(var_from.numpy()))
elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] == shape_to[-1]:
var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon, axis=0))
elif shape_from[0] == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon, axis=-1))
elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
var_to.assign(expand_2_axes(var_from.numpy(), epsilon=epsilon))
else:
raise ValueError("Shape not supported, {}, {}".format(shape_from, shape_to))
def model_to_model_2x_wide(model_from: tf.Module,
model_to: tf.Module,
epsilon: float = 0.1):
"""Expands a model to a wider version.
Also makes sure that the output of the model is not changed after expanding.
For example:
```
model_narrow = tf.keras.Sequential()
model_narrow.add(tf.keras.Input(shape=(3,)))
model_narrow.add(tf.keras.layers.Dense(4))
model_narrow.add(tf.keras.layers.Dense(1))
model_wide = tf.keras.Sequential()
model_wide.add(tf.keras.Input(shape=(6,)))
model_wide.add(tf.keras.layers.Dense(8))
model_wide.add(tf.keras.layers.Dense(1))
model_to_model_2x_wide(model_narrow, model_wide)
assert model_narrow([[1, 2, 3]]) == model_wide([[1, 1, 2, 2, 3, 3]])
```
We assume that `model_from` and `model_to` has the same architecture and only
widths of them differ.
Args:
model_from: input model to expand.
model_to: output model whose variables will be assigned expanded values
according to `model_from`.
epsilon: the noise ratio that will be added, when splitting `var_from`.
"""
for w_from, w_to in zip(model_from.trainable_variables,
model_to.trainable_variables):
logging.info("expanding %s %s to %s %s",
w_from.name, w_from.shape, w_to.name, w_to.shape)
var_to_var(w_from, w_to, epsilon=epsilon)
| 6,307 | 32.73262 | 80 | py |
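A quick numeric check of the axis-0 invariant documented in expand_1_axis; the shapes, seed, and epsilon are arbitrary.

import numpy as np
from official.modeling.fast_training.experimental import tf2_utils_2x_wide

rng = np.random.default_rng(0)
x = rng.normal(size=(2,))
w = rng.normal(size=(2, 3))

# Feeding duplicated features through the axis-0-expanded kernel reproduces
# the original output exactly: the added symmetric noise cancels in pairs.
w_wide = tf2_utils_2x_wide.expand_1_axis(w, epsilon=0.1, axis=0)
np.testing.assert_allclose(
    tf2_utils_2x_wide.expand_vector(x) @ w_wide, x @ w, atol=1e-12)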
models | models-master/official/modeling/fast_training/experimental/tf2_utils_2x_wide_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf2_utils_2x_wide."""
import numpy as np
import tensorflow as tf
from official.modeling.fast_training.experimental import tf2_utils_2x_wide
class Tf2Utils2XWideTest(tf.test.TestCase):
def test_expand_vector(self):
x = np.array([1, 2])
self.assertAllClose(tf2_utils_2x_wide.expand_vector(x),
np.array([1, 1, 2, 2]))
def test_expand_matrix(self):
x = np.array([[1, 2], [3, 4]])
x = tf2_utils_2x_wide.expand_2_axes(x, epsilon=0.1)
self.assertAllClose(x[0, :] + x[1, :], np.array([1, 1, 2, 2]))
self.assertAllClose(x[2, :] + x[3, :], np.array([3, 3, 4, 4]))
def test_expand_matrix_axis_0(self):
x = np.array([[1, 2], [3, 4]])
x = tf2_utils_2x_wide.expand_1_axis(x, axis=0, epsilon=0.1)
self.assertAllClose(x[0, :] + x[1, :], np.array([1, 2]))
self.assertAllClose(x[2, :] + x[3, :], np.array([3, 4]))
def test_expand_matrix_axis_1(self):
x = np.array([[1, 2], [3, 4]])
x = tf2_utils_2x_wide.expand_1_axis(x, axis=-1, epsilon=0.1)
self.assertAllClose(x[:, 0] + x[:, 1], np.array([1, 3]))
self.assertAllClose(x[:, 2] + x[:, 3], np.array([2, 4]))
def test_expand_3d_tensor(self):
x0 = np.array([10, 11])
x1 = np.array([10, 10, 11, 11])
w0 = np.random.rand(2, 2)
w1 = tf2_utils_2x_wide.expand_2_axes(w0, epsilon=0.1)
o0 = np.matmul(x0, w0)
o1 = np.matmul(x1, w1)
self.assertAllClose(np.repeat(o0, 2, axis=-1), o1)
def test_expand_3d_tensor_axis_0(self):
x0 = np.array([10, 11])
x1 = np.array([10, 10, 11, 11])
w0 = np.random.rand(2, 2)
w1 = tf2_utils_2x_wide.expand_1_axis(w0, axis=0, epsilon=0.1)
o0 = np.matmul(x0, w0)
o1 = np.matmul(x1, w1)
self.assertAllClose(o0, o1)
def test_expand_3d_tensor_axis_2(self):
x = np.array([10, 11])
w0 = np.random.rand(2, 2)
w1 = tf2_utils_2x_wide.expand_1_axis(w0, axis=-1, epsilon=0.1)
o0 = np.matmul(x, w0)
o1 = np.matmul(x, w1)
self.assertAllClose(o0, np.sum(o1.reshape(2, 2), axis=-1))
def test_end_to_end(self):
"""Covers expand_vector, expand_2_axes, and expand_1_axis."""
model_narrow = tf.keras.Sequential()
model_narrow.add(tf.keras.Input(shape=(3,)))
model_narrow.add(tf.keras.layers.Dense(4))
model_narrow.add(tf.keras.layers.Dense(4))
model_narrow.add(tf.keras.layers.Dense(1))
model_wide = tf.keras.Sequential()
model_wide.add(tf.keras.Input(shape=(6,)))
model_wide.add(tf.keras.layers.Dense(8))
model_wide.add(tf.keras.layers.Dense(8))
model_wide.add(tf.keras.layers.Dense(1))
x0 = np.array([[1, 2, 3]])
x1 = np.array([[1, 1, 2, 2, 3, 3]])
# Call model once to build variables first.
_, _ = model_narrow(x0), model_wide(x1)
tf2_utils_2x_wide.model_to_model_2x_wide(
model_narrow, model_wide, epsilon=0.2)
self.assertAllClose(model_narrow(x0), model_wide(x1),
rtol=1e-05, atol=1e-05)
if __name__ == "__main__":
tf.test.main()
| 3,585 | 34.156863 | 74 | py |
models | models-master/official/modeling/hyperparams/base_config_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import dataclasses
from typing import List, Optional, Tuple
from absl.testing import parameterized
import tensorflow as tf
from official.modeling.hyperparams import base_config
@dataclasses.dataclass
class DumpConfig1(base_config.Config):
a: int = 1
b: str = 'text'
@dataclasses.dataclass
class DumpConfig2(base_config.Config):
c: int = 2
d: str = 'text'
e: DumpConfig1 = dataclasses.field(default_factory=DumpConfig1)
optional_e: Optional[DumpConfig1] = None
@dataclasses.dataclass
class DumpConfig3(DumpConfig2):
f: int = 2
h: List[DumpConfig1] = dataclasses.field(
default_factory=lambda: [DumpConfig1(), DumpConfig1()])
g: Tuple[DumpConfig1, ...] = (DumpConfig1(),)
@dataclasses.dataclass
class DumpConfig4(DumpConfig2):
x: int = 3
@dataclasses.dataclass
class DummyConfig5(base_config.Config):
y: Tuple[DumpConfig2, ...] = (DumpConfig2(), DumpConfig4())
z: Tuple[str] = ('a',)
@dataclasses.dataclass
class DumpConfig6(base_config.Config):
test_config1: Optional[DumpConfig1] = None
class BaseConfigTest(parameterized.TestCase, tf.test.TestCase):
def assertHasSameTypes(self, c, d, msg=''):
"""Checks if a Config has the same structure as a given dict.
Args:
      c: the Config object to be checked.
      d: the reference dict object.
      msg: The error message to show when a type mismatches.
"""
# Make sure d is not a Config. Assume d is either
# dictionary or primitive type and c is the Config or primitive types.
self.assertNotIsInstance(d, base_config.Config)
if isinstance(d, base_config.Config.IMMUTABLE_TYPES):
self.assertEqual(pprint.pformat(c), pprint.pformat(d), msg=msg)
elif isinstance(d, base_config.Config.SEQUENCE_TYPES):
self.assertEqual(type(c), type(d), msg=msg)
for i, v in enumerate(d):
self.assertHasSameTypes(c[i], v, msg='{}[{!r}]'.format(msg, i))
elif isinstance(d, dict):
self.assertIsInstance(c, base_config.Config, msg=msg)
for k, v in sorted(d.items()):
self.assertHasSameTypes(getattr(c, k), v, msg='{}[{!r}]'.format(msg, k))
else:
raise TypeError('Unknown type: %r' % type(d))
def assertImportExport(self, v):
config = base_config.Config({'key': v})
back = config.as_dict()['key']
self.assertEqual(pprint.pformat(back), pprint.pformat(v))
self.assertHasSameTypes(config.key, v, msg='=%s v' % pprint.pformat(v))
def test_invalid_keys(self):
params = base_config.Config()
with self.assertRaises(AttributeError):
_ = params.a
def test_cls(self):
params = base_config.Config()
with self.assertRaisesRegex(
AttributeError,
'`BUILDER` is a property and `_BUILDER` is the reserved'):
params.BUILDER = DumpConfig2
with self.assertRaisesRegex(
AttributeError,
'`BUILDER` is a property and `_BUILDER` is the reserved'):
params._BUILDER = DumpConfig2
base_config.bind(DumpConfig1)(DumpConfig2)
params = DumpConfig1()
self.assertEqual(params.BUILDER, DumpConfig2)
with self.assertRaisesRegex(ValueError,
'Inside a program, we should not bind'):
base_config.bind(DumpConfig1)(DumpConfig2)
def _test():
return 'test'
base_config.bind(DumpConfig2)(_test)
params = DumpConfig2()
self.assertEqual(params.BUILDER(), 'test')
def test_nested_config_types(self):
config = DumpConfig3()
self.assertIsInstance(config.e, DumpConfig1)
self.assertIsInstance(config.h[0], DumpConfig1)
self.assertIsInstance(config.h[1], DumpConfig1)
self.assertIsInstance(config.g[0], DumpConfig1)
config.override({'e': {'a': 2, 'b': 'new text'}})
self.assertIsInstance(config.e, DumpConfig1)
self.assertEqual(config.e.a, 2)
self.assertEqual(config.e.b, 'new text')
config.override({'h': [{'a': 3, 'b': 'new text 2'}]})
self.assertIsInstance(config.h[0], DumpConfig1)
self.assertLen(config.h, 1)
self.assertEqual(config.h[0].a, 3)
self.assertEqual(config.h[0].b, 'new text 2')
config.override({'g': [{'a': 4, 'b': 'new text 3'}]})
self.assertIsInstance(config.g[0], DumpConfig1)
self.assertLen(config.g, 1)
self.assertEqual(config.g[0].a, 4)
self.assertEqual(config.g[0].b, 'new text 3')
def test_replace(self):
config = DumpConfig2()
new_config = config.replace(e={'a': 2})
self.assertEqual(new_config.e.a, 2)
self.assertIsInstance(new_config.e, DumpConfig1)
config = DumpConfig2(e=DumpConfig2())
new_config = config.replace(e={'c': 4})
self.assertEqual(new_config.e.c, 4)
self.assertIsInstance(new_config.e, DumpConfig2)
config = DumpConfig3()
new_config = config.replace(g=[{'a': 4, 'b': 'new text 3'}])
self.assertIsInstance(new_config.g[0], DumpConfig1)
self.assertEqual(new_config.g[0].a, 4)
@parameterized.parameters(
('_locked', "The key '_locked' is internally reserved."),
('_restrictions', "The key '_restrictions' is internally reserved."),
('aa', "The key 'aa' does not exist."),
)
def test_key_error(self, key, msg):
params = base_config.Config()
with self.assertRaisesRegex(KeyError, msg):
params.override({key: True})
@parameterized.parameters(
('str data',),
(123,),
(1.23,),
(None,),
(['str', 1, 2.3, None],),
(('str', 1, 2.3, None),),
)
def test_import_export_immutable_types(self, v):
self.assertImportExport(v)
out = base_config.Config({'key': v})
self.assertEqual(pprint.pformat(v), pprint.pformat(out.key))
def test_override_is_strict_true(self):
params = base_config.Config({
'a': 'aa',
'b': 2,
'c': {
'c1': 'cc',
'c2': 20
}
})
params.override({'a': 2, 'c': {'c1': 'ccc'}}, is_strict=True)
self.assertEqual(params.a, 2)
self.assertEqual(params.c.c1, 'ccc')
with self.assertRaises(KeyError):
params.override({'d': 'ddd'}, is_strict=True)
with self.assertRaises(KeyError):
params.override({'c': {'c3': 30}}, is_strict=True)
config = base_config.Config({'key': [{'a': 42}]})
with self.assertRaisesRegex(KeyError, "The key 'b' does not exist"):
config.override({'key': [{'b': 43}]})
@parameterized.parameters(
(lambda x: x, 'Unknown type'),
(object(), 'Unknown type'),
(set(), 'Unknown type'),
(frozenset(), 'Unknown type'),
)
def test_import_unsupport_types(self, v, msg):
with self.assertRaisesRegex(TypeError, msg):
_ = base_config.Config({'key': v})
@parameterized.parameters(
({
'a': [{
'b': 2,
}, {
'c': 3,
}]
},),
({
'c': [{
'f': 1.1,
}, {
'h': [1, 2],
}]
},),
(({
'a': 'aa',
'b': 2,
'c': {
'c1': 10,
'c2': 20,
}
},),),
)
def test_import_export_nested_structure(self, d):
self.assertImportExport(d)
@parameterized.parameters(
([{
'a': 42,
'b': 'hello',
'c': 1.2
}],),
(({
'a': 42,
'b': 'hello',
'c': 1.2
},),),
)
def test_import_export_nested_sequences(self, v):
self.assertImportExport(v)
@parameterized.parameters(
([([{}],)],),
([['str', 1, 2.3, None]],),
((('str', 1, 2.3, None),),),
([
('str', 1, 2.3, None),
],),
([
('str', 1, 2.3, None),
],),
([[{
'a': 42,
'b': 'hello',
'c': 1.2
}]],),
([[[{
'a': 42,
'b': 'hello',
'c': 1.2
}]]],),
((({
'a': 42,
'b': 'hello',
'c': 1.2
},),),),
(((({
'a': 42,
'b': 'hello',
'c': 1.2
},),),),),
([({
'a': 42,
'b': 'hello',
'c': 1.2
},)],),
(([{
'a': 42,
'b': 'hello',
'c': 1.2
}],),),
)
def test_import_export_unsupport_sequence(self, v):
with self.assertRaisesRegex(TypeError,
'Invalid sequence: only supports single level'):
_ = base_config.Config({'key': v})
def test_construct_subtype(self):
pass
def test_import_config(self):
params = base_config.Config({'a': [{'b': 2}, {'c': {'d': 3}}]})
self.assertLen(params.a, 2)
self.assertEqual(params.a[0].b, 2)
self.assertEqual(type(params.a[0]), base_config.Config)
self.assertEqual(pprint.pformat(params.a[0].b), '2')
self.assertEqual(type(params.a[1]), base_config.Config)
self.assertEqual(type(params.a[1].c), base_config.Config)
self.assertEqual(pprint.pformat(params.a[1].c.d), '3')
def test_override(self):
params = base_config.Config({'a': [{'b': 2}, {'c': {'d': 3}}]})
params.override({'a': [{'b': 4}, {'c': {'d': 5}}]}, is_strict=False)
self.assertEqual(type(params.a), list)
self.assertEqual(type(params.a[0]), base_config.Config)
self.assertEqual(pprint.pformat(params.a[0].b), '4')
self.assertEqual(type(params.a[1]), base_config.Config)
self.assertEqual(type(params.a[1].c), base_config.Config)
self.assertEqual(pprint.pformat(params.a[1].c.d), '5')
@parameterized.parameters(
([{}],),
(({},),),
)
def test_config_vs_params_dict(self, v):
d = {'key': v}
self.assertEqual(type(base_config.Config(d).key[0]), base_config.Config)
self.assertEqual(type(base_config.params_dict.ParamsDict(d).key[0]), dict)
def test_ppformat(self):
self.assertEqual(
pprint.pformat([
's', 1, 1.0, True, None, {}, [], (), {
(2,): (3, [4], {
6: 7,
}),
8: 9,
}
]),
"['s', 1, 1.0, True, None, {}, [], (), {8: 9, (2,): (3, [4], {6: 7})}]")
def test_with_superclass_override(self):
config = DumpConfig2()
config.override({'optional_e': {'a': 2}})
self.assertEqual(
config.optional_e.as_dict(),
{
'a': 2,
'b': 'text',
},
)
# Previously, the following will fail. See b/274696969 for context.
config = DumpConfig3()
config.override({'optional_e': {'a': 2}})
self.assertEqual(
config.optional_e.as_dict(),
{
'a': 2,
'b': 'text',
},
)
def test_get_annotations_without_base_config_leak(self):
with self.assertRaisesRegex(
KeyError, "The key 'restrictions' does not exist"
):
DumpConfig3().override({'restrictions': None})
def test_with_restrictions(self):
restrictions = ['e.a<c']
config = DumpConfig2(restrictions=restrictions)
config.validate()
def test_nested_tuple(self):
config = DummyConfig5()
config.override({
'y': [{
'c': 4,
'd': 'new text 3',
'e': {
'a': 2
}
}, {
'c': 0,
'd': 'new text 3',
'e': {
'a': 2
}
}],
'z': ['a', 'b', 'c'],
})
self.assertEqual(config.y[0].c, 4)
self.assertEqual(config.y[1].c, 0)
self.assertIsInstance(config.y[0], DumpConfig2)
self.assertIsInstance(config.y[1], DumpConfig4)
self.assertSameElements(config.z, ['a', 'b', 'c'])
def test_override_by_empty_sequence(self):
config = DummyConfig5()
config.override({
'y': [],
'z': (),
}, is_strict=True)
self.assertEmpty(config.y)
self.assertEmpty(config.z)
def test_correctly_display_optional_field(self):
c = DumpConfig6()
c.override({'test_config1': {'b': 'abc'}})
self.assertEqual(f'{c}',
"DumpConfig6(test_config1=DumpConfig1(a=1, b='abc'))")
self.assertIsInstance(c.test_config1, DumpConfig1)
if __name__ == '__main__':
tf.test.main()
| 12,626 | 28.502336 | 80 | py |
models | models-master/official/modeling/hyperparams/base_config.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base configurations to standardize experiments."""
import copy
import dataclasses
import functools
import inspect
import typing
from typing import Any, List, Mapping, Optional, Type, Union
from absl import logging
import tensorflow as tf
import yaml
from official.modeling.hyperparams import params_dict
_BOUND = set()
def bind(config_cls):
"""Bind a class to config cls."""
if not inspect.isclass(config_cls):
raise ValueError('The bind decorator is supposed to apply on the class '
f'attribute. Received {config_cls}, not a class.')
def decorator(builder):
if config_cls in _BOUND:
raise ValueError('Inside a program, we should not bind the config with a'
' class twice.')
if inspect.isclass(builder):
config_cls._BUILDER = builder # pylint: disable=protected-access
elif inspect.isfunction(builder):
def _wrapper(self, *args, **kwargs): # pylint: disable=unused-argument
return builder(*args, **kwargs)
config_cls._BUILDER = _wrapper # pylint: disable=protected-access
else:
raise ValueError(f'The `BUILDER` type is not supported: {builder}')
_BOUND.add(config_cls)
return builder
return decorator
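

# A hedged usage sketch of `bind` (the names below are illustrative, not part
# of this module):
#
#   @dataclasses.dataclass
#   class MyTaskConfig(Config):
#     learning_rate: float = 0.1
#
#   @bind(MyTaskConfig)
#   def build_my_task(config, **kwargs):
#     ...
#
# After binding, `MyTaskConfig().BUILDER` resolves to the registered builder,
# and binding the same config class twice raises a ValueError.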
def _is_optional(field):
return typing.get_origin(field) is Union and type(None) in typing.get_args(
field)
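

# For example, _is_optional(Optional[int]) is True because Optional[int] is
# Union[int, None], while _is_optional(int) is False.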
@dataclasses.dataclass
class Config(params_dict.ParamsDict):
"""The base configuration class that supports YAML/JSON based overrides.
Because of YAML/JSON serialization limitations, some semantics of dataclass
are not supported:
  * It recursively enforces an allowlist of basic types and container types, so
it avoids surprises with copy and reuse caused by unanticipated types.
  * Warning: it converts a Dict to `Config` even within (single-level)
    sequences, e.g. for config = Config({'key': ({'a': 42},)}),
    type(config.key[0]) is Config rather than dict.
If you define/annotate some field as Dict, the field will convert to a
`Config` instance and lose the dictionary type.
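
  Example:

    # Grounded in the behavior above: a dict inside a single-level sequence
    # is converted into a Config.
    config = Config({'key': ({'a': 42},)})
    assert type(config.key[0]) is Config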
"""
# The class or method to bind with the params class.
_BUILDER = None
# It's safe to add bytes and other immutable types here.
IMMUTABLE_TYPES = (str, int, float, bool, type(None))
# It's safe to add set, frozenset and other collections here.
SEQUENCE_TYPES = (list, tuple)
default_params: dataclasses.InitVar[Optional[Mapping[str, Any]]] = None
restrictions: dataclasses.InitVar[Optional[List[str]]] = None
def __post_init__(self, default_params, restrictions):
super().__init__(
default_params=default_params,
restrictions=restrictions)
@property
def BUILDER(self):
return self._BUILDER
@classmethod
def _get_annotations(cls):
"""Returns valid annotations.
Note: this is similar to dataclasses.__annotations__ except it also includes
annotations from its parent classes.
"""
all_annotations = typing.get_type_hints(cls)
# Removes Config class annotation from the value, e.g., default_params,
# restrictions, etc.
for k in Config.__annotations__:
del all_annotations[k]
return all_annotations
@classmethod
def _isvalidsequence(cls, v):
"""Check if the input values are valid sequences.
Args:
v: Input sequence.
Returns:
True if the sequence is valid. Valid sequence includes the sequence
type in cls.SEQUENCE_TYPES and element type is in cls.IMMUTABLE_TYPES or
is dict or ParamsDict.
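
    Example:
      `[1, 'a', None]` is valid (all elements immutable) and `({'x': 1},)` is
      valid (all elements dicts), but `[[1]]` is invalid because nested
      sequences are rejected.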
"""
if not isinstance(v, cls.SEQUENCE_TYPES):
return False
return (all(isinstance(e, cls.IMMUTABLE_TYPES) for e in v) or
all(isinstance(e, dict) for e in v) or
all(isinstance(e, params_dict.ParamsDict) for e in v))
@classmethod
def _import_config(cls, v, subconfig_type):
"""Returns v with dicts converted to Configs, recursively."""
if not issubclass(subconfig_type, params_dict.ParamsDict):
raise TypeError(
          'Subconfig_type should be a subclass of ParamsDict, found {!r}'.format(
subconfig_type))
if isinstance(v, cls.IMMUTABLE_TYPES):
return v
elif isinstance(v, cls.SEQUENCE_TYPES):
      # Only a single level of sequence nesting is supported.
if not cls._isvalidsequence(v):
raise TypeError(
'Invalid sequence: only supports single level {!r} of {!r} or '
'dict or ParamsDict found: {!r}'.format(cls.SEQUENCE_TYPES,
cls.IMMUTABLE_TYPES, v))
import_fn = functools.partial(
cls._import_config, subconfig_type=subconfig_type)
return type(v)(map(import_fn, v))
elif isinstance(v, params_dict.ParamsDict):
# Deepcopy here is a temporary solution for preserving type in nested
# Config object.
return copy.deepcopy(v)
elif isinstance(v, dict):
return subconfig_type(v)
else:
raise TypeError('Unknown type: {!r}'.format(type(v)))
@classmethod
def _export_config(cls, v):
"""Returns v with Configs converted to dicts, recursively."""
if isinstance(v, cls.IMMUTABLE_TYPES):
return v
elif isinstance(v, cls.SEQUENCE_TYPES):
return type(v)(map(cls._export_config, v))
elif isinstance(v, params_dict.ParamsDict):
return v.as_dict()
elif isinstance(v, dict):
raise TypeError('dict value not supported in converting.')
else:
raise TypeError('Unknown type: {!r}'.format(type(v)))
@classmethod
def _get_subconfig_type(
cls, k, subconfig_type=None
) -> Type[params_dict.ParamsDict]:
"""Get element type by the field name.
Args:
k: the key/name of the field.
subconfig_type: default subconfig_type. If None, it is set to
Config.
Returns:
Config as default. If a type annotation is found for `k`,
1) returns the type of the annotation if it is subtype of ParamsDict;
2) returns the element type if the annotation of `k` is List[SubType]
or Tuple[SubType].
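
    Example (a hedged sketch; `MyNet` and `SubCfg` are illustrative names):
      for `class MyNet(Config)` with the annotation `backbone: List[SubCfg]`,
      `MyNet._get_subconfig_type('backbone')` returns `SubCfg`, provided that
      `SubCfg` subclasses ParamsDict.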
"""
if not subconfig_type:
subconfig_type = Config
annotations = cls._get_annotations()
if k in annotations:
# Directly Config subtype.
type_annotation = annotations[k]
      # Loop for stripping the Optional annotation.
      traverse_in = True
      while traverse_in:
if (isinstance(type_annotation, type) and
issubclass(type_annotation, Config)):
subconfig_type = type_annotation
break
else:
# Check if the field is a sequence of subtypes.
field_type = typing.get_origin(type_annotation)
if (isinstance(field_type, type) and
issubclass(field_type, cls.SEQUENCE_TYPES)):
element_type = typing.get_args(type_annotation)[0]
subconfig_type = (
element_type if issubclass(element_type, params_dict.ParamsDict)
else subconfig_type)
break
elif _is_optional(type_annotation):
# Strip the `Optional` annotation and process the subtype.
type_annotation = typing.get_args(type_annotation)[0]
continue
traverse_in = False
return subconfig_type
def _set(self, k, v):
"""Overrides same method in ParamsDict.
Also called by ParamsDict methods.
Args:
k: key to set.
v: value.
Raises:
RuntimeError
"""
subconfig_type = self._get_subconfig_type(k)
    def is_null(k):
      return k not in self.__dict__ or not self.__dict__[k]
if isinstance(v, dict):
if is_null(k):
        # If the key does not exist or the value is None, a new Config-family
        # object should be created for the key.
self.__dict__[k] = subconfig_type(v)
else:
self.__dict__[k].override(v)
elif not is_null(k) and isinstance(v, self.SEQUENCE_TYPES) and all(
[not isinstance(e, self.IMMUTABLE_TYPES) for e in v]):
if len(self.__dict__[k]) == len(v):
for i in range(len(v)):
self.__dict__[k][i].override(v[i])
elif not all([isinstance(e, self.IMMUTABLE_TYPES) for e in v]):
logging.warning(
"The list/tuple don't match the value dictionaries provided. Thus, "
'the list/tuple is determined by the type annotation and '
'values provided. This is error-prone.')
self.__dict__[k] = self._import_config(v, subconfig_type)
else:
self.__dict__[k] = self._import_config(v, subconfig_type)
else:
self.__dict__[k] = self._import_config(v, subconfig_type)
def __setattr__(self, k, v):
if k == 'BUILDER' or k == '_BUILDER':
raise AttributeError('`BUILDER` is a property and `_BUILDER` is the '
'reserved class attribute. We should only assign '
'`_BUILDER` at the class level.')
if k not in self.RESERVED_ATTR:
if getattr(self, '_locked', False):
        raise ValueError('The Config has been locked. No change is allowed.')
self._set(k, v)
def _override(self, override_dict, is_strict=True):
"""Overrides same method in ParamsDict.
Also called by ParamsDict methods.
Args:
override_dict: dictionary to write to .
is_strict: If True, not allows to add new keys.
Raises:
KeyError: overriding reserved keys or keys not exist (is_strict=True).
"""
for k, v in sorted(override_dict.items()):
if k in self.RESERVED_ATTR:
raise KeyError('The key {!r} is internally reserved. '
'Can not be overridden.'.format(k))
if k not in self.__dict__:
if is_strict:
raise KeyError('The key {!r} does not exist in {!r}. '
'To extend the existing keys, use '
'`override` with `is_strict` = False.'.format(
k, type(self)))
else:
self._set(k, v)
else:
if isinstance(v, dict) and self.__dict__[k]:
self.__dict__[k]._override(v, is_strict) # pylint: disable=protected-access
elif isinstance(v, params_dict.ParamsDict) and self.__dict__[k]:
self.__dict__[k]._override(v.as_dict(), is_strict) # pylint: disable=protected-access
else:
self._set(k, v)
def as_dict(self):
"""Returns a dict representation of params_dict.ParamsDict.
For the nested params_dict.ParamsDict, a nested dict will be returned.
"""
return {
k: self._export_config(v)
for k, v in self.__dict__.items()
if k not in self.RESERVED_ATTR
}
def replace(self, **kwargs):
"""Overrides/returns a unlocked copy with the current config unchanged."""
# pylint: disable=protected-access
params = copy.deepcopy(self)
params._locked = False
params._override(kwargs, is_strict=True)
# pylint: enable=protected-access
return params
@classmethod
def from_yaml(cls, file_path: str):
# Note: This only works if the Config has all default values.
with tf.io.gfile.GFile(file_path, 'r') as f:
loaded = yaml.load(f, Loader=yaml.FullLoader)
config = cls()
config.override(loaded)
return config
@classmethod
def from_json(cls, file_path: str):
"""Wrapper for `from_yaml`."""
return cls.from_yaml(file_path)
@classmethod
def from_args(cls, *args, **kwargs):
"""Builds a config from the given list of arguments."""
# Note we intend to keep `__annotations__` instead of `_get_annotations`.
# Assuming a parent class of (a, b) with the sub-class of (c, d), the
# sub-class will take (c, d) for args, rather than starting from (a, b).
attributes = list(cls.__annotations__.keys())
default_params = {a: p for a, p in zip(attributes, args)}
default_params.update(kwargs)
return cls(default_params=default_params)
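

# A hedged usage sketch for `Config.from_args` (the `Point` class below is
# illustrative, not part of this module):
#
#   @dataclasses.dataclass
#   class Point(Config):
#     x: int = 0
#     y: int = 0
#
#   p = Point.from_args(1, y=2)  # positional args map onto the annotations
#   assert p.x == 1 and p.y == 2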
| 12,509 | 34.641026 | 96 | py |
models | models-master/official/modeling/hyperparams/oneof.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config class that supports oneof functionality."""
from typing import Optional
import dataclasses
from official.modeling.hyperparams import base_config
@dataclasses.dataclass
class OneOfConfig(base_config.Config):
"""Configuration for configs with one of feature.
Attributes:
type: 'str', name of the field to select.
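
  Example (a sketch mirroring the pattern in oneof_test.py):

    @dataclasses.dataclass
    class Backbone(OneOfConfig):
      type: str = 'resnet'
      resnet: ResNet = dataclasses.field(default_factory=ResNet)

    `Backbone().get()` then returns the `resnet` sub-config, since `type` is
    'resnet'.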
"""
type: Optional[str] = None
def as_dict(self):
"""Returns a dict representation of OneOfConfig.
For the nested base_config.Config, a nested dict will be returned.
"""
if self.type is None:
return {'type': None}
elif self.__dict__['type'] not in self.__dict__:
raise ValueError('type: {!r} is not a valid key!'.format(
self.__dict__['type']))
else:
chosen_type = self.type
chosen_value = self.__dict__[chosen_type]
return {'type': self.type, chosen_type: self._export_config(chosen_value)}
def get(self):
"""Returns selected config based on the value of type.
If type is not set (None), None is returned.
"""
chosen_type = self.type
if chosen_type is None:
return None
if chosen_type not in self.__dict__:
raise ValueError('type: {!r} is not a valid key!'.format(self.type))
return self.__dict__[chosen_type]
| 1,870 | 31.258621 | 80 | py |
models | models-master/official/modeling/hyperparams/oneof_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import tensorflow as tf
from official.modeling.hyperparams import base_config
from official.modeling.hyperparams import oneof
@dataclasses.dataclass
class ResNet(base_config.Config):
model_depth: int = 50
@dataclasses.dataclass
class Backbone(oneof.OneOfConfig):
type: str = 'resnet'
resnet: ResNet = dataclasses.field(default_factory=ResNet)
not_resnet: int = 2
@dataclasses.dataclass
class OutputLayer(oneof.OneOfConfig):
type: str = 'single'
single: int = 1
multi_head: int = 2
@dataclasses.dataclass
class Network(base_config.Config):
backbone: Backbone = dataclasses.field(default_factory=Backbone)
output_layer: OutputLayer = dataclasses.field(default_factory=OutputLayer)
class OneOfTest(tf.test.TestCase):
def test_to_dict(self):
network_params = {
'backbone': {
'type': 'resnet',
'resnet': {
'model_depth': 50
}
},
'output_layer': {
'type': 'single',
'single': 1000
}
}
network_config = Network(network_params)
self.assertEqual(network_config.as_dict(), network_params)
def test_get_oneof(self):
backbone = Backbone()
self.assertIsInstance(backbone.get(), ResNet)
self.assertEqual(backbone.get().as_dict(), {'model_depth': 50})
if __name__ == '__main__':
tf.test.main()
| 1,981 | 26.527778 | 76 | py |